Merge trunk into the HDFS-3077 branch


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1396918 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 5f3e470..5272889 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -335,7 +335,7 @@
         echo "The patch appears to be a documentation patch that doesn't require tests."
         JIRA_COMMENT="$JIRA_COMMENT
 
-    +0 tests included.  The patch appears to be a documentation patch that doesn't require tests."
+    {color:green}+0 tests included{color}.  The patch appears to be a documentation patch that doesn't require tests."
         return 0
       fi
     fi
@@ -681,12 +681,46 @@
 
   failed_tests=""
   modules=$(findModules)
-  for module in $modules;
-  do
+  #
+  # If we are building hadoop-hdfs-project, we must build the native component
+  # of hadoop-common-project first.  In order to accomplish this, we move the
+  # hadoop-hdfs subprojects to the end of the list so that common will come
+  # first.
+  #
+  # Of course, we may not be building hadoop-common at all; in this case, we
+  # explicitly insert a mvn compile -Pnative of common, to ensure that the
+  # native libraries show up where we need them.
+  #
+  building_common=0
+  for module in $modules; do
+      if [[ $module == hadoop-hdfs-project* ]]; then
+          hdfs_modules="$hdfs_modules $module"
+      elif [[ $module == hadoop-common-project* ]]; then
+          ordered_modules="$ordered_modules $module"
+          building_common=1
+      else
+          ordered_modules="$ordered_modules $module"
+      fi
+  done
+  if [ -n "$hdfs_modules" ]; then
+      ordered_modules="$ordered_modules $hdfs_modules"
+      if [[ $building_common -eq 0 ]]; then
+          echo "  Building hadoop-common with -Pnative in order to provide \
+libhadoop.so to the hadoop-hdfs unit tests."
+          echo "  $MVN compile -Pnative -D${PROJECT_NAME}PatchProcess"
+          if ! $MVN compile -Pnative -D${PROJECT_NAME}PatchProcess; then
+              JIRA_COMMENT="$JIRA_COMMENT
+        {color:red}-1 core tests{color}.  Failed to build the native portion \
+of hadoop-common prior to running the unit tests in $ordered_modules"
+              return 1
+          fi
+      fi
+  fi
+  for module in $ordered_modules; do
     cd $module
     echo "  Running tests in $module"
     echo "  $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess"
-    $MVN clean install -fn -Pnative -D${PROJECT_NAME}PatchProcess
+    $MVN clean install -fn -Pnative -Drequire.test.libhadoop -D${PROJECT_NAME}PatchProcess
     module_failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
     # With -fn mvn always exits with a 0 exit code.  Because of this we need to
     # find the errors instead of using the exit code.  We assume that if the build
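
The loop above is a stable partition: hadoop-hdfs-project modules are collected
to the side and appended after everything else, so hadoop-common-project (when
present) builds first and libhadoop.so exists before any HDFS tests need it.
A minimal Java sketch of the same ordering idea, for illustration only (the
module names are examples, not output of findModules):

{code}
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ModuleOrder {
  // Stable partition: keep the original order but move all
  // hadoop-hdfs-project modules to the end, so hadoop-common-project
  // is built before anything that needs its native library.
  static List<String> reorder(List<String> modules) {
    List<String> ordered = new ArrayList<>();
    List<String> hdfs = new ArrayList<>();
    for (String m : modules) {
      if (m.startsWith("hadoop-hdfs-project")) {
        hdfs.add(m);
      } else {
        ordered.add(m);
      }
    }
    ordered.addAll(hdfs);
    return ordered;
  }

  public static void main(String[] args) {
    System.out.println(reorder(Arrays.asList(
        "hadoop-hdfs-project/hadoop-hdfs",
        "hadoop-common-project/hadoop-common")));
    // [hadoop-common-project/hadoop-common, hadoop-hdfs-project/hadoop-hdfs]
  }
}
{code}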
@@ -914,6 +948,7 @@
 fi
 buildWithPatch
 checkAuthor
+(( RESULT = RESULT + $? ))
 
 if [[ $JENKINS == "true" ]] ; then
   cleanUpXml
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 0fd400e..89490bc 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -19,6 +19,8 @@
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
 import org.ietf.jgss.Oid;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.security.auth.Subject;
 import javax.security.auth.login.AppConfigurationEntry;
@@ -44,6 +46,9 @@
  * sequence.
  */
 public class KerberosAuthenticator implements Authenticator {
+  
+  private static final Logger LOG = LoggerFactory.getLogger(
+      KerberosAuthenticator.class);
 
   /**
    * HTTP header used by the SPNEGO server endpoint during an authentication sequence.
@@ -152,9 +157,18 @@
       }
       conn.setRequestMethod(AUTH_HTTP_METHOD);
       conn.connect();
-      if (isNegotiate()) {
+      
+      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+        LOG.debug("JDK performed authentication on our behalf.");
+        // If the JDK already did the SPNEGO back-and-forth for
+        // us, just pull out the token.
+        AuthenticatedURL.extractToken(conn, token);
+        return;
+      } else if (isNegotiate()) {
+        LOG.debug("Performing our own SPNEGO sequence.");
         doSpnegoSequence(token);
       } else {
+        LOG.debug("Using fallback authenticator sequence.");
         getFallBackAuthenticator().authenticate(url, token);
       }
     }
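
Clients typically drive this logic through AuthenticatedURL, and the token ends
up populated by whichever branch runs: JDK-negotiated auth, the explicit SPNEGO
sequence, or the fallback authenticator. A minimal usage sketch, assuming a
Kerberos ticket in the local cache and a hypothetical SPNEGO-protected endpoint:

{code}
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class SpnegoClient {
  public static void main(String[] args) throws Exception {
    // The token is filled in by KerberosAuthenticator.authenticate().
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    URL url = new URL("http://namenode.example.com:50070/jmx"); // hypothetical
    HttpURLConnection conn =
        new AuthenticatedURL(new KerberosAuthenticator()).openConnection(url, token);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
{code}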
@@ -168,7 +182,11 @@
    * @return the fallback {@link Authenticator}.
    */
   protected Authenticator getFallBackAuthenticator() {
-    return new PseudoAuthenticator();
+    Authenticator auth = new PseudoAuthenticator();
+    if (connConfigurator != null) {
+      auth.setConnectionConfigurator(connConfigurator);
+    }
+    return auth;
   }
 
   /*
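
The fix above matters to anything that installs a ConnectionConfigurator (for
example SSL setup or socket timeouts): previously the fallback
PseudoAuthenticator opened unconfigured connections. A sketch of a
configurator, with illustrative timeout values:

{code}
import java.io.IOException;
import java.net.HttpURLConnection;

import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class TimeoutConfigurator implements ConnectionConfigurator {
  @Override
  public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
    conn.setConnectTimeout(5000); // illustrative values
    conn.setReadTimeout(5000);
    return conn;
  }

  public static void main(String[] args) {
    KerberosAuthenticator auth = new KerberosAuthenticator();
    auth.setConnectionConfigurator(new TimeoutConfigurator());
    // With the change above, the PseudoAuthenticator returned by
    // getFallBackAuthenticator() inherits this configurator as well.
  }
}
{code}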
@@ -197,11 +215,16 @@
       AccessControlContext context = AccessController.getContext();
       Subject subject = Subject.getSubject(context);
       if (subject == null) {
+        LOG.debug("No subject in context, logging in");
         subject = new Subject();
         LoginContext login = new LoginContext("", subject,
             null, new KerberosConfiguration());
         login.login();
       }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Using subject: " + subject);
+      }
       Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
 
         @Override
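
The surrounding method follows the standard JAAS pattern: reuse the Subject
already on the AccessControlContext when there is one, otherwise perform a
fresh Kerberos login, then run the SPNEGO exchange under Subject.doAs. A
condensed, self-contained sketch of that pattern; KerberosConfig here is a
hypothetical stand-in for the authenticator's in-line KerberosConfiguration:

{code}
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;

import javax.security.auth.Subject;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;

public class JaasSketch {

  // Hypothetical stand-in: use the Kerberos ticket cache, never prompt.
  static class KerberosConfig extends Configuration {
    @Override
    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
      Map<String, String> options = new HashMap<>();
      options.put("useTicketCache", "true");
      options.put("doNotPrompt", "true");
      return new AppConfigurationEntry[] {
          new AppConfigurationEntry(
              "com.sun.security.auth.module.Krb5LoginModule",
              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
              options)};
    }
  }

  // Log in only when the caller has no Subject, then run the privileged
  // work (e.g. the GSS-API token exchange) as that Subject.
  static <T> T runAs(Subject subject, PrivilegedExceptionAction<T> action)
      throws Exception {
    if (subject == null) {
      subject = new Subject();
      new LoginContext("", subject, null, new KerberosConfig()).login();
    }
    return Subject.doAs(subject, action);
  }
}
{code}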
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d4d4639..ecf895f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -7,6 +7,8 @@
     HADOOP-8124. Remove the deprecated FSDataOutputStream constructor,
     FSDataOutputStream.sync() and Syncable.sync().  (szetszwo)
 
+    HADOOP-8886. Remove KFS support. (eli)
+
   NEW FEATURES
     
     HADOOP-8469. Make NetworkTopology class pluggable.  (Junping Du via
@@ -117,6 +119,9 @@
     HADOOP-8840. Fix the test-patch colorizer to cover all sorts of +1 lines.
     (Harsh J via bobby)
 
+    HADOOP-8864. Addendum to HADOOP-8840: Add a coloring case for +0 results
+    too. (harsh)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -244,6 +249,16 @@
     required context item is not configured
     (Brahma Reddy Battula via harsh)
 
+    HADOOP-3957. Change MutableQuantiles to use a shared thread for rolling
+    over metrics. (Andrew Wang via todd)
+
+    HADOOP-8386. hadoop script doesn't work if 'cd' prints to stdout
+    (default behavior in some bash setups (esp. Ubuntu))
+    (Christopher Berner and Andy Isaacson via harsh)
+
+    HADOOP-8839. test-patch's -1 on @author tag presence doesn't cause
+    a -1 to the overall result (harsh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -272,8 +287,24 @@
 
     HADOOP-8736. Add Builder for building RPC server. (Brandon Li via Suresh)
 
+    HADOOP-8851. Use -XX:+HeapDumpOnOutOfMemoryError JVM option in the forked
+    tests. (Ivan A. Veselovsky via atm)
+
+    HADOOP-8783. Improve RPC.Server's digest auth (daryn)
+
+    HADOOP-8889. Upgrade to Surefire 2.12.3 (todd)
+
+    HADOOP-8804. Improve Web UIs when the wildcard address is used.
+    (Senthil Kumar via eli)
+
+    HADOOP-8894. GenericTestUtils.waitFor should dump thread stacks on timeout
+    (todd)
+
   OPTIMIZATIONS
 
+    HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
+    via atm)
+
   BUG FIXES
 
     HADOOP-8795. BASH tab completion doesn't look in PATH, assumes path to
@@ -288,6 +319,14 @@
    HADOOP-8791. Fix rm command documentation to indicate it deletes
     files and not directories. (Jing Zhao via suresh)
 
+    HADOOP-8616. ViewFS configuration requires a trailing slash. (Sandy Ryza
+    via atm)
+
+    HADOOP-8756. Fix SEGV when libsnappy is in java.library.path but
+    not LD_LIBRARY_PATH. (Colin Patrick McCabe via eli)
+
+    HADOOP-8881. FileBasedKeyStoresFactory initialization logging should be debug not info. (tucu)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -298,6 +337,8 @@
     HADOOP-8689. Make trash a server side configuration option. (eli)
 
    HADOOP-8710. Remove ability for users to easily run the trash emptier. (eli)
+    
+    HADOOP-8794. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)
 
   NEW FEATURES
  
@@ -543,8 +584,6 @@
     HADOOP-8031. Configuration class fails to find embedded .jar resources; 
     should use URL.openStream() (genman via tucu)
 
-    HADOOP-8738. junit JAR is showing up in the distro (tucu)
-
     HADOOP-8737. cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h.
     (Colin Patrick McCabe via eli)
 
@@ -574,6 +613,8 @@
 
     HADOOP-8781. hadoop-config.sh should add JAVA_LIBRARY_PATH to LD_LIBRARY_PATH. (tucu)
 
+    HADOOP-8855. SSL-based image transfer does not work when Kerberos is disabled. (todd via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -976,6 +1017,18 @@
    HADOOP-8655. Fix TextInputFormat for large delimiters. (Gelesh via
     bobby) 
 
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -990,7 +1043,10 @@
 
   BUG FIXES
 
-Release 0.23.3 - UNRELEASED
+    HADOOP-8843. Old trash directories are never deleted on upgrade
+    from 1.x (jlowe)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 0244955..19c6e5b 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -176,18 +176,6 @@
      </Match>
 
      <Match>
-       <Class name="org.apache.hadoop.fs.kfs.KFSOutputStream" />
-       <Field name="path" />
-       <Bug pattern="URF_UNREAD_FIELD" />
-     </Match>
-
-     <Match>
-       <Class name="org.apache.hadoop.fs.kfs.KosmosFileSystem" />
-       <Method name="initialize" />
-       <Bug pattern="DM_EXIT" />
-     </Match>
-
-     <Match>
        <Class name="org.apache.hadoop.io.Closeable" />
        <Bug pattern="NM_SAME_SIMPLE_NAME_AS_INTERFACE" />
      </Match>
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 47f3c99..e8fdee5 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -195,11 +195,6 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>net.sf.kosmosfs</groupId>
-      <artifactId>kfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>org.apache.ant</groupId>
       <artifactId>ant</artifactId>
       <scope>provided</scope>
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index 69c52a9..bff64d9 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -123,6 +123,7 @@
     ${D}/security/JniBasedUnixGroupsMapping.c
     ${D}/security/JniBasedUnixGroupsNetgroupMapping.c
     ${D}/security/getGroup.c
+    ${D}/util/NativeCodeLoader.c
     ${D}/util/NativeCrc32.c
     ${D}/util/bulk_crc32.c
 )
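
NativeCodeLoader.c backs org.apache.hadoop.util.NativeCodeLoader, which is how
Java code can tell whether libhadoop.so actually loaded; the
-Drequire.test.libhadoop flag added to test-patch.sh above builds on that to
fail native-dependent tests instead of silently skipping them. A trivial
check, for illustration:

{code}
import org.apache.hadoop.util.NativeCodeLoader;

public class NativeCheck {
  public static void main(String[] args) {
    // True only when libhadoop.so was found on java.library.path and loaded.
    System.out.println("libhadoop loaded: " + NativeCodeLoader.isNativeCodeLoaded());
  }
}
{code}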
diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake
index 9098b68..7423de7 100644
--- a/hadoop-common-project/hadoop-common/src/config.h.cmake
+++ b/hadoop-common-project/hadoop-common/src/config.h.cmake
@@ -2,7 +2,6 @@
 #define CONFIG_H
 
 #cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
-#cmakedefine HADOOP_RUNAS_HOME "@HADOOP_RUNAS_HOME@"
 #cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
 #cmakedefine HAVE_SYNC_FILE_RANGE
 #cmakedefine HAVE_POSIX_FADVISE
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 486465a..f57d9ab 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -19,7 +19,7 @@
 
 bin=`which $0`
 bin=`dirname ${bin}`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
  
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 94fa4a4..4f83ffd8 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -269,21 +269,21 @@
 CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
 
 # put yarn in classpath if present
-if [ "$YARN_HOME" = "" ]; then
+if [ "$HADOOP_YARN_HOME" = "" ]; then
   if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
-    export YARN_HOME=$HADOOP_PREFIX
+    export HADOOP_YARN_HOME=$HADOOP_PREFIX
   fi
 fi
 
-if [ -d "$YARN_HOME/$YARN_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR
+if [ -d "$HADOOP_YARN_HOME/$YARN_DIR/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
 fi
 
-if [ -d "$YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_LIB_JARS_DIR'/*'
+if [ -d "$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
 fi
 
-CLASSPATH=${CLASSPATH}:$YARN_HOME/$YARN_DIR'/*'
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
 
 # put mapred in classpath if present AND different from YARN
 if [ "$HADOOP_MAPRED_HOME" = "" ]; then
@@ -292,7 +292,7 @@
   fi
 fi
 
-if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$YARN_HOME/$YARN_DIR" ] ; then
+if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$HADOOP_YARN_HOME/$YARN_DIR" ] ; then
   if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps" ]; then
     CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
   fi
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
index f4047db..3124328 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
@@ -33,6 +33,6 @@
 fi
 
 # start yarn daemons if yarn is present
-if [ -f "${YARN_HOME}"/sbin/start-yarn.sh ]; then
-  "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
+  "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
 fi
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
index da57735..9e51b1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
@@ -2,7 +2,7 @@
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
 
 *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period
+# default sampling period, in seconds
 *.period=10
 
 # The namenode-metrics.out will contain metrics from all context
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
index 3557e06..e3915ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+++ b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
@@ -1 +1,10722 @@
-THIS IS A PLACEHOLDER.  REAL RELEASE NOTES WILL BE ADDED TO THIS FILE IN RELEASE BRANCHES.
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.0.2-alpha Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.0.2-alpha Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.0.1-alpha</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-137">YARN-137</a>.
+     Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (scheduler)<br>
+     <b>Change the default scheduler to the CapacityScheduler</b><br>
+     <blockquote>There are some bugs in the FifoScheduler at the moment: it doesn't distribute tasks across nodes, and there are some headroom (available resource) issues.

+That's not the best experience for users trying out the 2.0 branch. The CS with the default configuration of a single queue behaves the same as the FifoScheduler and doesn't have these issues.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-108">YARN-108</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>FSDownload can create cache directories with the wrong permissions</b><br>
+     <blockquote>When the cluster is configured with a restrictive umask, e.g.: {{fs.permissions.umask-mode=0077}}, the nodemanager can end up creating directory entries in the public cache with the wrong permissions.  The permissions can end up where only the nodemanager user can access files in the public cache, preventing jobs from running properly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-106">YARN-106</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Nodemanager needs to set permissions of local directories</b><br>
+     <blockquote>If the nodemanager process is running with a restrictive default umask (e.g.: 0077) then it will create its local directories with permissions that are too restrictive to allow containers from other users to run.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-88">YARN-88</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>DefaultContainerExecutor can fail to set proper permissions</b><br>
+     <blockquote>{{DefaultContainerExecutor}} can fail to set the proper permissions on its local directories if the cluster has been configured with a restrictive umask, e.g.: fs.permissions.umask-mode=0077.  The configured umask ends up defeating the permissions requested by {{DefaultContainerExecutor}} when it creates directories.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-87">YARN-87</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>NM ResourceLocalizationService does not set permissions of local cache directories</b><br>
+     <blockquote>{{ResourceLocalizationService}} creates a file cache and user cache directory when it starts up but doesn't specify the permissions for them when they are created.  If the cluster configs are set to limit the default permissions (e.g.: fs.permissions.umask-mode=0077 instead of the default 0022), then the cache directories are created with too-restrictive permissions and no jobs are able to run.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-83">YARN-83</a>.
+     Major bug reported by Bikas Saha and fixed by Bikas Saha (client)<br>
+     <b>Change package of YarnClient to include apache</b><br>
+     <blockquote>Currently it's org.hadoop.* instead of org.apache.hadoop.*</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-80">YARN-80</a>.
+     Major improvement reported by Todd Lipcon and fixed by Arun C Murthy (capacityscheduler)<br>
+     <b>Support delay scheduling for node locality in MR2's capacity scheduler</b><br>
+     <blockquote>The capacity scheduler in MR2 doesn't support delay scheduling for achieving node-level locality. So, jobs exhibit poor data locality even if they have good rack locality. Especially on clusters where disk throughput is much better than network capacity, this hurts overall job performance. We should optionally support node-level delay scheduling heuristics similar to what the fair scheduler implements in MR1.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-79">YARN-79</a>.
+     Major bug reported by Bikas Saha and fixed by Vinod Kumar Vavilapalli (client)<br>
+     <b>Calling YarnClientImpl.close throws Exception</b><br>
+     <blockquote>The following exception is thrown

+===========

+*org.apache.hadoop.HadoopIllegalArgumentException: Cannot close proxy - is not Closeable or does not provide closeable invocation handler class org.apache.hadoop.yarn.api.impl.pb.client.ClientRMProtocolPBClientImpl*

+	*at org.apache.hadoop.ipc.RPC.stopProxy(RPC.java:624)*

+	*at org.hadoop.yarn.client.YarnClientImpl.stop(YarnClientImpl.java:102)*

+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.UnmanagedAMLauncher.run(UnmanagedAMLauncher.java:336)

+	at org.apache.hadoop.yarn.applications.unmanagedamlauncher.TestUnmanagedAMLauncher.testDSShell(TestUnmanagedAMLauncher.java:156)

+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)

+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)

+	at java.lang.reflect.Method.invoke(Method.java:597)

+	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:44)

+	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:15)

+	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:41)

+	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:20)

+	at org.junit.runners.BlockJUnit4ClassRunner.runNotIgnored(BlockJUnit4ClassRunner.java:79)

+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:71)

+	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:49)

+	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:193)

+	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:52)

+	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:191)

+	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:42)

+	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:184)

+	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:28)

+	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:31)

+	at org.junit.runners.ParentRunner.run(ParentRunner.java:236)

+	at org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:236)

+	at org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:134)

+	at org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:113)

+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)

+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)

+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)

+	at java.lang.reflect.Method.invoke(Method.java:597)

+	at org.apache.maven.surefire.util.ReflectionUtils.invokeMethodWithArray(ReflectionUtils.java:189)

+	at org.apache.maven.surefire.booter.ProviderFactory$ProviderProxy.invoke(ProviderFactory.java:165)

+	at org.apache.maven.surefire.booter.ProviderFactory.invokeProvider(ProviderFactory.java:85)

+	at org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:103)

+	at org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:74)

+===========</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-75">YARN-75</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth <br>
+     <b>RMContainer should handle a RELEASE event while RUNNING</b><br>
+     <blockquote>An AppMaster can send a container release at any point. Currently this results in an exception, if this is done while the RM considers the container to be RUNNING.

+The event not being processed correctly also implies that these containers do not show up in the Completed Container List seen by the AM (AMRMProtocol). MR-3902 depends on this set being complete. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-68">YARN-68</a>.
+     Major bug reported by patrick white and fixed by Daryn Sharp (nodemanager)<br>
+     <b>NodeManager will refuse to shutdown indefinitely due to container log aggregation</b><br>
+     <blockquote>The nodemanager is able to get into a state where containermanager.logaggregation.AppLogAggregatorImpl will apparently wait

+indefinitely for log aggregation to complete for an application, even if that application has abnormally terminated and is no longer present. 

+

+Observed behavior is that an attempt to stop the nodemanager daemon will return but have no effect, the nm log continually displays messages similar to this:

+

+[Thread-1]2012-08-21 17:44:07,581 INFO

+org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AppLogAggregatorImpl:

+Waiting for aggregation to complete for application_1345221477405_2733

+

+The only recovery we found to work was to 'kill -9' the nm process.

+

+What exactly causes the NM to enter this state is unclear, but we do see this behavior reliably when the NM has run a task which failed. For example, when debugging oozie distcp actions and having a distcp map task fail, the NM that was running the container enters this state and a shutdown of that NM never completes; 'never' in this case meant waiting for 2 hours before killing the nodemanager process.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-66">YARN-66</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (nodemanager)<br>
+     <b>aggregated logs permissions not set properly</b><br>
+     <blockquote>If the default file permissions are set to something restrictive, like 700, application logs get aggregated and created with those restrictive file permissions, which doesn't allow the history server to serve them up.

+

+

+They need to be created with group readable similar to how log aggregation sets up the directory permissions.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-63">YARN-63</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>RMNodeImpl is missing valid transitions from the UNHEALTHY state</b><br>
+     <blockquote>The ResourceManager isn't properly handling nodes that have been marked UNHEALTHY when they are lost or are decommissioned.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-60">YARN-60</a>.
+     Blocker sub-task reported by Daryn Sharp and fixed by Vinod Kumar Vavilapalli (nodemanager)<br>
+     <b>NMs rejects all container tokens after secret key rolls</b><br>
+     <blockquote>The NM's token secret manager will reject all container tokens after the secret key is activated which means the NM will not launch _any_ containers including AMs.  The whole yarn cluster becomes inoperable in 1d.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-58">YARN-58</a>.
+     Critical bug reported by Daryn Sharp and fixed by Jason Lowe (nodemanager)<br>
+     <b>NM leaks filesystems</b><br>
+     <blockquote>The NM is exhausting its fds because it's not closing fs instances when the app is finished.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-42">YARN-42</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (nodemanager)<br>
+     <b>Node Manager throws NPE on startup</b><br>
+     <blockquote>NM throws NPE on startup if it doesn't have permissions on the NM local dirs

+

+

+{code:xml}

+2012-05-14 16:32:13,468 FATAL org.apache.hadoop.yarn.server.nodemanager.NodeManager: Error starting NodeManager

+org.apache.hadoop.yarn.YarnException: Failed to initialize LocalizationService

+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:202)

+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)

+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.init(ContainerManagerImpl.java:183)

+	at org.apache.hadoop.yarn.service.CompositeService.init(CompositeService.java:58)

+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.init(NodeManager.java:166)

+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:268)

+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:284)

+Caused by: java.io.IOException: mkdir of /mrv2/tmp/nm-local-dir/usercache failed

+	at org.apache.hadoop.fs.FileSystem.primitiveMkdir(FileSystem.java:907)

+	at org.apache.hadoop.fs.DelegateToFileSystem.mkdir(DelegateToFileSystem.java:143)

+	at org.apache.hadoop.fs.FilterFs.mkdir(FilterFs.java:189)

+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:706)

+	at org.apache.hadoop.fs.FileContext$4.next(FileContext.java:703)

+	at org.apache.hadoop.fs.FileContext$FSLinkResolver.resolve(FileContext.java:2325)

+	at org.apache.hadoop.fs.FileContext.mkdir(FileContext.java:703)

+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService.init(ResourceLocalizationService.java:188)

+	... 6 more

+2012-05-14 16:32:13,472 INFO org.apache.hadoop.yarn.service.CompositeService: Error stopping org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler

+java.lang.NullPointerException

+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler.stop(NonAggregatingLogHandler.java:82)

+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)

+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)

+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stop(ContainerManagerImpl.java:266)

+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:99)

+	at org.apache.hadoop.yarn.service.CompositeService.stop(CompositeService.java:89)

+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.stop(NodeManager.java:182)

+	at org.apache.hadoop.yarn.service.CompositeService$CompositeServiceShutdownHook.run(CompositeService.java:122)

+	at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)

+{code}

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-39">YARN-39</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>RM-NM secret-keys should be randomly generated and rolled every so often</b><br>
+     <blockquote> - RM should generate the master-key randomly

+ - The master-key should roll every so often

+ - NM should remember old expired keys so that already doled out container-requests can be satisfied.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-37">YARN-37</a>.
+     Minor bug reported by Jason Lowe and fixed by Mayank Bansal (resourcemanager)<br>
+     <b>TestRMAppTransitions.testAppSubmittedKilled passes for the wrong reason</b><br>
+     <blockquote>TestRMAppTransitions#testAppSubmittedKilled causes an invalid event exception but the test doesn't catch the error since the final app state is still killed.  Killed for the wrong reason, but the final state is the same.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-36">YARN-36</a>.
+     Blocker bug reported by Eli Collins and fixed by Radim Kolar <br>
+     <b>branch-2.1.0-alpha doesn't build</b><br>
+     <blockquote>branch-2.1.0-alpha doesn't build due to the following. Per YARN-1 I updated the mvn version to be 2.1.0-SNAPSHOT; before I hit this issue it didn't compile due to the bogus version. 

+

+{noformat}

+hadoop-branch-2.1.0-alpha $ mvn compile

+[INFO] Scanning for projects...

+[ERROR] The build could not read 1 project -&gt; [Help 1]

+[ERROR]   

+[ERROR]   The project org.apache.hadoop:hadoop-yarn-project:2.1.0-SNAPSHOT (/home/eli/src/hadoop-branch-2.1.0-alpha/hadoop-yarn-project/pom.xml) has 1 error

+[ERROR]     'dependencies.dependency.version' for org.hsqldb:hsqldb:jar is missing. @ line 160, column 17

+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-31">YARN-31</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>TestDelegationTokenRenewer fails on jdk7</b><br>
+     <blockquote>TestDelegationTokenRenewer fails when run with jdk7.  

+

+With JDK7, test methods run in an undefined order. Here it is expecting that testDTRenewal runs first but it no longer is.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-29">YARN-29</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client)<br>
+     <b>Add a yarn-client module</b><br>
+     <blockquote>I see that we are duplicating (some) code for talking to RM via client API. In this light, a yarn-client module will be useful so that clients of all frameworks can use/extend it.

+

+And that same module can be the destination for all the YARN's command line tools.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-27">YARN-27</a>.
+     Major bug reported by Ramya Sunil and fixed by Arun C Murthy <br>
+     <b>Failed refreshQueues due to misconfiguration prevents further refreshing of queues</b><br>
+     <blockquote>Stumbled upon this problem while refreshing queues with incorrect configuration. The exact scenario was:

+1. Added a new queue "newQueue" without defining its capacity.

+2. "bin/mapred queue -refreshQueues" fails correctly with "Illegal capacity of -1 for queue root.newQueue"

+3. However, after defining the capacity of "newQueue", a second "bin/mapred queue -refreshQueues" throws "org.apache.hadoop.metrics2.MetricsException: Metrics source QueueMetrics,q0=root,q1=newQueue already exists!" Also see Hadoop:name=QueueMetrics,q0=root,q1=newQueue,service=ResourceManager metrics being available even though the queue was not added.

+

+The expected behavior would be to refresh the queues correctly and allow addition of "newQueue". </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-25">YARN-25</a>.
+     Major bug reported by Thomas Graves and fixed by Robert Joseph Evans <br>
+     <b>remove old aggregated logs</b><br>
+     <blockquote>Currently the aggregated user logs under NM_REMOTE_APP_LOG_DIR are never removed.  We should have a mechanism to remove them after a certain period.

+

+It might make sense for job history server to remove them.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-22">YARN-22</a>.
+     Minor bug reported by Eli Collins and fixed by Mayank Bansal <br>
+     <b>Using URI for yarn.nodemanager log dirs fails</b><br>
+     <blockquote>If I use URIs (eg file:///home/eli/hadoop/dirs) for yarn.nodemanager.log-dirs or yarn.nodemanager.remote-app-log-dir the container log servlet fails with an NPE (works if I remove the "file" scheme). Using a URI for yarn.nodemanager.local-dirs works.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-15">YARN-15</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Arun C Murthy (nodemanager)<br>
+     <b>YarnConfiguration DEFAULT_YARN_APPLICATION_CLASSPATH should be updated</b><br>
+     <blockquote>

+{code}

+  /**

+   * Default CLASSPATH for YARN applications. A comma-separated list of

+   * CLASSPATH entries

+   */

+  public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {

+      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",

+      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",

+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",

+      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",

+      "$YARN_HOME/share/hadoop/mapreduce/*",

+      "$YARN_HOME/share/hadoop/mapreduce/lib/*"};

+{code}

+

+It should have {{share/yarn/}} and MR should add the {{share/mapreduce/}} (another JIRA?)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-14">YARN-14</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Symlinks to peer distributed cache files no longer work</b><br>
+     <blockquote>Trying to create a symlink to another file that is specified for the distributed cache will fail to create the link.  For example:

+

+hadoop jar ... -files "x,y,x#z"

+

+will localize the files x and y as x and y, but the z symlink for x will not be created.  This is a regression from 1.x behavior.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-13">YARN-13</a>.
+     Critical bug reported by Todd Lipcon and fixed by  <br>
+     <b>Merge of yarn reorg into branch-2 copied trunk tree</b><br>
+     <blockquote>When the move of yarn from inside MR to the project root was merged into branch-2, it seems like the trunk code base was actually copied into the branch-2 branch, instead of a parallel move occurring. So, the poms in branch-2 show the version as 3.0.0-SNAPSHOT instead of a 2.x snapshot version. This is breaking the branch-2 build.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-12">YARN-12</a>.
+     Major bug reported by Junping Du and fixed by Junping Du (scheduler)<br>
+     <b>Several Findbugs issues with new FairScheduler in YARN</b><br>
+     <blockquote>The FairScheduler feature was added to YARN recently. As a recent PreCommit test from MAPREDUCE-4309 shows, Findbugs found several bugs related to FairScheduler:

+org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.shutdown() might ignore java.lang.Exception

+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerEventLog.logDisabled; locked 50% of time

+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.queueMaxAppsDefault; locked 50% of time

+Inconsistent synchronization of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager.userMaxAppsDefault; locked 50% of time

+The details are in: https://builds.apache.org/job/PreCommit-MAPREDUCE-Build/2612//artifact/trunk/patchprocess/newPatchFindbugsWarningshadoop-yarn-server-resourcemanager.html#DE_MIGHT_IGNORE 

+

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-10">YARN-10</a>.
+     Major improvement reported by Arun C Murthy and fixed by Hitesh Shah <br>
+     <b>dist-shell shouldn't have a (test) dependency on hadoop-mapreduce-client-core</b><br>
+     <blockquote>dist-shell shouldn't have a (test) dependency on hadoop-mapreduce-client-core, this should be removed.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-9">YARN-9</a>.
+     Major improvement reported by Arun C Murthy and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Rename YARN_HOME to HADOOP_YARN_HOME</b><br>
+     <blockquote>We should rename YARN_HOME to HADOOP_YARN_HOME to be consistent with rest of Hadoop sub-projects.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1">YARN-1</a>.
+     Major task reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Move YARN out of hadoop-mapreduce</b><br>
+     <blockquote>Move YARN out of hadoop-mapreduce-project into hadoop-yarn-project in hadoop trunk</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4691">MAPREDUCE-4691</a>.
+     Critical bug reported by Jason Lowe and fixed by Robert Joseph Evans (jobhistoryserver , mrv2)<br>
+     <b>Historyserver can report "Unknown job" after RM says job has completed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4689">MAPREDUCE-4689</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (client)<br>
+     <b>JobClient.getMapTaskReports on failed job results in NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4649">MAPREDUCE-4649</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (jobhistoryserver)<br>
+     <b>mr-jobhistory-daemon.sh needs to be updated post YARN-1</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4647">MAPREDUCE-4647</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>We should only unjar jobjar if there is a lib directory in it.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4646">MAPREDUCE-4646</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>client does not receive job diagnostics for failed jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4642">MAPREDUCE-4642</a>.
+     Major bug reported by Robert Kanter and fixed by Robert Kanter (test)<br>
+     <b>MiniMRClientClusterFactory should not use job.setJar()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4641">MAPREDUCE-4641</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Exception in commitJob marks job as successful in job history</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4638">MAPREDUCE-4638</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>MR AppMaster shouldn't rely on YARN_APPLICATION_CLASSPATH providing MR jars</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4635">MAPREDUCE-4635</a>.
+     Major bug reported by Bikas Saha and fixed by Bikas Saha <br>
+     <b>MR side of YARN-83. Changing package of YarnClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4633">MAPREDUCE-4633</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (jobhistoryserver)<br>
+     <b>history server doesn't set permissions on all subdirs </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4629">MAPREDUCE-4629</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Remove JobHistory.DEBUG_MODE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4614">MAPREDUCE-4614</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (client , task)<br>
+     <b>Simplify debugging a job's tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4612">MAPREDUCE-4612</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>job summary file permissions not set when its created</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4611">MAPREDUCE-4611</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>MR AM dies badly when Node is decommissioned</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4610">MAPREDUCE-4610</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>Support deprecated mapreduce.job.counters.limit property in MR2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4608">MAPREDUCE-4608</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop-mapreduce-client is missing some dependencies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4604">MAPREDUCE-4604</a>.
+     Critical bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>In mapred-default, mapreduce.map.maxattempts &amp; mapreduce.reduce.maxattempts defaults are set to 4 as well as mapreduce.job.maxtaskfailures.per.tracker. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4600">MAPREDUCE-4600</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Daryn Sharp <br>
+     <b>TestTokenCache.java from MRV1 no longer compiles</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4580">MAPREDUCE-4580</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Change MapReduce to use the yarn-client module</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4579">MAPREDUCE-4579</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>TestTaskAttempt fails jdk7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4577">MAPREDUCE-4577</a>.
+     Minor bug reported by Alejandro Abdelnur and fixed by Aaron T. Myers (test)<br>
+     <b>HDFS-3672 broke TestCombineFileInputFormat.testMissingBlocks() test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4572">MAPREDUCE-4572</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (tasktracker , webapps)<br>
+     <b>Can not access user logs - Jetty is not configured by default to serve aliases/symlinks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4570">MAPREDUCE-4570</a>.
+     Minor bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>ProcfsBasedProcessTree#constructProcessInfo() prints a warning if procfsDir/&lt;pid&gt;/stat is not found.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4569">MAPREDUCE-4569</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>TestHsWebServicesJobsQuery fails on jdk7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4562">MAPREDUCE-4562</a>.
+     Major bug reported by Jarek Jarcec Cecho and fixed by Jarek Jarcec Cecho <br>
+     <b>Support for "FileSystemCounter" legacy counter group name for compatibility reasons is creating an incorrect counter name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4511">MAPREDUCE-4511</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv1 , mrv2 , performance)<br>
+     <b>Add IFile readahead</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4504">MAPREDUCE-4504</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>SortValidator writes to wrong directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4503">MAPREDUCE-4503</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Should throw InvalidJobConfException if duplicates found in cacheArchives or cacheFiles</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4498">MAPREDUCE-4498</a>.
+     Critical bug reported by Robert Kanter and fixed by Robert Kanter (build , examples)<br>
+     <b>Remove hsqldb jar from Hadoop runtime classpath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4496">MAPREDUCE-4496</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (applicationmaster , mrv2)<br>
+     <b>AM logs link is missing user name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4494">MAPREDUCE-4494</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2 , test)<br>
+     <b>TestFifoScheduler failing with Metrics source QueueMetrics,q0=default already exists!</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4493">MAPREDUCE-4493</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Distributed Cache Compatibility Issues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4492">MAPREDUCE-4492</a>.
+     Minor bug reported by Nishan Shetty and fixed by Mayank Bansal (mrv2)<br>
+     <b>Configuring total queue capacity between 100.5 and 99.5 at a particular level is successful</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4484">MAPREDUCE-4484</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Incorrect IS_MINI_YARN_CLUSTER property name in YarnConfiguration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4483">MAPREDUCE-4483</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>2.0 build does not work </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4470">MAPREDUCE-4470</a>.
+     Major bug reported by Kihwal Lee and fixed by Ilya Katsov (test)<br>
+     <b>Fix TestCombineFileInputFormat.testForEmptyFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4467">MAPREDUCE-4467</a>.
+     Critical bug reported by Andrey Klochkov and fixed by Kihwal Lee (nodemanager)<br>
+     <b>IndexCache failures due to missing synchronization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4465">MAPREDUCE-4465</a>.
+     Trivial bug reported by Bo Wang and fixed by Bo Wang <br>
+     <b>Update description of yarn.nodemanager.address property</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4457">MAPREDUCE-4457</a>.
+     Critical bug reported by Thomas Graves and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>mr job invalid transition TA_TOO_MANY_FETCH_FAILURE at FAILED</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4456">MAPREDUCE-4456</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>LocalDistributedCacheManager can get an ArrayIndexOutOfBounds when creating symlinks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4449">MAPREDUCE-4449</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Incorrect MR_HISTORY_STORAGE property name in JHAdminConfig</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4448">MAPREDUCE-4448</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , nodemanager)<br>
+     <b>Nodemanager crashes upon application cleanup if aggregation failed to start</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4447">MAPREDUCE-4447</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (build)<br>
+     <b>Remove aop from cruft from the ant build </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4444">MAPREDUCE-4444</a>.
+     Blocker bug reported by Nathan Roberts and fixed by Jason Lowe (nodemanager)<br>
+     <b>nodemanager fails to start when one of the local-dirs is bad</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4441">MAPREDUCE-4441</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Fix build issue caused by MR-3451</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4440">MAPREDUCE-4440</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Change SchedulerApp &amp; SchedulerNode to be a minimal interface </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4437">MAPREDUCE-4437</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (applicationmaster , mrv2)<br>
+     <b>Race in MR ApplicationMaster can cause reducers to never be scheduled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4432">MAPREDUCE-4432</a>.
+     Trivial bug reported by Gabriel Reid and fixed by  <br>
+     <b>Confusing warning message when GenericOptionsParser is not used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4427">MAPREDUCE-4427</a>.
+     Major improvement reported by Bikas Saha and fixed by Bikas Saha <br>
+     <b>Enable the RM to work with AM's that are not managed by it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4423">MAPREDUCE-4423</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Potential infinite fetching of map output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4422">MAPREDUCE-4422</a>.
+     Major improvement reported by Arun C Murthy and fixed by Ahmed Radwan (nodemanager)<br>
+     <b>YARN_APPLICATION_CLASSPATH needs a documented default value in YarnConfiguration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4419">MAPREDUCE-4419</a>.
+     Major bug reported by Nishan Shetty and fixed by Devaraj K (mrv2)<br>
+     <b>./mapred queue -info &lt;queuename&gt; -showJobs displays all the jobs irrespective of &lt;queuename&gt; </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4417">MAPREDUCE-4417</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2 , security)<br>
+     <b>add support for encrypted shuffle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4416">MAPREDUCE-4416</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee (client , mrv2)<br>
+     <b>Some tests fail if Clover is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4408">MAPREDUCE-4408</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Robert Kanter (mrv1 , mrv2)<br>
+     <b>allow jobs to set a JAR that is in the distributed cached</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4407">MAPREDUCE-4407</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (build , mrv2)<br>
+     <b>Add hadoop-yarn-server-tests-&lt;version&gt;-tests.jar to hadoop dist package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4406">MAPREDUCE-4406</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2 , test)<br>
+     <b>Users should be able to specify the MiniCluster ResourceManager and JobHistoryServer ports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4402">MAPREDUCE-4402</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (test)<br>
+     <b>TestFileInputFormat fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4395">MAPREDUCE-4395</a>.
+     Critical bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (distributed-cache , job submission , mrv2)<br>
+     <b>Possible NPE at ClientDistributedCacheManager#determineTimestamps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4392">MAPREDUCE-4392</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Counters.makeCompactString() changed behavior from 0.20</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4387">MAPREDUCE-4387</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (resourcemanager)<br>
+     <b>RM gets fatal error and exits during TestRM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4384">MAPREDUCE-4384</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (nodemanager)<br>
+     <b>Race conditions in IndexCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4383">MAPREDUCE-4383</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson (pipes)<br>
+     <b>HadoopPipes.cc needs to include unistd.h</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4380">MAPREDUCE-4380</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>Empty Userlogs directory is getting created under logs directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4379">MAPREDUCE-4379</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>Node Manager throws java.lang.OutOfMemoryError: Java heap space due to org.apache.hadoop.fs.LocalDirAllocator.contexts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4376">MAPREDUCE-4376</a>.
+     Major bug reported by Jason Lowe and fixed by Kihwal Lee (mrv2 , test)<br>
+     <b>TestClusterMRNotification times out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4375">MAPREDUCE-4375</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (applicationmaster)<br>
+     <b>Show Configuration Tracability in MR UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4372">MAPREDUCE-4372</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2 , resourcemanager)<br>
+     <b>Deadlock in Resource Manager between SchedulerEventDispatcher.EventProcessor and Shutdown hook manager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4361">MAPREDUCE-4361</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Fix detailed metrics for protobuf-based RPC on 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4355">MAPREDUCE-4355</a>.
+     Major new feature reported by Karthik Kambatla and fixed by Karthik Kambatla (mrv1 , mrv2)<br>
+     <b>Add RunningJob.getJobStatus()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4341">MAPREDUCE-4341</a>.
+     Major bug reported by Thomas Graves and fixed by Karthik Kambatla (capacity-sched , mrv2)<br>
+     <b>add types to capacity scheduler properties documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4336">MAPREDUCE-4336</a>.
+     Major bug reported by Siddharth Seth and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Distributed Shell fails when used with the CapacityScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4320">MAPREDUCE-4320</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (contrib/gridmix)<br>
+     <b>gridmix mainClass wrong in pom.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4313">MAPREDUCE-4313</a>.
+     Blocker bug reported by Eli Collins and fixed by Robert Joseph Evans (build , test)<br>
+     <b>TestTokenCache doesn't compile due TokenCache.getDelegationToken compilation error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4311">MAPREDUCE-4311</a>.
+     Major bug reported by Thomas Graves and fixed by Karthik Kambatla (capacity-sched , mrv2)<br>
+     <b>Capacity scheduler.xml does not accept decimal values for capacity and maximum-capacity settings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4307">MAPREDUCE-4307</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>TeraInputFormat calls FileSystem.getDefaultBlockSize() without a Path - Failure when using ViewFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4306">MAPREDUCE-4306</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Problem running Distributed Shell applications as a user other than the one that started the daemons</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4302">MAPREDUCE-4302</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (nodemanager)<br>
+     <b>NM goes down if error encountered during log aggregation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4301">MAPREDUCE-4301</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (applicationmaster)<br>
+     <b>Dedupe some strings in MRAM for memory savings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4300">MAPREDUCE-4300</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (applicationmaster)<br>
+     <b>OOM in AM can turn it into a zombie.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4299">MAPREDUCE-4299</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>Terasort hangs with MR2 FifoScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4297">MAPREDUCE-4297</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (contrib/gridmix)<br>
+     <b>Usersmap file in gridmix should not fail on empty lines</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4295">MAPREDUCE-4295</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2 , resourcemanager)<br>
+     <b>RM crashes due to DNS issue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4290">MAPREDUCE-4290</a>.
+     Major bug reported by Nishan Shetty and fixed by Devaraj K (mrv2)<br>
+     <b>JobStatus.getState() API is giving ambiguous values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4283">MAPREDUCE-4283</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (jobhistoryserver , mrv2)<br>
+     <b>Display tail of aggregated logs by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4276">MAPREDUCE-4276</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Allow setting yarn.nodemanager.delete.debug-delay-sec property to "-1" for easier container debugging.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4270">MAPREDUCE-4270</a>.
+     Major bug reported by Brock Noland and fixed by Thomas Graves (mrv2)<br>
+     <b>data_join test classes are in the wrong package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4269">MAPREDUCE-4269</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>documentation: Gridmix has javadoc warnings in StressJobFactory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4267">MAPREDUCE-4267</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>mavenize pipes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4264">MAPREDUCE-4264</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>Got ClassCastException when using mapreduce.history.server.delegationtoken.required=true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4262">MAPREDUCE-4262</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>NM gives wrong log message saying "Connected to ResourceManager" before trying to connect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4252">MAPREDUCE-4252</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>MR2 job never completes with 1 pending task</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4250">MAPREDUCE-4250</a>.
+     Major bug reported by Patrick Hunt and fixed by Patrick Hunt (nodemanager)<br>
+     <b>hadoop-config.sh missing variable exports, causes Yarn jobs to fail with ClassNotFoundException MRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4238">MAPREDUCE-4238</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>mavenize data_join</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4237">MAPREDUCE-4237</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>TestNodeStatusUpdater can fail if localhost has a domain associated with it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4233">MAPREDUCE-4233</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>NPE can happen in RMNMNodeInfo.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4228">MAPREDUCE-4228</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (applicationmaster , mrv2)<br>
+     <b>mapreduce.job.reduce.slowstart.completedmaps is not working properly to delay the scheduling of the reduce tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4226">MAPREDUCE-4226</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>ConcurrentModificationException in FileSystemCounterGroup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4224">MAPREDUCE-4224</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2 , scheduler , test)<br>
+     <b>TestFifoScheduler throws org.apache.hadoop.metrics2.MetricsException </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4220">MAPREDUCE-4220</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM apps page starttime/endtime sorts are incorrect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4215">MAPREDUCE-4215</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM app page shows 500 error on appid parse error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4212">MAPREDUCE-4212</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>TestJobClientGetJob sometimes fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4211">MAPREDUCE-4211</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Error conditions (missing appid, appid not found) are masked in the RM app page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4210">MAPREDUCE-4210</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (webapps)<br>
+     <b>Expose listener address for WebApp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4209">MAPREDUCE-4209</a>.
+     Major bug reported by Radim Kolar and fixed by  (build)<br>
+     <b>junit dependency in hadoop-mapreduce-client is missing scope test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4206">MAPREDUCE-4206</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Sorting by Last Health-Update on the RM nodes page does not work correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4205">MAPREDUCE-4205</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>retrofit all JVM shutdown hooks to use ShutdownHookManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4197">MAPREDUCE-4197</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Include the hsqldb jar in the hadoop-mapreduce tar file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4194">MAPREDUCE-4194</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>ConcurrentModificationError in DirectoryCollection</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4190">MAPREDUCE-4190</a>.
+     Major improvement reported by Thomas Graves and fixed by Thomas Graves (mrv2 , webapps)<br>
+     <b>Improve web UI for task attempts userlog link</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4189">MAPREDUCE-4189</a>.
+     Critical bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>TestContainerManagerSecurity is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4169">MAPREDUCE-4169</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Container Logs appear in unsorted order</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4165">MAPREDUCE-4165</a>.
+     Trivial bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Committing is misspelled as commiting in task logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4163">MAPREDUCE-4163</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Daryn Sharp (mrv2)<br>
+     <b>consistently set the bind address</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4162">MAPREDUCE-4162</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Daryn Sharp (client , mrv2)<br>
+     <b>Correctly set token service</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4161">MAPREDUCE-4161</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Daryn Sharp (client , mrv2)<br>
+     <b>create sockets consistently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4160">MAPREDUCE-4160</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (test)<br>
+     <b>some mrv1 ant tests fail with timeout, due to MAPREDUCE-4156</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4159">MAPREDUCE-4159</a>.
+     Major bug reported by Nishan Shetty and fixed by Devaraj K (mrv2)<br>
+     <b>Job is running in Uber mode after setting "mapreduce.job.ubertask.maxreduces" to zero</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4157">MAPREDUCE-4157</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>ResourceManager should not kill apps that are well behaved</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4156">MAPREDUCE-4156</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (build)<br>
+     <b>ant build fails compiling JobInProgress</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4152">MAPREDUCE-4152</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>map task left hanging after AM dies trying to connect to RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4151">MAPREDUCE-4151</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (mrv2 , webapps)<br>
+     <b>RM scheduler web page should filter apps to those that are relevant to scheduling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4148">MAPREDUCE-4148</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>MapReduce should not have a compile-time dependency on HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4146">MAPREDUCE-4146</a>.
+     Major improvement reported by Tom White and fixed by Ahmed Radwan <br>
+     <b>Support limits on task status string length and number of block locations in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4144">MAPREDUCE-4144</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>ResourceManager NPE while handling NODE_UPDATE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4140">MAPREDUCE-4140</a>.
+     Major bug reported by Patrick Hunt and fixed by Patrick Hunt (client , mrv2)<br>
+     <b>mapreduce classes incorrectly importing "clover.org.apache.*" classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4139">MAPREDUCE-4139</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Potential ResourceManager deadlock when SchedulerEventDispatcher is stopped</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4134">MAPREDUCE-4134</a>.
+     Major task reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Remove references of mapred.child.ulimit etc. since they are not being used any more</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4133">MAPREDUCE-4133</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>MR over viewfs is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4129">MAPREDUCE-4129</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Lots of unneeded counters log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4128">MAPREDUCE-4128</a>.
+     Major bug reported by Bikas Saha and fixed by Bikas Saha (mrv2)<br>
+     <b>AM Recovery expects all attempts of a completed task to also be completed.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4117">MAPREDUCE-4117</a>.
+     Critical bug reported by Devaraj K and fixed by Devaraj K (client , mrv2)<br>
+     <b>mapred job -status throws NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4102">MAPREDUCE-4102</a>.
+     Major bug reported by Thomas Graves and fixed by Bhallamudi Venkata Siva Kamesh (webapps)<br>
+     <b>job counters not available in Jobhistory webui for killed jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4099">MAPREDUCE-4099</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>ApplicationMaster may fail to remove staging directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4097">MAPREDUCE-4097</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Roman Shaposhnik (build)<br>
+     <b>tools testcases fail because missing mrapp-generated-classpath file in classpath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4092">MAPREDUCE-4092</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>commitJob Exception does not fail job (regression in 0.23 vs 0.20)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4091">MAPREDUCE-4091</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build , test)<br>
+     <b>tools testcases failing because of MAPREDUCE-4082</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4089">MAPREDUCE-4089</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Hung Tasks never time out. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4082">MAPREDUCE-4082</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop-mapreduce-client-app's mrapp-generated-classpath file should not be in the module JAR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4079">MAPREDUCE-4079</a>.
+     Blocker improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mr-am , mrv2)<br>
+     <b>Allow MR AppMaster to limit ephemeral port range.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4074">MAPREDUCE-4074</a>.
+     Major bug reported by Devaraj K and fixed by xieguiming <br>
+     <b>Client continuously retries to RM When RM goes down before launching Application Master</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4073">MAPREDUCE-4073</a>.
+     Critical bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2 , scheduler)<br>
+     <b>CS assigns multiple off-switch containers when using multi-level-queues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4072">MAPREDUCE-4072</a>.
+     Major bug reported by Anupam Seth and fixed by Anupam Seth (mrv2)<br>
+     <b>User-set java.library.path seems to overwrite the default, creating problems with native lib loading</b><br>
+     <blockquote>-Djava.library.path in mapred.child.java.opts can cause issues with native libraries.  LD_LIBRARY_PATH through mapred.child.env should be used instead.</blockquote></li>
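A minimal sketch of the workaround described in the note above, assuming the standard org.apache.hadoop.conf.Configuration API; the directory /opt/native/lib is a placeholder:

    import org.apache.hadoop.conf.Configuration;

    public class ChildEnvExample {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Do not pass -Djava.library.path via mapred.child.java.opts,
            // since it can replace the default and break native lib loading;
            // extend the task environment's LD_LIBRARY_PATH instead.
            conf.set("mapred.child.env", "LD_LIBRARY_PATH=/opt/native/lib");
            System.out.println(conf.get("mapred.child.env"));
        }
    }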
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4068">MAPREDUCE-4068</a>.
+     Blocker bug reported by Ahmed Radwan and fixed by Robert Kanter (mrv2)<br>
+     <b>Jars in lib subdirectory of the submittable JAR are not added to the classpath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4062">MAPREDUCE-4062</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>AM Launcher thread can hang forever</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4060">MAPREDUCE-4060</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (build)<br>
+     <b>Multiple SLF4J binding warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4059">MAPREDUCE-4059</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>The history server should have a separate pluggable storage/query interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4053">MAPREDUCE-4053</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Counters group names deprecation is wrong; when iterating over group names, deprecated names don't show up</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4051">MAPREDUCE-4051</a>.
+     Major task reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Remove the empty hadoop-mapreduce-project/assembly/all.xml file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4050">MAPREDUCE-4050</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Invalid node link</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4048">MAPREDUCE-4048</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>NullPointerException exception while accessing the Application Master UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4040">MAPREDUCE-4040</a>.
+     Minor bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (jobhistoryserver , mrv2)<br>
+     <b>History links should use hostname rather than IP address.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4031">MAPREDUCE-4031</a>.
+     Critical bug reported by Devaraj K and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>Node Manager hangs on shut down</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4024">MAPREDUCE-4024</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>RM webservices can't query on finalStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4020">MAPREDUCE-4020</a>.
+     Major bug reported by Jason Lowe and fixed by Anupam Seth (mrv2 , webapps)<br>
+     <b>Web services returns incorrect JSON for deep queue tree</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4017">MAPREDUCE-4017</a>.
+     Trivial improvement reported by Koji Noguchi and fixed by Thomas Graves (jobhistoryserver , jobtracker)<br>
+     <b>Add jobname to jobsummary log</b><br>
+     <blockquote>The Job Summary log may contain commas in values that are escaped by a '\' character.  This was true before, but is more likely to be exposed now. </blockquote></li>
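Since consumers of the Job Summary log now have to honor the '\' escape, here is a small, hypothetical parsing sketch (the sample line and field names are illustrative) that splits a record only on unescaped commas:

    import java.util.Arrays;

    public class JobSummaryParseExample {
        public static void main(String[] args) {
            // Hypothetical summary fragment: the jobName value contains an
            // escaped comma that must not be treated as a field separator.
            String line = "jobId=job_1_0001,jobName=word\\,count,status=SUCCEEDED";
            // Negative lookbehind: split on commas not preceded by a backslash.
            String[] fields = line.split("(?<!\\\\),");
            System.out.println(Arrays.toString(fields));
            // Prints: [jobId=job_1_0001, jobName=word\,count, status=SUCCEEDED]
        }
    }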
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4012">MAPREDUCE-4012</a>.
+     Minor bug reported by Koji Noguchi and fixed by Thomas Graves <br>
+     <b>Hadoop Job setup error leaves no useful info to users (when LinuxTaskController is used)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4010">MAPREDUCE-4010</a>.
+     Critical bug reported by Jason Lowe and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>TestWritableJobConf fails on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4002">MAPREDUCE-4002</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (examples)<br>
+     <b>MultiFileWordCount job fails if the input path is not from default file system</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3999">MAPREDUCE-3999</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2 , webapps)<br>
+     <b>Tracking link gives an error if the AppMaster hasn't started yet</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3993">MAPREDUCE-3993</a>.
+     Major bug reported by Todd Lipcon and fixed by Karthik Kambatla (mrv1 , mrv2)<br>
+     <b>Graceful handling of codec errors during decompression</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3992">MAPREDUCE-3992</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (mrv1)<br>
+     <b>Reduce fetcher doesn't verify HTTP status code of response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3988">MAPREDUCE-3988</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Eric Payne (mrv2)<br>
+     <b>mapreduce.job.local.dir doesn't point to a single directory on a node.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3983">MAPREDUCE-3983</a>.
+     Major test reported by Robert Joseph Evans and fixed by Ravi Prakash (mrv1)<br>
+     <b>TestTTResourceReporting can fail, and should just be deleted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3972">MAPREDUCE-3972</a>.
+     Major sub-task reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Locking and exception issues in JobHistory Server.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3947">MAPREDUCE-3947</a>.
+     Minor bug reported by Todd Lipcon and fixed by Devaraj K <br>
+     <b>yarn.app.mapreduce.am.resource.mb not documented</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3942">MAPREDUCE-3942</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , security)<br>
+     <b>Randomize master key generation for ApplicationTokenSecretManager and roll it every so often</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3940">MAPREDUCE-3940</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , security)<br>
+     <b>ContainerTokens should have an expiry interval</b><br>
+     <blockquote>ContainerTokens now have an expiry interval so that stale tokens cannot be used for launching containers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3932">MAPREDUCE-3932</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Robert Joseph Evans (mr-am , mrv2)<br>
+     <b>MR tasks failing and crashing the AM when available-resources/headRoom becomes zero</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3927">MAPREDUCE-3927</a>.
+     Critical bug reported by MengWang and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Shuffle hangs when map.failures.percent is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3907">MAPREDUCE-3907</a>.
+     Minor improvement reported by Eugene Koontz and fixed by Eugene Koontz (documentation)<br>
+     <b>Document mapred-default.xml entries for the jobhistory server.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3906">MAPREDUCE-3906</a>.
+     Trivial improvement reported by Eugene Koontz and fixed by Eugene Koontz (documentation)<br>
+     <b>Fix inconsistency in documentation regarding mapreduce.jobhistory.principal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3893">MAPREDUCE-3893</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>allow capacity scheduler configs maximum-applications and maximum-am-resource-percent to be configurable on a per-queue basis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3889">MAPREDUCE-3889</a>.
+     Critical bug reported by Thomas Graves and fixed by Devaraj K (mrv2)<br>
+     <b>job client tries to use /tasklog interface, but that doesn't exist anymore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3873">MAPREDUCE-3873</a>.
+     Minor bug reported by Nishan Shetty and fixed by xieguiming (mrv2 , nodemanager)<br>
+     <b>Nodemanager is not getting decommissioned if an absolute IP is given in the exclude file.</b><br>
+     <blockquote>Fixed NodeManagers' decommissioning at RM to accept IP addresses also.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3871">MAPREDUCE-3871</a>.
+     Major improvement reported by Tom White and fixed by Tom White (distributed-cache)<br>
+     <b>Allow symlinking in LocalJobRunner DistributedCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3870">MAPREDUCE-3870</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Invalid App Metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3850">MAPREDUCE-3850</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>Avoid redundant calls for tokens in TokenCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3842">MAPREDUCE-3842</a>.
+     Critical improvement reported by Alejandro Abdelnur and fixed by Thomas Graves (mrv2 , webapps)<br>
+     <b>stop webpages from automatically refreshing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3812">MAPREDUCE-3812</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Harsh J (mrv2 , performance)<br>
+     <b>Lower default allocation sizes, fix allocation configurations and document them</b><br>
+     <blockquote>Removes two sets of previously available config properties:
+
+1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
+2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
+
+In favor of two new, generically named properties:
+
+1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
+2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
+
+Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
+
+Also changes the default minimum and maximums to 128 MB and 10 GB respectively.</blockquote></li>
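A sketch of migrating from the removed keys to the new generic ones, using the Configuration API with the new defaults quoted above; in practice these values would live in the ResourceManager's yarn-site.xml:

    import org.apache.hadoop.conf.Configuration;

    public class SchedulerAllocationExample {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Floor of per-container memory requests (new default: 128 MB).
            conf.setInt("yarn.scheduler.minimum-allocation-mb", 128);
            // Ceiling of per-container memory requests (new default: 10 GB).
            conf.setInt("yarn.scheduler.maximum-allocation-mb", 10240);
            // Both values are read by the scheduler inside the RM, so they
            // take effect only when set on the ResourceManager side.
            System.out.println(conf.get("yarn.scheduler.minimum-allocation-mb"));
        }
    }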
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3782">MAPREDUCE-3782</a>.
+     Critical bug reported by Arpit Gupta and fixed by Jason Lowe (mrv2)<br>
+     <b>teragen/terasort jobs fail when using webhdfs://</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3773">MAPREDUCE-3773</a>.
+     Major new feature reported by Owen O'Malley and fixed by Owen O'Malley (jobtracker)<br>
+     <b>Add queue metrics with buckets for job run times</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3728">MAPREDUCE-3728</a>.
+     Critical bug reported by Roman Shaposhnik and fixed by Giridharan Kesavan (mrv2 , nodemanager)<br>
+     <b>ShuffleHandler can't access results when configured in a secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3682">MAPREDUCE-3682</a>.
+     Major bug reported by David Capwell and fixed by Ravi Prakash (mrv2)<br>
+     <b>Tracker URL says AM tasks run on localhost</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3672">MAPREDUCE-3672</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Anupam Seth (mr-am , mrv2)<br>
+     <b>Killed maps shouldn't be counted towards JobCounter.NUM_FAILED_MAPS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3659">MAPREDUCE-3659</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>Host-based token support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3650">MAPREDUCE-3650</a>.
+     Blocker bug reported by Thomas Graves and fixed by Ravi Prakash (mrv2)<br>
+     <b>testGetTokensForHftpFS() fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3621">MAPREDUCE-3621</a>.
+     Major bug reported by Thomas Graves and fixed by Ravi Prakash (mrv2)<br>
+     <b>TestDBJob and TestDataDrivenDBInputFormat ant tests fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3613">MAPREDUCE-3613</a>.
+     Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>web service calls header contains 2 content types</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3543">MAPREDUCE-3543</a>.
+     Critical bug reported by Mahadev konar and fixed by Thomas Graves (mrv2)<br>
+     <b>Mavenize Gridmix.</b><br>
+     <blockquote>Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn - and then apply the patch.
+
+If this is merged to more than trunk, the version inside of hadoop-tools/hadoop-gridmix/pom.xml will need to be updated accordingly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3506">MAPREDUCE-3506</a>.
+     Minor bug reported by Ratandeep Ratti and fixed by Jason Lowe (client , mrv2)<br>
+     <b>Calling getPriority on JobInfo after parsing a history log with JobHistoryParser throws a NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3493">MAPREDUCE-3493</a>.
+     Minor bug reported by Ahmed Radwan and fixed by  (mrv2)<br>
+     <b>Add the default mapreduce.shuffle.port property to mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3451">MAPREDUCE-3451</a>.
+     Major new feature reported by Patrick Wendell and fixed by Patrick Wendell (mrv2 , scheduler)<br>
+     <b>Port Fair Scheduler to MR2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3350">MAPREDUCE-3350</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Jonathan Eagles (mrv2 , webapps)<br>
+     <b>Per-app RM page should have the list of application-attempts like on the app JHS page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3348">MAPREDUCE-3348</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>mapred job -status fails to give info even if the job is present in History</b><br>
+     <blockquote>Fixed a bug in MR client to redirect to JobHistoryServer correctly when RM forgets the app.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3289">MAPREDUCE-3289</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2 , nodemanager , performance)<br>
+     <b>Make use of fadvise in the NM's shuffle handler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3082">MAPREDUCE-3082</a>.
+     Major bug reported by Rajit Saha and fixed by John George (harchive)<br>
+     <b>archive command takes wrong path for input file with current directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2786">MAPREDUCE-2786</a>.
+     Minor improvement reported by Plamen Jeliazkov and fixed by Plamen Jeliazkov (benchmarks)<br>
+     <b>TestDFSIO should also test compression reading/writing from command-line.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2739">MAPREDUCE-2739</a>.
+     Minor bug reported by Ahmed Radwan and fixed by Bo Wang (mrv2)<br>
+     <b>MR-279: Update installation docs (remove YarnClientFactory)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2374">MAPREDUCE-2374</a>.
+     Major bug reported by Todd Lipcon and fixed by Andy Isaacson <br>
+     <b>"Text File Busy" errors launching MR tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2289">MAPREDUCE-2289</a>.
+     Major bug reported by Todd Lipcon and fixed by Ahmed Radwan (job submission)<br>
+     <b>Permissions race can make getStagingDir fail on local filesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2220">MAPREDUCE-2220</a>.
+     Minor bug reported by Rui KUBO and fixed by Rui KUBO (documentation)<br>
+     <b>Fix new API FileOutputFormat-related typos in mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-987">MAPREDUCE-987</a>.
+     Minor new feature reported by Philip Zeyliger and fixed by Ahmed Radwan (build , test)<br>
+     <b>Exposing MiniDFS and MiniMR clusters as a single process command-line</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3972">HDFS-3972</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Trash emptier fails in secure HA cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3928">HDFS-3928</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>MiniDFSCluster should reset the first ExitException on shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3902">HDFS-3902</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson <br>
+     <b>TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3895">HDFS-3895</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop-client must include commons-cli</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3890">HDFS-3890</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>filecontext mkdirs doesn't apply umask as expected</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3888">HDFS-3888</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>BlockPlacementPolicyDefault code cleanup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3887">HDFS-3887</a>.
+     Trivial improvement reported by Jing Zhao and fixed by Jing Zhao (name-node)<br>
+     <b>Remove redundant chooseTarget methods in BlockPlacementPolicy.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3879">HDFS-3879</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Fix findbugs warning in TransferFsImage on branch-2 </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3873">HDFS-3873</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs client)<br>
+     <b>Hftp assumes security is disabled if token fetch fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3871">HDFS-3871</a>.
+     Minor improvement reported by Arun C Murthy and fixed by Arun C Murthy (hdfs client)<br>
+     <b>Change NameNodeProxies to use HADOOP-8748</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3866">HDFS-3866</a>.
+     Minor improvement reported by Ryan Hennig and fixed by Plamen Jeliazkov (build)<br>
+     <b>HttpFS POM should have a property specifying where to download Tomcat from</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3864">HDFS-3864</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>NN does not update internal file mtime for OP_CLOSE when reading from the edit log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3861">HDFS-3861</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee (hdfs client)<br>
+     <b>Deadlock in DFSClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3860">HDFS-3860</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>HeartbeatManager#Monitor may wrongly hold the writelock of namesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3856">HDFS-3856</a>.
+     Blocker bug reported by Thomas Graves and fixed by Eli Collins (test)<br>
+     <b>TestHDFSServerPorts failure is causing surefire fork failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3853">HDFS-3853</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>Port MiniDFSCluster enableManagedDfsDirsRedundancy option to branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3852">HDFS-3852</a>.
+     Major bug reported by Aaron T. Myers and fixed by Daryn Sharp (hdfs client , security)<br>
+     <b>TestHftpDelegationToken is broken after HADOOP-8225</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3849">HDFS-3849</a>.
+     Critical bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>When re-loading the FSImage, we should clear the existing genStamp and leases.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3844">HDFS-3844</a>.
+     Trivial improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add @Override where necessary and remove unnecessary {@inheritdoc} and imports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3841">HDFS-3841</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Port HDFS-3835 to branch-0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3837">HDFS-3837</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Fix DataNode.recoverBlock findbugs warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3835">HDFS-3835</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node , security)<br>
+     <b>Long-lived 2NN cannot perform a checkpoint if security is enabled and the NN restarts with outstanding delegation tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3833">HDFS-3833</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (test)<br>
+     <b>TestDFSShell fails on Windows due to file concurrent read write</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3832">HDFS-3832</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Remove protocol methods related to DistributedUpgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3830">HDFS-3830</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>test_libhdfs_threaded: use forceNewInstance</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3819">HDFS-3819</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Should check that the invalidate work percentage default value is not greater than 1.0f</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3816">HDFS-3816</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (name-node)<br>
+     <b>Invalidate work percentage default value should be 0.32f instead of 32</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3808">HDFS-3808</a>.
+     Critical bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>fuse_dfs: postpone libhdfs initialization until after fork</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3803">HDFS-3803</a>.
+     Minor bug reported by Andrew Purtell and fixed by  (data-node)<br>
+     <b>BlockPoolSliceScanner new work period notice is very chatty at INFO level</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3802">HDFS-3802</a>.
+     Trivial improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>StartupOption.name in HdfsServerConstants should be final</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3796">HDFS-3796</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>Speed up edit log tests by avoiding fsync()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3794">HDFS-3794</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (webhdfs)<br>
+     <b>WebHDFS Open used with Offset returns the original (and incorrect) Content Length in the HTTP Header.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3790">HDFS-3790</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>test_fuse_dfs.c doesn't compile on centos 5</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3788">HDFS-3788</a>.
+     Critical bug reported by Eli Collins and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>distcp can't copy large files using webhdfs due to missing Content-Length header</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3765">HDFS-3765</a>.
+     Major improvement reported by Vinay and fixed by Vinay (ha)<br>
+     <b>Namenode INITIALIZESHAREDEDITS should be able to initialize all shared storages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3760">HDFS-3760</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson (hdfs client)<br>
+     <b>primitiveCreate is a write, not a read</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3758">HDFS-3758</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>TestFuseDFS test failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3756">HDFS-3756</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>DelegationTokenFetcher creates 2 HTTP connections, the second one not properly configured</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3755">HDFS-3755</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Creating an already-open-for-write file with overwrite=true fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3754">HDFS-3754</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>BlockSender doesn't shutdown ReadaheadPool threads</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3738">HDFS-3738</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestDFSClientRetries#testFailuresArePerOperation sets incorrect timeout config</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3733">HDFS-3733</a>.
+     Major bug reported by Andy Isaacson and fixed by Andy Isaacson (webhdfs)<br>
+     <b>Audit logs should include WebHDFS access</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3732">HDFS-3732</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>fuse_dfs: incorrect configuration value checked for connection expiry timer period</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3731">HDFS-3731</a>.
+     Blocker bug reported by Suresh Srinivas and fixed by Kihwal Lee (data-node)<br>
+     <b>2.0 release upgrade must handle blocks being written from 1.0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3724">HDFS-3724</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>add InterfaceAudience annotations to HttpFS classes and make inner enums static</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3723">HDFS-3723</a>.
+     Major improvement reported by E. Sammer and fixed by Jing Zhao (scripts , tools)<br>
+     <b>All commands should support meaningful --help</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3721">HDFS-3721</a>.
+     Critical bug reported by Todd Lipcon and fixed by Aaron T. Myers (data-node , hdfs client)<br>
+     <b>hsync support broke wire compatibility</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3720">HDFS-3720</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>hdfs.h must get packaged</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3718">HDFS-3718</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee (data-node)<br>
+     <b>Datanode won't shutdown because of runaway DataBlockScanner thread</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3715">HDFS-3715</a>.
+     Major bug reported by Eli Collins and fixed by Andrew Wang (test)<br>
+     <b>Fix TestFileCreation#testFileCreationNamenodeRestart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3711">HDFS-3711</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Manually convert remaining tests to JUnit4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3710">HDFS-3710</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson (libhdfs)<br>
+     <b>libhdfs misuses O_RDONLY/WRONLY/RDWR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3709">HDFS-3709</a>.
+     Major test reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>TestStartup tests still binding to the ephemeral port </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3707">HDFS-3707</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>TestFSInputChecker: improper use of skip</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3697">HDFS-3697</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance)<br>
+     <b>Enable fadvise readahead by default</b><br>
+     <blockquote>The datanode now performs 4MB readahead by default when reading data from its disks, if the native libraries are present. This has been shown to improve performance in many workloads. The feature may be disabled by setting dfs.datanode.readahead.bytes to "0".</blockquote></li>
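+<p>A minimal sketch, assuming only the stock <code>org.apache.hadoop.conf.Configuration</code> API, of expressing the opt-out described above; in practice the property belongs in the datanode's hdfs-site.xml:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+
+public class DisableReadahead {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    // Per HDFS-3697, setting dfs.datanode.readahead.bytes to 0 disables
+    // the default 4MB fadvise readahead.
+    conf.setLong("dfs.datanode.readahead.bytes", 0L);
+    System.out.println(conf.get("dfs.datanode.readahead.bytes"));
+  }
+}
+</pre>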
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3696">HDFS-3696</a>.
+     Critical bug reported by Kihwal Lee and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Create files with WebHdfsFileSystem goes OOM when file size is big</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3690">HDFS-3690</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>BlockPlacementPolicyDefault incorrectly casts LOG</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3688">HDFS-3688</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (data-node)<br>
+     <b>Namenode loses datanode hostname if datanode re-registers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3683">HDFS-3683</a>.
+     Minor bug reported by Todd Lipcon and fixed by Plamen Jeliazkov (name-node)<br>
+     <b>Edit log replay progress indicator shows &gt;100% complete</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3679">HDFS-3679</a>.
+     Minor bug reported by Conrad Meyer and fixed by Conrad Meyer (fuse-dfs)<br>
+     <b>fuse_dfs notrash option sets usetrash</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3675">HDFS-3675</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs: follow documented return codes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3673">HDFS-3673</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>libhdfs: fix some compiler warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3672">HDFS-3672</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Expose disk-location information for blocks to enable better scheduling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3666">HDFS-3666</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Plumb more exception messages to terminate</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3665">HDFS-3665</a>.
+     Major test reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>Add a test for renaming across file systems via a symlink</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3664">HDFS-3664</a>.
+     Major bug reported by Eli Collins and fixed by Colin Patrick McCabe (test)<br>
+     <b>BlockManager race when stopping active services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3663">HDFS-3663</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>MiniDFSCluster should capture the code path that led to the first ExitException </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3658">HDFS-3658</a>.
+     Major bug reported by Eli Collins and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>TestDFSClientRetries#testNamenodeRestart failed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3650">HDFS-3650</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Use MutableQuantiles to provide latency histograms for various operations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3646">HDFS-3646</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee (hdfs client)<br>
+     <b>LeaseRenewer can hold reference to inactive DFSClient instances forever</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3641">HDFS-3641</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Move server Util time methods to common and use now instead of System#currentTimeMillis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3637">HDFS-3637</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (data-node , hdfs client , security)<br>
+     <b>Add support for encrypting the DataTransferProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3634">HDFS-3634</a>.
+     Minor test reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>Add self-contained, mavenized fuse_dfs test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3633">HDFS-3633</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs: hdfsDelete should pass JNI_FALSE or JNI_TRUE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3629">HDFS-3629</a>.
+     Trivial bug reported by Brandon Li and fixed by Brandon Li (name-node)<br>
+     <b>fix the typo in the error message about inconsistent storage layout version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3622">HDFS-3622</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Backport HDFS-3541 to branch-0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3615">HDFS-3615</a>.
+     Major bug reported by Eli Collins and fixed by Aaron T. Myers (security)<br>
+     <b>Two BlockTokenSecretManager findbugs warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3613">HDFS-3613</a>.
+     Trivial improvement reported by Harsh J and fixed by Andrew Wang (name-node)<br>
+     <b>GSet prints some INFO level values, which aren't really very useful to all</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3612">HDFS-3612</a>.
+     Trivial improvement reported by Harsh J and fixed by Andy Isaacson (name-node)<br>
+     <b>Single namenode image directory config warning can be improved</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3611">HDFS-3611</a>.
+     Trivial bug reported by Harsh J and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>NameNode prints unnecessary WARNs about edit log normally skipping a few bytes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3610">HDFS-3610</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>fuse_dfs: Provide a way to use the default (configured) NN URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3609">HDFS-3609</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs: don't force the URI to look like hdfs://hostname:port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3608">HDFS-3608</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>fuse_dfs: detect changes in UID ticket cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3606">HDFS-3606</a>.
+     Minor test reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs: create self-contained unit test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3605">HDFS-3605</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Todd Lipcon (ha , name-node)<br>
+     <b>Block mistakenly marked corrupt during edit log catchup phase of failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3604">HDFS-3604</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Add dfs.webhdfs.enabled to hdfs-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3603">HDFS-3603</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (test)<br>
+     <b>Decouple TestHDFSTrash from TestTrash</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3597">HDFS-3597</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson <br>
+     <b>SNN can fail to start on upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3591">HDFS-3591</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Backport HDFS-3357 to branch-0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3583">HDFS-3583</a>.
+     Major improvement reported by Eli Collins and fixed by Andrew Wang (test)<br>
+     <b>Convert remaining tests to Junit4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3582">HDFS-3582</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>Hook daemon process exit for testing </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3581">HDFS-3581</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>FSPermissionChecker#checkPermission sticky bit check missing range check </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3580">HDFS-3580</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson <br>
+     <b>incompatible types; no instance(s) of type variable(s) V exist so that V conforms to boolean compiling HttpFSServer.java with OpenJDK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3579">HDFS-3579</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs: fix exception handling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3577">HDFS-3577</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>WebHdfsFileSystem can not read files larger than 24KB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3575">HDFS-3575</a>.
+     Minor bug reported by Brock Noland and fixed by Brock Noland <br>
+     <b>HttpFS does not log Exception Stacktraces</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3574">HDFS-3574</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Fix small race and do some cleanup in GetImageServlet</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3572">HDFS-3572</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node , security)<br>
+     <b>Cleanup code which inits SPNEGO in HttpServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3568">HDFS-3568</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>fuse_dfs: add support for security</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3559">HDFS-3559</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>DFSTestUtil: use Builder class to construct DFSTestUtil instances</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3555">HDFS-3555</a>.
+     Major bug reported by Jeff Lord and fixed by Andy Isaacson (data-node , hdfs client)<br>
+     <b>idle client socket triggers DN ERROR log (should be INFO or DEBUG)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3551">HDFS-3551</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>WebHDFS CREATE does not use client location for redirection</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3548">HDFS-3548</a>.
+     Critical bug reported by Todd Lipcon and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>NamenodeFsck.copyBlock fails to create a Block Reader</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3541">HDFS-3541</a>.
+     Major bug reported by suja s and fixed by Vinay (data-node)<br>
+     <b>Deadlock between recovery, xceiver and packet responder</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3539">HDFS-3539</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>libhdfs code cleanups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3537">HDFS-3537</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs , libhdfs)<br>
+     <b>Move libhdfs and fuse-dfs source to native subdirectories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3535">HDFS-3535</a>.
+     Major new feature reported by Andy Isaacson and fixed by Andy Isaacson (name-node)<br>
+     <b>Audit logging should log denied accesses</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3531">HDFS-3531</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>EditLogFileOutputStream#preallocate should check for incomplete writes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3524">HDFS-3524</a>.
+     Major bug reported by Eli Collins and fixed by Brandon Li (test)<br>
+     <b>TestFileLengthOnClusterRestart failed due to error message change</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3522">HDFS-3522</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (name-node)<br>
+     <b>If NN is in safemode, it should throw SafeModeException when getBlockLocations has zero locations</b><br>
+     <blockquote>getBlockLocations(), and hence open() for read, will now throw SafeModeException if the NameNode is still in safe mode and there are no replicas reported yet for one of the blocks in the file.</blockquote></li>
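+<p>A minimal client-side sketch of the new failure mode, assuming a hypothetical path <code>/data/file</code>; over RPC the SafeModeException surfaces wrapped in an <code>org.apache.hadoop.ipc.RemoteException</code>:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ipc.RemoteException;
+
+public class SafeModeReadExample {
+  public static void main(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(new Configuration());
+    try {
+      fs.open(new Path("/data/file")).close();
+    } catch (RemoteException e) {
+      // Thrown while the NameNode is in safe mode and a block of the
+      // file has no reported replicas yet, instead of returning a
+      // zero-location block list.
+      System.err.println("open failed: " + e.getClassName());
+    }
+  }
+}
+</pre>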
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3520">HDFS-3520</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Add transfer rate logging to TransferFsImage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3518">HDFS-3518</a>.
+     Major bug reported by Bikas Saha and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Provide API to check HDFS operational state</b><br>
+     <blockquote>Add a utility method HdfsUtils.isHealthy(uri) for checking if the given HDFS is healthy.</blockquote></li>
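+<p>A usage sketch for the utility above, assuming it is the <code>HdfsUtils</code> class under <code>org.apache.hadoop.hdfs.client</code> and a hypothetical NameNode URI:</p>
+<pre>
+import java.net.URI;
+import org.apache.hadoop.hdfs.client.HdfsUtils;
+
+public class HealthCheckExample {
+  public static void main(String[] args) {
+    // true when the NameNode at the URI is up and able to serve requests.
+    boolean ok = HdfsUtils.isHealthy(URI.create("hdfs://nn.example.com:8020"));
+    System.out.println("healthy: " + ok);
+  }
+}
+</pre>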
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3517">HDFS-3517</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>TestStartup should bind ephemeral ports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3516">HDFS-3516</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Check content-type in WebHdfsFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3514">HDFS-3514</a>.
+     Major improvement reported by Henry Robinson and fixed by Henry Robinson (test)<br>
+     <b>Add missing TestParallelLocalRead</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3513">HDFS-3513</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS should cache filesystems</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3505">HDFS-3505</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>DirectoryScanner does not join all threads in shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3504">HDFS-3504</a>.
+     Major improvement reported by Siddharth Seth and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Configurable retry in DFSClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3502">HDFS-3502</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Change INodeFile and INodeFileUnderConstruction to package private</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3501">HDFS-3501</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>Checkpointing with security enabled will stop working after ticket lifetime expires</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3491">HDFS-3491</a>.
+     Major bug reported by Romain Rigaux and fixed by Alejandro Abdelnur <br>
+     <b>HttpFs does not set permissions correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3490">HDFS-3490</a>.
+     Minor bug reported by Todd Lipcon and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>DN WebHDFS methods throw NPE if Namenode RPC address param not specified</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3487">HDFS-3487</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (tools)<br>
+     <b>offlineimageviewer should give byte offset information when it encounters an exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3486">HDFS-3486</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (security , tools)<br>
+     <b>offlineimageviewer can't read fsimage files that contain persistent delegation tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3485">HDFS-3485</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson <br>
+     <b>DataTransferThrottler will over-throttle when currentTimeMillis jumps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3484">HDFS-3484</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>hdfs fsck doesn't work if NN HTTP address is set to 0.0.0.0 even if NN RPC address is configured</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3481">HDFS-3481</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>Refactor HttpFS handling of JAX-RS query string parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3480">HDFS-3480</a>.
+     Major bug reported by Eli Collins and fixed by Vinay (build)<br>
+     <b>Multiple SLF4J binding warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3475">HDFS-3475</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J <br>
+     <b>Make the replication and invalidation rates configurable</b><br>
+     <blockquote>This change adds two new configuration parameters.
+# {{dfs.namenode.invalidate.work.pct.per.iteration}} for controlling deletion rate of blocks.
+# {{dfs.namenode.replication.work.multiplier.per.iteration}} for controlling replication rate. This in turn allows controlling the time it takes for decommissioning.
+
+Please see hdfs-default.xml for detailed description.</blockquote></li>
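+<p>A minimal sketch of setting the two parameters through the <code>Configuration</code> API; the values shown are illustrative, not recommendations:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+
+public class ReplicationThrottleExample {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    // Fraction of pending block deletions processed per iteration.
+    conf.setFloat("dfs.namenode.invalidate.work.pct.per.iteration", 0.32f);
+    // Multiplier on replication work per iteration; raising it can
+    // shorten decommissioning time.
+    conf.setInt("dfs.namenode.replication.work.multiplier.per.iteration", 2);
+  }
+}
+</pre>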
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3474">HDFS-3474</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>Cleanup Exception handling in BookKeeper journal manager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3469">HDFS-3469</a>.
+     Minor bug reported by Vinay and fixed by Vinay (auto-failover)<br>
+     <b>start-dfs.sh will start zkfc, but stop-dfs.sh will not stop zkfc similarly.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3468">HDFS-3468</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G <br>
+     <b>Make BKJM-ZK session timeout configurable.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3466">HDFS-3466</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley (name-node , security)<br>
+     <b>The SPNEGO filter for the NameNode should come out of the web keytab file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3460">HDFS-3460</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS proxyuser validation with Kerberos ON uses full principal name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3454">HDFS-3454</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (balancer)<br>
+     <b>Balancer unconditionally logs InterruptedException at INFO level on shutdown if security is enabled </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3452">HDFS-3452</a>.
+     Blocker sub-task reported by suja s and fixed by Uma Maheswara Rao G <br>
+     <b>BKJM:Switch from standby to active fails and NN gets shut down due to delay in clearing of lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3446">HDFS-3446</a>.
+     Major bug reported by Matthew Jacobs and fixed by Matthew Jacobs (name-node)<br>
+     <b>HostsFileReader silently ignores bad includes/excludes</b><br>
+     <blockquote>HDFS no longer silently ignores missing or unreadable host files specified by dfs.hosts or dfs.hosts.exclude. In order to specify that no hosts should be included or excluded, administrators should either refrain from setting the relevant config properties, or create an empty file in order to represent an empty list.</blockquote></li>
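+<p>A minimal sketch of the affected properties, with hypothetical file paths; per the note above, each file must now exist and be readable, and an empty file stands for an empty list:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+
+public class HostsFilesExample {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    // Missing or unreadable files are now an error rather than being
+    // silently treated as empty lists.
+    conf.set("dfs.hosts", "/etc/hadoop/conf/dfs.hosts");
+    conf.set("dfs.hosts.exclude", "/etc/hadoop/conf/dfs.hosts.exclude");
+  }
+}
+</pre>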
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3444">HDFS-3444</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>hdfs groups command doesn't work with security enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3442">HDFS-3442</a>.
+     Minor bug reported by suja s and fixed by Andrew Wang <br>
+     <b>Incorrect count for Missing Replicas in FSCK report</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3441">HDFS-3441</a>.
+     Major sub-task reported by suja s and fixed by Rakesh R <br>
+     <b>Race condition between rolling logs at active NN and purging at standby</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3440">HDFS-3440</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>should more effectively limit stream memory consumption when reading corrupt edit logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3438">HDFS-3438</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>BootstrapStandby should not require a rollEdits on active node</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3436">HDFS-3436</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Vinay (data-node)<br>
+     <b>adding new datanode to existing  pipeline fails in case of Append/Recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3433">HDFS-3433</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>GetImageServlet should allow administrative requestors when security is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3428">HDFS-3428</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>move DelegationTokenRenewer to common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3423">HDFS-3423</a>.
+     Major sub-task reported by Rakesh R and fixed by Ivan Kelly <br>
+     <b>BKJM: NN startup is failing, when tries to recoverUnfinalizedSegments() a bad inProgress_ ZNodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3422">HDFS-3422</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>TestStandbyIsHot timeouts too aggressive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3419">HDFS-3419</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Cleanup LocatedBlock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3417">HDFS-3417</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Rename BalancerDatanode#getName to getDisplayName to be consistent with Datanode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3416">HDFS-3416</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Cleanup DatanodeID and DatanodeRegistration constructors used by testing </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3415">HDFS-3415</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Brandon Li (name-node)<br>
+     <b>During NameNode starting up, it may pick wrong storage directory inspector when the layout versions of the storage directories are different</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3414">HDFS-3414</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (balancer)<br>
+     <b>Balancer does not find NameNode if rpc-address or servicerpc-address are not set in client configs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3413">HDFS-3413</a>.
+     Critical bug reported by Todd Lipcon and fixed by Aaron T. Myers (ha , test)<br>
+     <b>TestFailureToReadEdits timing out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3408">HDFS-3408</a>.
+     Minor sub-task reported by Rakesh R and fixed by Rakesh R (name-node)<br>
+     <b>BKJM : Namenode format fails, if there is no BK root</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3404">HDFS-3404</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>Make putImage in GetImageServlet infer remote address to fetch from request</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3401">HDFS-3401</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node , test)<br>
+     <b>Cleanup DatanodeDescriptor creation in the tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3400">HDFS-3400</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (data-node , scripts)<br>
+     <b>DNs should be able start with jsvc even if security is disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3398">HDFS-3398</a>.
+     Minor bug reported by Brahma Reddy Battula and fixed by amith (hdfs client)<br>
+     <b>Client will not retry when primaryDN is down once it's just got pipeline</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3394">HDFS-3394</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Do not use generic in INodeFile.getLastBlock()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3391">HDFS-3391</a>.
+     Critical bug reported by Arun C Murthy and fixed by Todd Lipcon <br>
+     <b>TestPipelinesFailover#testLeaseRecoveryAfterFailover is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3390">HDFS-3390</a>.
+     Minor improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>DFSAdmin should print full stack traces of errors when DEBUG logging is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3389">HDFS-3389</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Document the BKJM usage in Namenode HA.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3385">HDFS-3385</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>ClassCastException when trying to append a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3372">HDFS-3372</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (tools)<br>
+     <b>offlineEditsViewer should be able to read a binary edits file with recovery mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3369">HDFS-3369</a>.
+     Minor sub-task reported by John George and fixed by John George (name-node)<br>
+     <b>change variable names referring to inode in blockmanagement to more appropriate</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3368">HDFS-3368</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Konstantin Shvachko (name-node)<br>
+     <b>Missing blocks due to bad DataNodes coming up and down.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3359">HDFS-3359</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>DFSClient.close should close cached sockets</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3341">HDFS-3341</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Change minimum RPC versions to 2.0.0-SNAPSHOT instead of 2.0.0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3335">HDFS-3335</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>check for edit log corruption at the end of the log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3334">HDFS-3334</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs client)<br>
+     <b>ByteRangeInputStream leaks streams</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3331">HDFS-3331</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>setBalancerBandwidth do not checkSuperuserPrivilege</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3321">HDFS-3321</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Error message for insufficient data nodes to come out of safemode is wrong.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3318">HDFS-3318</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs client)<br>
+     <b>Hftp hangs on transfers &gt;2GB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3312">HDFS-3312</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs client)<br>
+     <b>Hftp selects wrong token service</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3308">HDFS-3308</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (webhdfs)<br>
+     <b>hftp/webhdfs can't get tokens if authority has no port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3306">HDFS-3306</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>fuse_dfs: don't lock release operations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3291">HDFS-3291</a>.
+     Major test reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>add test that covers HttpFS working w/ a non-HDFS Hadoop filesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3276">HDFS-3276</a>.
+     Minor improvement reported by Vinithra Varadharajan and fixed by Todd Lipcon (ha , name-node)<br>
+     <b>initializeSharedEdits should have a -nonInteractive flag</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3266">HDFS-3266</a>.
+     Minor bug reported by Aaron T. Myers and fixed by madhukara phatak <br>
+     <b>DFSTestUtil#waitCorruptReplicas doesn't sleep between checks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3258">HDFS-3258</a>.
+     Major test reported by Eli Collins and fixed by Junping Du (test)<br>
+     <b>Test for HADOOP-8144 (pseudoSortByDistance in NetworkTopology for first rack local node)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3243">HDFS-3243</a>.
+     Major bug reported by Todd Lipcon and fixed by Henry Robinson (hdfs client , test)<br>
+     <b>TestParallelRead timing out on jenkins</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3235">HDFS-3235</a>.
+     Minor bug reported by Henry Robinson and fixed by Henry Robinson <br>
+     <b>MiniDFSClusterManager doesn't correctly support -format option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3230">HDFS-3230</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>Cleanup DatanodeID creation in the tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3194">HDFS-3194</a>.
+     Major bug reported by suja s and fixed by Andy Isaacson (data-node)<br>
+     <b>DataNode block scanner is running too frequently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3190">HDFS-3190</a>.
+     Minor sub-task reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Simple refactors in existing NN code to assist QuorumJournalManager extension</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3177">HDFS-3177</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (data-node , hdfs client)<br>
+     <b>Allow DFSClient to find out and use the CRC type being used for a file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3176">HDFS-3176</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (hdfs client)<br>
+     <b>JsonUtil should not parse the MD5MD5CRC32FileChecksum bytes on its own.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3170">HDFS-3170</a>.
+     Major improvement reported by Todd Lipcon and fixed by Matthew Jacobs (data-node)<br>
+     <b>Add more useful metrics for write latency</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3168">HDFS-3168</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Clean up FSNamesystem and BlockManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3166">HDFS-3166</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs client)<br>
+     <b>Hftp connections do not have a timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3157">HDFS-3157</a>.
+     Major bug reported by J.Andreina and fixed by Ashish Singhi (name-node)<br>
+     <b>Error in deleting block is keep on coming from DN even after the block report and directory scanning has happened</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3150">HDFS-3150</a>.
+     Major new feature reported by Eli Collins and fixed by Eli Collins (data-node , hdfs client)<br>
+     <b>Add option for clients to contact DNs via hostname</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3136">HDFS-3136</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (build)<br>
+     <b>Multiple SLF4J binding warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3134">HDFS-3134</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>Harden edit log loader against malformed or malicious input</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3113">HDFS-3113</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>httpfs does not support delegation tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3110">HDFS-3110</a>.
+     Major improvement reported by Henry Robinson and fixed by Henry Robinson (libhdfs , performance)<br>
+     <b>libhdfs implementation of direct read API</b><br>
+     <blockquote>libhdfs is enhanced to read directly into user-supplied buffers when possible, reducing the number of memory copies.</blockquote></li>
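+<p>The Java-side analogue of this direct-read path is the ByteBuffer read API from HDFS-2834 (also in this release); a sketch, assuming a hypothetical file path:</p>
+<pre>
+import java.nio.ByteBuffer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class DirectReadExample {
+  public static void main(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(new Configuration());
+    FSDataInputStream in = fs.open(new Path("/data/file"));
+    ByteBuffer buf = ByteBuffer.allocateDirect(64 * 1024);
+    // Data lands in the caller's buffer, avoiding an extra copy when
+    // the underlying stream supports ByteBufferReadable.
+    int n = in.read(buf);
+    System.out.println("read " + n + " bytes");
+    in.close();
+  }
+}
+</pre>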
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3067">HDFS-3067</a>.
+     Major bug reported by Henry Robinson and fixed by Henry Robinson (hdfs client)<br>
+     <b>NPE in DFSInputStream.readBuffer if read is repeated on corrupted block</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3058">HDFS-3058</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>HA: Bring BookKeeperJournalManager up to date with HA changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3054">HDFS-3054</a>.
+     Major bug reported by patrick white and fixed by Colin Patrick McCabe (tools)<br>
+     <b>distcp -skipcrccheck has no effect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3048">HDFS-3048</a>.
+     Major bug reported by Eli Collins and fixed by Andy Isaacson (name-node)<br>
+     <b>Small race in BlockManager#close</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3042">HDFS-3042</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon (auto-failover , ha)<br>
+     <b>Automatic failover support for NN HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3040">HDFS-3040</a>.
+     Trivial improvement reported by Aaron T. Myers and fixed by madhukara phatak (test)<br>
+     <b>TestMulitipleNNDataBlockScanner is misspelled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3037">HDFS-3037</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestMulitipleNNDataBlockScanner#testBlockScannerAfterRestart is racy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3031">HDFS-3031</a>.
+     Major bug reported by Stephen Chu and fixed by Todd Lipcon (ha)<br>
+     <b>HA: Error (failed to close file) when uploading large file + kill active NN + manual failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3002">HDFS-3002</a>.
+     Trivial improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (test)<br>
+     <b>TestNameNodeMetrics need not wait for metrics update with new metrics framework</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2988">HDFS-2988</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Miomir Boljanovic (name-node)<br>
+     <b>Improve error message when storage directory lock fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2982">HDFS-2982</a>.
+     Critical bug reported by Todd Lipcon and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>Startup performance suffers when there are many edit log segments</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2978">HDFS-2978</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>The NameNode should expose name dir statuses via JMX</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2966">HDFS-2966</a>.
+     Minor bug reported by Steve Loughran and fixed by Steve Loughran (test)<br>
+     <b>TestNameNodeMetrics tests can fail under load</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2963">HDFS-2963</a>.
+     Minor bug reported by J.Andreina and fixed by Andrew Wang <br>
+     <b>Console Output is confusing while executing metasave (dfsadmin command) </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2914">HDFS-2914</a>.
+     Major bug reported by Hari Mankude and fixed by Vinay (ha , name-node)<br>
+     <b>HA: Standby should not enter safemode when resources are low</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2885">HDFS-2885</a>.
+     Major improvement reported by Eli Collins and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Remove "federation" from the nameservice config options</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2834">HDFS-2834</a>.
+     Major improvement reported by Henry Robinson and fixed by Henry Robinson (hdfs client , performance)<br>
+     <b>ByteBuffer-based read API for DFSInputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2800">HDFS-2800</a>.
+     Major bug reported by Aaron T. Myers and fixed by Todd Lipcon (ha , test)<br>
+     <b>HA: TestStandbyCheckpoints.testCheckpointCancellation is racy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2797">HDFS-2797</a>.
+     Major bug reported by Aaron T. Myers and fixed by Colin Patrick McCabe (ha , name-node)<br>
+     <b>Fix misuses of InputStream#skip in the edit log code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2793">HDFS-2793</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Todd Lipcon (name-node)<br>
+     <b>Add an admin command to trigger an edit log roll</b><br>
+     <blockquote>Introduced a new command, "hdfs dfsadmin -rollEdits" which requests that the active NameNode roll its edit log. This can be useful for administrators manually backing up log segments.</blockquote></li>
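+<p>A sketch of driving the same operation from Java by invoking the dfsadmin tool programmatically, assuming <code>DFSAdmin</code> accepts the <code>-rollEdits</code> argument as the shell command does:</p>
+<pre>
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.util.ToolRunner;
+
+public class RollEditsExample {
+  public static void main(String[] args) throws Exception {
+    // Equivalent to "hdfs dfsadmin -rollEdits" on the command line.
+    int rc = ToolRunner.run(new DFSAdmin(), new String[] {"-rollEdits"});
+    System.exit(rc);
+  }
+}
+</pre>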
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2759">HDFS-2759</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>Pre-allocate HDFS edit log files after writing version number</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2757">HDFS-2757</a>.
+     Major bug reported by Jean-Daniel Cryans and fixed by Jean-Daniel Cryans <br>
+     <b>Cannot read a local block that's being written to when using the local read short circuit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2727">HDFS-2727</a>.
+     Minor improvement reported by Sho Shimauchi and fixed by Colin Patrick McCabe (libhdfs)<br>
+     <b>libhdfs should get the default block size from the server</b><br>
+     <blockquote>libhdfs now uses the server block size configuration rather than the deprecated dfs.block.size client configuration.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2717">HDFS-2717</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>BookKeeper Journal output stream doesn't check addComplete rc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2686">HDFS-2686</a>.
+     Major improvement reported by Todd Lipcon and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Remove DistributedUpgrade related code</b><br>
+     <blockquote>This jira removes functionality that has not been used/applicable since release 0.17. The incompatibility introduced by this change will not affect any HDFS users.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2652">HDFS-2652</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Port token service changes from 205</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2619">HDFS-2619</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley (build)<br>
+     <b>Remove my personal email address from the libhdfs build file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2617">HDFS-2617</a>.
+     Major improvement reported by Jakob Homan and fixed by Jakob Homan (security)<br>
+     <b>Replaced Kerberized SSL for image transfer and fsck with SPNEGO-based solution</b><br>
+     <blockquote>Due to the requirement that KSSL use weak encryption types for Kerberos tickets, HTTP authentication to the NameNode will now use SPNEGO by default. This will require users of previous branch-1 releases with security enabled to modify their configurations and create new Kerberos principals in order to use SPNEGO. The old behavior of using KSSL can optionally be enabled by setting the configuration option "hadoop.security.use-weak-http-crypto" to "true".</blockquote></li>
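+<p>A minimal sketch of the opt-out mentioned above, restoring the old KSSL behavior via the quoted property:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+
+public class WeakHttpCryptoOptOut {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    // Re-enables KSSL-based HTTP authentication instead of SPNEGO.
+    conf.setBoolean("hadoop.security.use-weak-http-crypto", true);
+  }
+}
+</pre>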
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2421">HDFS-2421</a>.
+     Major improvement reported by Hairong Kuang and fixed by Jing Zhao (name-node)<br>
+     <b>Improve the concurrency of  SerialNumberMap in NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2391">HDFS-2391</a>.
+     Major improvement reported by Rajit Saha and fixed by Harsh J (balancer)<br>
+     <b>Newly set BalancerBandwidth value is not displayed anywhere</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2330">HDFS-2330</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>In NNStorage.java, IOExceptions of stream closures  can mask root exceptions.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2285">HDFS-2285</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Konstantin Shvachko (name-node)<br>
+     <b>BackupNode should reject requests trying to modify namespace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2025">HDFS-2025</a>.
+     Minor bug reported by sravankorumilli and fixed by Ashish Singhi (data-node)<br>
+     <b>Go Back to File View link is not working in tail.jsp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1490">HDFS-1490</a>.
+     Minor bug reported by Dmytro Molkov and fixed by Vinay (name-node)<br>
+     <b>TransferFSImage should timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1249">HDFS-1249</a>.
+     Minor bug reported by matsusaka kentaro and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>with fuse-dfs, chown which only has owner (or only group) argument fails with Input/output error.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1153">HDFS-1153</a>.
+     Minor bug reported by Ravi Phulari and fixed by Ravi Phulari (data-node)<br>
+     <b>dfsnodelist.jsp should handle invalid input parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1013">HDFS-1013</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Eugene Koontz <br>
+     <b>Miscellaneous improvements to HTML markup for web UIs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-799">HDFS-799</a>.
+     Major improvement reported by Christian Kunz and fixed by Colin Patrick McCabe <br>
+     <b>libhdfs must call DetachCurrentThread when a thread is destroyed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-766">HDFS-766</a>.
+     Minor bug reported by Ravi Phulari and fixed by Jon Zuanich <br>
+     <b>Error message not clear for set space quota out of boundary  values. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-744">HDFS-744</a>.
+     Major new feature reported by Hairong Kuang and fixed by Lars Hofhansl (data-node , hdfs client)<br>
+     <b>Support hsync in HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-711">HDFS-711</a>.
+     Major bug reported by freestyler and fixed by Colin Patrick McCabe (documentation)<br>
+     <b>hdfsUtime does not handle atime = 0 or mtime = 0 correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-470">HDFS-470</a>.
+     Minor bug reported by Pete Wyckoff and fixed by Colin Patrick McCabe <br>
+     <b>libhdfs should handle 0-length reads from FSInputStream correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8801">HADOOP-8801</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>ExitUtil#terminate should capture the exception stack trace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8794">HADOOP-8794</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Modifiy bin/hadoop to point to HADOOP_YARN_HOME</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8781">HADOOP-8781</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (scripts)<br>
+     <b>hadoop-config.sh should add JAVA_LIBRARY_PATH to LD_LIBRARY_PATH</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8775">HADOOP-8775</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>MR2 distcp permits non-positive value to -bandwidth option which causes job never to complete</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8770">HADOOP-8770</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Eli Collins (trash)<br>
+     <b>NN should not RPC to self to find trash defaults (causes deadlock)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8766">HADOOP-8766</a>.
+     Major bug reported by Eli Collins and fixed by Colin Patrick McCabe (test)<br>
+     <b>FileContextMainOperationsBaseTest should randomize the root dir </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8764">HADOOP-8764</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (build)<br>
+     <b>CMake: HADOOP-8737 broke ARM build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8754">HADOOP-8754</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (ipc)<br>
+     <b>Deprecate all the RPC.getServer() variants</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8749">HADOOP-8749</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (conf)<br>
+     <b>HADOOP-8031 changed the way in which relative xincludes are handled in Configuration.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8748">HADOOP-8748</a>.
+     Minor improvement reported by Arun C Murthy and fixed by Arun C Murthy (io)<br>
+     <b>Move dfsclient retry to a util class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8747">HADOOP-8747</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (native)<br>
+     <b>Syntax error on cmake version 2.6 patch 2 in JNIFlags.cmake</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8737">HADOOP-8737</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (native)<br>
+     <b>cmake: always use JAVA_HOME to find libjvm.so, jni.h, jni_md.h</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8727">HADOOP-8727</a>.
+     Major bug reported by Harsh J and fixed by Harsh J (conf)<br>
+     <b>Gracefully deprecate dfs.umaskmode in 2.x onwards</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8726">HADOOP-8726</a>.
+     Major bug reported by Benoy Antony and fixed by Daryn Sharp (security)<br>
+     <b>The Secrets in Credentials are not available to MR tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8725">HADOOP-8725</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>MR is broken when security is off</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8722">HADOOP-8722</a>.
+     Minor bug reported by Eli Collins and fixed by Colin Patrick McCabe (documentation)<br>
+     <b>Update BUILDING.txt with latest snappy info</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8721">HADOOP-8721</a>.
+     Critical bug reported by suja s and fixed by Vinay (auto-failover , ha)<br>
+     <b>ZKFC should not retry 45 times when attempting a graceful fence during a failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8720">HADOOP-8720</a>.
+     Trivial bug reported by Vlad Rozov and fixed by Vlad Rozov (test)<br>
+     <b>TestLocalFileSystem should use test root subdirectory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8710">HADOOP-8710</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>Remove ability for users to easily run the trash emptier</b><br>
+     <blockquote>The trash emptier may no longer be run using "hadoop org.apache.hadoop.fs.Trash". The trash emptier runs on the NameNode (if configured). Old trash checkpoints may be deleted using "hadoop fs -expunge". (See the usage sketch after this list.)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8709">HADOOP-8709</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (fs)<br>
+     <b>globStatus changed behavior from 0.20/1.x</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8703">HADOOP-8703</a>.
+     Major bug reported by Dave Thompson and fixed by Dave Thompson <br>
+     <b>distcpV2: turn CRC checking off for 0 byte size</b><br>
+     <blockquote>distcp skips CRC on 0 byte files.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8700">HADOOP-8700</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (util)<br>
+     <b>Move the checksum type constants to an enum</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8699">HADOOP-8699</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (test)<br>
+     <b>some common testcases create core-site.xml in test-classes making other testcases to fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8697">HADOOP-8697</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (test)<br>
+     <b>TestWritableName fails intermittently with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8695">HADOOP-8695</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (test)<br>
+     <b>TestPathData fails intermittently with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8693">HADOOP-8693</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (test)<br>
+     <b>TestSecurityUtil fails intermittently with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8692">HADOOP-8692</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (test)<br>
+     <b>TestLocalDirAllocator fails intermittently with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8689">HADOOP-8689</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>Make trash a server side configuration option</b><br>
+     <blockquote>If fs.trash.interval is configured on the server then the client's value for this configuration is ignored. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8686">HADOOP-8686</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (native)<br>
+     <b>Fix warnings in native code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8660">HADOOP-8660</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur <br>
+     <b>TestPseudoAuthenticator failing with NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8659">HADOOP-8659</a>.
+     Major bug reported by Trevor Robinson and fixed by Colin Patrick McCabe (native)<br>
+     <b>Native libraries must build with soft-float ABI for Oracle JVM on ARM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8655">HADOOP-8655</a>.
+     Major bug reported by Arun A K and fixed by  (util)<br>
+     <b>In TextInputFormat, when textinputformat.record.delimiter is specified, characters or character sequences in the data file that match the start of the delimiter are missing from the Map output in certain cases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8654">HADOOP-8654</a>.
+     Major bug reported by Gelesh and fixed by  (util)<br>
+     <b>TextInputFormat delimiter bug: input text portion ends with, and delimiter starts with, the same char/char sequence</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8648">HADOOP-8648</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>libhadoop:  native CRC32 validation crashes when io.bytes.per.checksum=1</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8644">HADOOP-8644</a>.
+     Critical new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>AuthenticatedURL should be able to use SSLFactory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8637">HADOOP-8637</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>FilterFileSystem#setWriteChecksum is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8635">HADOOP-8635</a>.
+     Critical improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Cannot cancel paths registered deleteOnExit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8634">HADOOP-8634</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Ensure FileSystem#close doesn't squawk for deleteOnExit paths</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8633">HADOOP-8633</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Interrupted FsShell copies may leave tmp files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8632">HADOOP-8632</a>.
+     Major bug reported by Costin Leau and fixed by Costin Leau (conf)<br>
+     <b>Configuration leaking class-loaders</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8627">HADOOP-8627</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>FS deleteOnExit may delete the wrong path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8626">HADOOP-8626</a>.
+     Major bug reported by Jonathan Natkins and fixed by Jonathan Natkins (security)<br>
+     <b>Typo in default setting for hadoop.security.group.mapping.ldap.search.filter.user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8624">HADOOP-8624</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>ProtobufRpcEngine should log all RPCs if TRACE logging is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8623">HADOOP-8623</a>.
+     Minor improvement reported by Steven Willis and fixed by Steven Willis (scripts)<br>
+     <b>hadoop jar command should respect HADOOP_OPTS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8620">HADOOP-8620</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (build)<br>
+     <b>Add -Drequire.fuse and -Drequire.snappy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8614">HADOOP-8614</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>IOUtils#skipFully hangs forever on EOF</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8613">HADOOP-8613</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>AbstractDelegationTokenIdentifier#getUser() should set token auth type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8611">HADOOP-8611</a>.
+     Major bug reported by Kihwal Lee and fixed by Robert Parker (security)<br>
+     <b>Allow fall-back to the shell-based implementation when JNI-based users-group mapping fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8609">HADOOP-8609</a>.
+     Major improvement reported by Todd Lipcon and fixed by Jon Zuanich <br>
+     <b>IPC server logs a useless message when shutting down socket</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8606">HADOOP-8606</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>FileSystem.get may return the wrong filesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8599">HADOOP-8599</a>.
+     Major bug reported by Andrey Klochkov and fixed by Andrey Klochkov (fs)<br>
+     <b>Non-empty response from FileSystem.getFileBlockLocations when asking for data beyond the end of a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8587">HADOOP-8587</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>HarFileSystem access of harMetaCache isn't threadsafe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8586">HADOOP-8586</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Fixup a bunch of SPNEGO misspellings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8585">HADOOP-8585</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Fix initialization circularity between UserGroupInformation and HadoopConfiguration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8581">HADOOP-8581</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>add support for HTTPS to the web UIs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8573">HADOOP-8573</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (conf)<br>
+     <b>Configuration tries to read from an InputStream resource multiple times</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8566">HADOOP-8566</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (io)<br>
+     <b>AvroReflectSerializer.accept(Class) throws a NPE if the class has no package (primitive types and arrays)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8563">HADOOP-8563</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (build)<br>
+     <b>don't package hadoop-pipes examples/bin</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8551">HADOOP-8551</a>.
+     Major bug reported by Robert Joseph Evans and fixed by John George (fs)<br>
+     <b>fs -mkdir creates parent directories without the -p option</b><br>
+     <blockquote>FsShell's "mkdir" no longer implicitly creates all non-existent parent directories. The command adopts the POSIX-compliant behavior of requiring the "-p" flag to auto-create parent directories. (See the usage sketch after this list.)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8550">HADOOP-8550</a>.
+     Major bug reported by Robert Joseph Evans and fixed by John George (fs)<br>
+     <b>hadoop fs -touchz automatically created parent directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8547">HADOOP-8547</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Package hadoop-pipes examples/bin directory (again)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8543">HADOOP-8543</a>.
+     Major bug reported by Radim Kolar and fixed by Radim Kolar (build)<br>
+     <b>Invalid pom.xml files on 0.23 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8541">HADOOP-8541</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang (metrics)<br>
+     <b>Better high-percentile latency metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8538">HADOOP-8538</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (native)<br>
+     <b>CMake builds fail on ARM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8537">HADOOP-8537</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (io)<br>
+     <b>Two TFile tests failing recently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8535">HADOOP-8535</a>.
+     Major improvement reported by Jonathan Eagles and fixed by Jonathan Eagles (build)<br>
+     <b>Cut hadoop build times in half (upgrade maven-compiler-plugin to 2.5.1)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8533">HADOOP-8533</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Brandon Li (ipc)<br>
+     <b>Remove Parallel Call in IPC</b><br>
+     <blockquote>Merged the change to branch-2</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8531">HADOOP-8531</a>.
+     Trivial improvement reported by Harsh J and fixed by madhukara phatak (io)<br>
+     <b>SequenceFile Writer can throw out a better error if a serializer or deserializer isn't available</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8525">HADOOP-8525</a>.
+     Trivial improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Provide Improved Traceability for Configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8524">HADOOP-8524</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (conf)<br>
+     <b>Allow users to get source of a Configuration parameter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8512">HADOOP-8512</a>.
+     Minor bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>AuthenticatedURL should reset the Token when the server returns other than OK on authentication</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8509">HADOOP-8509</a>.
+     Minor bug reported by Matteo Bertozzi and fixed by Alejandro Abdelnur (util)<br>
+     <b>JarFinder duplicate entry: META-INF/MANIFEST.MF exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8507">HADOOP-8507</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Avoid OOM while deserializing DelegationTokenIdentifier</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8501">HADOOP-8501</a>.
+     Major bug reported by Radim Kolar and fixed by Radim Kolar (benchmarks)<br>
+     <b>Gridmix fails to compile on OpenJDK7u4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8499">HADOOP-8499</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Lower min.user.id to 500 for the tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8495">HADOOP-8495</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (build)<br>
+     <b>Update Netty to avoid leaking file descriptors during shuffle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8488">HADOOP-8488</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>test-patch.sh gives +1 even if the native build fails.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8485">HADOOP-8485</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (documentation)<br>
+     <b>Don't hardcode "Apache Hadoop 0.23" in the docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8481">HADOOP-8481</a>.
+     Trivial bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (documentation)<br>
+     <b>update BUILDING.txt to talk about cmake rather than autotools</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8480">HADOOP-8480</a>.
+     Trivial bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>The native build should honor -DskipTests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8466">HADOOP-8466</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (build)<br>
+     <b>hadoop-client POM incorrectly excludes avro</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8465">HADOOP-8465</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>hadoop-auth should support ephemeral authentication</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8463">HADOOP-8463</a>.
+     Major improvement reported by Eli Collins and fixed by madhukara phatak (security)<br>
+     <b>hadoop.security.auth_to_local needs a key definition and doc </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8460">HADOOP-8460</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (documentation)<br>
+     <b>Document proper setting of HADOOP_PID_DIR and HADOOP_SECURE_DN_PID_DIR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8458">HADOOP-8458</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Add management hook to AuthenticationHandler to enable delegation token operations support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8452">HADOOP-8452</a>.
+     Minor bug reported by Andy Isaacson and fixed by Andy Isaacson <br>
+     <b>DN logs backtrace when running under jsvc and /jmx is loaded</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8450">HADOOP-8450</a>.
+     Trivial bug reported by Colin Patrick McCabe and fixed by Eli Collins (test)<br>
+     <b>Remove src/test/system</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8449">HADOOP-8449</a>.
+     Minor bug reported by Joey Echeverria and fixed by Harsh J <br>
+     <b>hadoop fs -text fails with compressed sequence files with the codec file extension</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8444">HADOOP-8444</a>.
+     Major bug reported by Mariappan Asokan and fixed by madhukara phatak (fs , test)<br>
+     <b>Fix the tests FSMainOperationsBaseTest.java and FileContextMainOperationsBaseTest.java to avoid potential test failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8438">HADOOP-8438</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K <br>
+     <b>hadoop-validate-setup.sh refers to examples jar file which doesn't exist</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8433">HADOOP-8433</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Brahma Reddy Battula (scripts)<br>
+     <b>Don't set HADOOP_LOG_DIR in hadoop-env.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8431">HADOOP-8431</a>.
+     Major bug reported by Eli Collins and fixed by Sandy Ryza <br>
+     <b>Running distcp without args throws IllegalArgumentException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8423">HADOOP-8423</a>.
+     Major bug reported by Jason B and fixed by Todd Lipcon (io)<br>
+     <b>MapFile.Reader.get() crashes jvm or throws EOFException on Snappy or LZO block-compressed data</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8422">HADOOP-8422</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>Deprecate FileSystem#getDefault* and getServerDefault methods that don't take a Path argument </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8408">HADOOP-8408</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (viewfs)<br>
+     <b>MR doesn't work with a non-default ViewFS mount table and security enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8406">HADOOP-8406</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (io)<br>
+     <b>CompressionCodecFactory.CODEC_PROVIDERS iteration is thread-unsafe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8403">HADOOP-8403</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>bump up POMs version to 2.0.1-SNAPSHOT</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8400">HADOOP-8400</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur (security)<br>
+     <b>All commands warn "Kerberos krb5 configuration not found" when security is not enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8393">HADOOP-8393</a>.
+     Major bug reported by Patrick Hunt and fixed by Patrick Hunt (scripts)<br>
+     <b>hadoop-config.sh missing variable exports, causes Yarn jobs to fail with ClassNotFoundException MRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8390">HADOOP-8390</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (test)<br>
+     <b>TestFileSystemCanonicalization fails with JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8373">HADOOP-8373</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (ipc)<br>
+     <b>Port RPC.getServerAddress to 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8372">HADOOP-8372</a>.
+     Major bug reported by Junping Du and fixed by Junping Du (io , util)<br>
+     <b>normalizeHostName() in NetUtils is not working properly when resolving a hostname that starts with a numeric character</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8370">HADOOP-8370</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (native)<br>
+     <b>Native build failure: javah: class file for org.apache.hadoop.classification.InterfaceAudience not found</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8368">HADOOP-8368</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Use CMake rather than autotools to build native code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8367">HADOOP-8367</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Improve documentation of declaringClassProtocolName in rpc headers </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8362">HADOOP-8362</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by madhukara phatak (conf)<br>
+     <b>Improve exception message when Configuration.set() is called with a null key or value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8361">HADOOP-8361</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Avoid out-of-memory problems when deserializing strings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8358">HADOOP-8358</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (conf)<br>
+     <b>Config-related WARN for dfs.web.ugi can be avoided.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8342">HADOOP-8342</a>.
+     Major bug reported by Randy Clayton and fixed by Alejandro Abdelnur (fs)<br>
+     <b>HDFS command fails with exception following merge of HADOOP-8325</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8341">HADOOP-8341</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Fix or filter findbugs issues in hadoop-tools</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8340">HADOOP-8340</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (util)<br>
+     <b>SNAPSHOT build versions should compare as less than their eventual final release</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8335">HADOOP-8335</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (util)<br>
+     <b>Improve Configuration's address handling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8334">HADOOP-8334</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>HttpServer sometimes returns incorrect port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8330">HADOOP-8330</a>.
+     Minor bug reported by John George and fixed by John George (test)<br>
+     <b>TestSequenceFile.testCreateUsesFsArg() is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8329">HADOOP-8329</a>.
+     Major bug reported by Kumar Ravi and fixed by Eli Collins (build)<br>
+     <b>Build fails with Java 7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8328">HADOOP-8328</a>.
+     Major bug reported by Tom White and fixed by Tom White (fs)<br>
+     <b>Duplicate FileSystem Statistics object for 'file' scheme</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8327">HADOOP-8327</a>.
+     Major bug reported by Dave Thompson and fixed by Dave Thompson <br>
+     <b>distcpv2 and distcpv1 jars should not coexist</b><br>
+     <blockquote>Resolve sporadic distcp issue due to having two DistCp classes (v1 &amp; v2) in the classpath.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8325">HADOOP-8325</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (fs)<br>
+     <b>Add a ShutdownHookManager to be used by different components instead of the JVM shutdownhook</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8323">HADOOP-8323</a>.
+     Critical improvement reported by Harsh J and fixed by Harsh J (io)<br>
+     <b>Revert HADOOP-7940 and improve javadocs and test for Text.clear()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8317">HADOOP-8317</a>.
+     Major bug reported by Radim Kolar and fixed by  (build)<br>
+     <b>Update maven-assembly-plugin to 2.3 - fix build on FreeBSD</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8316">HADOOP-8316</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (conf)<br>
+     <b>Audit logging should be disabled by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8305">HADOOP-8305</a>.
+     Major bug reported by John George and fixed by John George (viewfs)<br>
+     <b>distcp over viewfs is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8288">HADOOP-8288</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Remove references to mapred.child.ulimit etc. since they are no longer used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8287">HADOOP-8287</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (conf)<br>
+     <b>etc/hadoop is missing hadoop-env.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8286">HADOOP-8286</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (conf)<br>
+     <b>Simplify getting a socket address from conf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8283">HADOOP-8283</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Allow tests to control token service value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8278">HADOOP-8278</a>.
+     Major improvement reported by Tom White and fixed by Tom White (build)<br>
+     <b>Make sure components declare correct set of dependencies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8268">HADOOP-8268</a>.
+     Major bug reported by Radim Kolar and fixed by Radim Kolar (build)<br>
+     <b>A few pom.xml files across the Hadoop project may fail XML validation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8244">HADOOP-8244</a>.
+     Major improvement reported by Henry Robinson and fixed by Henry Robinson <br>
+     <b>Improve comments on ByteBufferReadable.read</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8242">HADOOP-8242</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>AbstractDelegationTokenIdentifier: add getter methods for owner and realuser</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8240">HADOOP-8240</a>.
+     Major improvement reported by Kihwal Lee and fixed by Kihwal Lee (fs)<br>
+     <b>Allow users to specify a checksum type on create()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8239">HADOOP-8239</a>.
+     Major improvement reported by Kihwal Lee and fixed by Kihwal Lee (fs)<br>
+     <b>Extend MD5MD5CRC32FileChecksum to show the actual checksum type being used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8227">HADOOP-8227</a>.
+     Blocker improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>Allow RPC to limit ephemeral port range.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8225">HADOOP-8225</a>.
+     Blocker bug reported by Mithun Radhakrishnan and fixed by Daryn Sharp (security)<br>
+     <b>DistCp fails when invoked by Oozie</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8224">HADOOP-8224</a>.
+     Major improvement reported by Eli Collins and fixed by Tomohiko Kinebuchi (conf)<br>
+     <b>Don't hardcode hdfs.audit.logger in the scripts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8197">HADOOP-8197</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (conf)<br>
+     <b>Configuration logs WARNs on every use of a deprecated key</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8180">HADOOP-8180</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Remove hsqldb from pom.xml since it's not needed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8179">HADOOP-8179</a>.
+     Minor bug reported by Steve Loughran and fixed by Daryn Sharp (fs)<br>
+     <b>risk of NPE in CopyCommands processArguments()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8172">HADOOP-8172</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Anupam Seth (conf)<br>
+     <b>Configuration no longer sets all keys in a deprecated key list.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8168">HADOOP-8168</a>.
+     Major bug reported by Eugene Koontz and fixed by Eugene Koontz (fs)<br>
+     <b>empty-string owners or groups cause MissingFormatWidthException in o.a.h.fs.shell.Ls.ProcessPath()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8167">HADOOP-8167</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (conf)<br>
+     <b>Configuration deprecation logic breaks backwards compatibility</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8144">HADOOP-8144</a>.
+     Minor bug reported by Junping Du and fixed by Junping Du (io)<br>
+     <b>pseudoSortByDistance in NetworkTopology doesn't work properly if there is no local node and the first node is a local-rack node</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8135">HADOOP-8135</a>.
+     Major new feature reported by Henry Robinson and fixed by Henry Robinson (fs)<br>
+     <b>Add ByteBufferReadable interface to FSDataInputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8129">HADOOP-8129</a>.
+     Major bug reported by Ravi Prakash and fixed by Ahmed Radwan (fs , test)<br>
+     <b>ViewFileSystemTestSetup setupForViewFileSystem errs when the user's home directory is somewhere other than /home (e.g. /User)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8110">HADOOP-8110</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Jason Lowe (fs)<br>
+     <b>TestViewFsTrash occasionally fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8104">HADOOP-8104</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Alejandro Abdelnur <br>
+     <b>Inconsistent Jackson versions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8088">HADOOP-8088</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (security)<br>
+     <b>User-group mapping cache incorrectly does negative caching on transient failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8075">HADOOP-8075</a>.
+     Major improvement reported by Eli Collins and fixed by H&#305;z&#305;r Sefa &#304;rken (native)<br>
+     <b>Lower native-hadoop library log from info to debug </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8060">HADOOP-8060</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (fs , util)<br>
+     <b>Add a capability to discover and set checksum types per file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8031">HADOOP-8031</a>.
+     Major bug reported by Elias Ross and fixed by Elias Ross (conf)<br>
+     <b>Configuration class fails to find embedded .jar resources; should use URL.openStream()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8014">HADOOP-8014</a>.
+     Major bug reported by Daryn Sharp and fixed by John George (fs)<br>
+     <b>ViewFileSystem does not correctly implement getDefaultBlockSize, getDefaultReplication, getContentSummary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8005">HADOOP-8005</a>.
+     Major bug reported by Joe Crobak and fixed by Jason Lowe (scripts)<br>
+     <b>Multiple SLF4J binding message in .out file for all daemons</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7967">HADOOP-7967</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (fs , security)<br>
+     <b>Need generalized multi-token filesystem support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7868">HADOOP-7868</a>.
+     Major bug reported by James Page and fixed by Trevor Robinson (native)<br>
+     <b>Hadoop native fails to compile when default linker option is -Wl,--as-needed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7818">HADOOP-7818</a>.
+     Minor bug reported by Eli Collins and fixed by madhukara phatak (util)<br>
+     <b>DiskChecker#checkDir should fail if the directory is not executable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7754">HADOOP-7754</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (native , performance)<br>
+     <b>Expose file descriptors from Hadoop-wrapped local FileSystems</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7703">HADOOP-7703</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K <br>
+     <b>WebAppContext should also be stopped and cleared</b><br>
+     <blockquote>Improved exception handling when shutting down the web server. (Devaraj K via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7510">HADOOP-7510</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>Tokens should use original hostname provided instead of ip</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6963">HADOOP-6963</a>.
+     Critical bug reported by Owen O'Malley and fixed by Ravi Prakash (fs)<br>
+     <b>Fix FileUtil.getDU. It should not include the size of the directory or follow symbolic links</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6802">HADOOP-6802</a>.
+     Major improvement reported by Erik Steffl and fixed by Sho Shimauchi (conf , fs)<br>
+     <b>Remove FS_CLIENT_BUFFER_DIR_KEY = "fs.client.buffer.dir" from CommonConfigurationKeys.java (not used, deprecated)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-3886">HADOOP-3886</a>.
+     Minor bug reported by brien colwell and fixed by Jingguo Yao (documentation)<br>
+     <b>Error in javadoc of Reporter, Mapper and Progressable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-3450">HADOOP-3450</a>.
+     Minor improvement reported by Ari Rabkin and fixed by Sho Shimauchi (fs)<br>
+     <b>Add tests to Local Directory Allocator for asserting their URI-returning capability</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop 2.0.1-alpha Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop 2.0.1-alpha Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.0.0-alpha</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8552">HADOOP-8552</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla (conf , security)<br>
+     <b>Conflict: Same security.log.file for multiple users. </b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop 2.0.0-alpha Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop 2.0.0-alpha Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 0.23.2</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4274">MAPREDUCE-4274</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (performance , task)<br>
+     <b>MapOutputBuffer should use native byte order for kvmeta</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4231">MAPREDUCE-4231</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>Update RAID to not use FSInodeInfo</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4219">MAPREDUCE-4219</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (security)<br>
+     <b>make default container-executor.conf.dir be a path relative to the container-executor binary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4202">MAPREDUCE-4202</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>TestYarnClientProtocolProvider is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4193">MAPREDUCE-4193</a>.
+     Major bug reported by Patrick Hunt and fixed by Patrick Hunt (documentation)<br>
+     <b>broken doc link for yarn-default.xml in site.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4147">MAPREDUCE-4147</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>YARN should not have a compile-time dependency on HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4138">MAPREDUCE-4138</a>.
+     Major improvement reported by Tom White and fixed by Tom White <br>
+     <b>Reduce memory usage of counters due to non-static nested classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4113">MAPREDUCE-4113</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2 , test)<br>
+     <b>Fix tests org.apache.hadoop.mapred.TestClusterMRNotification</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4112">MAPREDUCE-4112</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2 , test)<br>
+     <b>Fix tests org.apache.hadoop.mapred.TestClusterMapReduceTestCase</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4111">MAPREDUCE-4111</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2 , test)<br>
+     <b>Fix tests in org.apache.hadoop.mapred.TestJobName</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4110">MAPREDUCE-4110</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2 , test)<br>
+     <b>Fix tests in org.apache.hadoop.mapred.TestMiniMRClasspath &amp; org.apache.hadoop.mapred.TestMiniMRWithDFSWithDistinctUsers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4108">MAPREDUCE-4108</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Fix tests in org.apache.hadoop.util.TestRunJar</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4107">MAPREDUCE-4107</a>.
+     Major sub-task reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Fix tests in org.apache.hadoop.ipc.TestSocketFactory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4105">MAPREDUCE-4105</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Yarn RackResolver ignores rack configurations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4103">MAPREDUCE-4103</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (documentation)<br>
+     <b>Fix HA docs for changes to shell command fencer args</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4098">MAPREDUCE-4098</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (test)<br>
+     <b>TestMRApps testSetClasspath fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4093">MAPREDUCE-4093</a>.
+     Major improvement reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Improve RM WebApp start up when proxy address is not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4081">MAPREDUCE-4081</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (build , mrv2)<br>
+     <b>TestMROutputFormat.java does not compile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4076">MAPREDUCE-4076</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Stream job fails with ZipException when using the yarn jar command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4066">MAPREDUCE-4066</a>.
+     Minor bug reported by xieguiming and fixed by xieguiming (job submission , mrv2)<br>
+     <b>To get "yarn.app.mapreduce.am.staging-dir" value, should set the default value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4057">MAPREDUCE-4057</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Devaraj K (contrib/raid)<br>
+     <b>Compilation error in RAID </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4008">MAPREDUCE-4008</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2 , scheduler)<br>
+     <b>ResourceManager throws MetricsException on start up saying QueueMetrics MBean already exists</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4007">MAPREDUCE-4007</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>JobClient getJob(JobID) should return NULL if the job does not exist (for backwards compatibility)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3991">MAPREDUCE-3991</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (documentation)<br>
+     <b>Streaming FAQ has some wrong instructions about input files splitting</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3989">MAPREDUCE-3989</a>.
+     Major improvement reported by Patrick Hunt and fixed by Patrick Hunt <br>
+     <b>cap space usage of default log4j rolling policy (mr specific changes)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3974">MAPREDUCE-3974</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Aaron T. Myers <br>
+     <b>TestSubmitJob in MR1 tests doesn't compile after HDFS-1623 merge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3958">MAPREDUCE-3958</a>.
+     Major bug reported by Bikas Saha and fixed by Bikas Saha (mrv2)<br>
+     <b>RM: Remove RMNodeState and replace it with NodeState</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3955">MAPREDUCE-3955</a>.
+     Blocker improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (mrv2)<br>
+     <b>Replace ProtoOverHadoopRpcEngine with ProtobufRpcEngine.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3952">MAPREDUCE-3952</a>.
+     Major bug reported by Zhenxiao Luo and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>In MR2, when the total input paths to process == 1, CombineFileInputFormat.getSplits() returns 0 splits</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3935">MAPREDUCE-3935</a>.
+     Major improvement reported by Tom White and fixed by Tom White (client)<br>
+     <b>Annotate Counters.Counter and Counters.Group as @Public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3933">MAPREDUCE-3933</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2 , test)<br>
+     <b>Failures because MALLOC_ARENA_MAX is not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3916">MAPREDUCE-3916</a>.
+     Critical bug reported by Roman Shaposhnik and fixed by Devaraj K (mrv2 , resourcemanager , webapps)<br>
+     <b>various issues with running yarn proxyserver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3909">MAPREDUCE-3909</a>.
+     Trivial improvement reported by Steve Loughran and fixed by Steve Loughran (mrv2)<br>
+     <b>javadoc the Service interfaces</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3885">MAPREDUCE-3885</a>.
+     Major improvement reported by Devaraj Das and fixed by Devaraj Das (mrv2)<br>
+     <b>Apply the fix similar to HADOOP-8084</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3883">MAPREDUCE-3883</a>.
+     Minor improvement reported by Eugene Koontz and fixed by Eugene Koontz (documentation , mrv2)<br>
+     <b>Document yarn.nodemanager.delete.debug-delay-sec configuration property</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3869">MAPREDUCE-3869</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Distributed shell application fails with NoClassDefFoundError</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3867">MAPREDUCE-3867</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (test)<br>
+     <b>MiniMRYarn/MiniYarn uses fixed ports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3818">MAPREDUCE-3818</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Suresh Srinivas (build , test)<br>
+     <b>Trunk MRV1 compilation is broken.</b><br>
+     <blockquote>Fixed broken compilation in TestSubmitJob after the patch for HDFS-2895.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3740">MAPREDUCE-3740</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Mapreduce Trunk compilation fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3578">MAPREDUCE-3578</a>.
+     Major bug reported by Gilad Wolff and fixed by Tom White (nodemanager)<br>
+     <b>starting nodemanager as 'root' gives "Unknown -jvm option"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3545">MAPREDUCE-3545</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Remove Avro RPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3431">MAPREDUCE-3431</a>.
+     Minor bug reported by Steve Loughran and fixed by Steve Loughran (resourcemanager)<br>
+     <b>NPE in Resource Manager shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3377">MAPREDUCE-3377</a>.
+     Major bug reported by Jane Chen and fixed by Jane Chen <br>
+     <b>Compatibility issue with 0.20.203.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3353">MAPREDUCE-3353</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Bikas Saha (applicationmaster , mrv2 , resourcemanager)<br>
+     <b>Need a RM-&gt;AM channel to inform AMs about faulty/unhealthy/lost nodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3173">MAPREDUCE-3173</a>.
+     Critical bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>MRV2 UI doesn't work properly without internet</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2942">MAPREDUCE-2942</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Thomas Graves <br>
+     <b>TestNMAuditLogger.testNMAuditLoggerWithIP failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2934">MAPREDUCE-2934</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (mrv2)<br>
+     <b>MR portion of HADOOP-7607 - Simplify the RPC proxy cleanup process</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2887">MAPREDUCE-2887</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>MR changes to match HADOOP-7524 (multiple RPC protocols)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3418">HDFS-3418</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Rename BlockWithLocationsProto datanodeIDs field to storageIDs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3396">HDFS-3396</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (fuse-dfs)<br>
+     <b>FUSE build fails on Ubuntu 12.04</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3395">HDFS-3395</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3378">HDFS-3378</a>.
+     Trivial improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3376">HDFS-3376</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>DFSClient fails to make connection to DN if there are many unusable cached sockets</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3375">HDFS-3375</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Put client name in DataXceiver thread name for readBlock and keepalive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3365">HDFS-3365</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>Enable users to disable socket caching in DFS client configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3363">HDFS-3363</a>.
+     Minor sub-task reported by John George and fixed by John George (name-node)<br>
+     <b>blockmanagement should stop using INodeFile &amp; INodeFileUC </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3357">HDFS-3357</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>DataXceiver reads from client socket with incorrect/no timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3351">HDFS-3351</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>NameNode#initializeGenericKeys should always set fs.defaultFS regardless of whether HA or Federation is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3350">HDFS-3350</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>findbugs warning: INodeFileUnderConstruction doesn't override INodeFile.equals(Object)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3339">HDFS-3339</a>.
+     Minor sub-task reported by John George and fixed by John George (name-node)<br>
+     <b>change INode to package private</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3336">HDFS-3336</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>hdfs launcher script will be better off not special casing namenode command with regards to hadoop.security.logger</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3332">HDFS-3332</a>.
+     Major bug reported by amith and fixed by amith (data-node)<br>
+     <b>NullPointerException in DN when DirectoryScanner is trying to report bad blocks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3330">HDFS-3330</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>If GetImageServlet throws an Error or RTE, response has HTTP "OK" status</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3328">HDFS-3328</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by Eli Collins (data-node)<br>
+     <b>NPE in DataNode.getIpcPort</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3326">HDFS-3326</a>.
+     Trivial bug reported by J.Andreina and fixed by Matthew Jacobs (name-node)<br>
+     <b>Append enabled log message uses the wrong variable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3322">HDFS-3322</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Update file context to use HdfsDataInputStream and HdfsDataOutputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3319">HDFS-3319</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>DFSOutputStream should not start a thread in constructors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3314">HDFS-3314</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS operation for getHomeDirectory is incorrect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3309">HDFS-3309</a>.
+     Major bug reported by Romain Rigaux and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS (Hoop) chmod not supporting octal and sticky bit permissions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3305">HDFS-3305</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>GetImageServlet should consider SBN a valid requestor in a secure HA setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3303">HDFS-3303</a>.
+     Minor bug reported by Brandon Li and fixed by Brandon Li (name-node)<br>
+     <b>RemoteEditLogManifest doesn't need to implement Writable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3298">HDFS-3298</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Add HdfsDataOutputStream as a public API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3294">HDFS-3294</a>.
+     Trivial improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , name-node)<br>
+     <b>Fix indentation in NamenodeWebHdfsMethods and DatanodeWebHdfsMethods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3286">HDFS-3286</a>.
+     Major bug reported by J.Andreina and fixed by Ashish Singhi (balancer)<br>
+     <b>When the threshold value for balancer is 0 (zero), unexpected output is displayed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3284">HDFS-3284</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (ha , security)<br>
+     <b>bootstrapStandby fails in secure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3282">HDFS-3282</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (hdfs client)<br>
+     <b>Add HdfsDataInputStream as a public API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3280">HDFS-3280</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>DFSOutputStream.sync should not be synchronized</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3279">HDFS-3279</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Arpit Gupta (name-node)<br>
+     <b>One of the FSEditLog constructors should be moved to TestEditLog</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3275">HDFS-3275</a>.
+     Major bug reported by Vinithra Varadharajan and fixed by amith (ha , name-node)<br>
+     <b>Format command overwrites contents of non-empty shared edits dir without any prompting if name dirs are empty</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3268">HDFS-3268</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (ha , hdfs client)<br>
+     <b>HDFS mishandles token service &amp; is incompatible with HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3263">HDFS-3263</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS should read HDFS config from Hadoop site.xml files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3260">HDFS-3260</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>TestDatanodeRegistration should set minimum DN version in addition to minimum NN version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3259">HDFS-3259</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>NameNode#initializeSharedEdits should populate shared edits dir with edit log segments</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3256">HDFS-3256</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>HDFS considers blocks under-replicated if topology script is configured with only 1 rack</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3255">HDFS-3255</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (ha , hdfs client)<br>
+     <b>HA DFS returns wrong token service</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3254">HDFS-3254</a>.
+     Major bug reported by Anupam Seth and fixed by Anupam Seth (fuse-dfs)<br>
+     <b>Branch-2 build broken due to wrong version number in fuse-dfs' pom.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3249">HDFS-3249</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Use ToolRunner.confirmPrompt in NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3248">HDFS-3248</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>bootstrapStandby repeated twice in hdfs namenode usage message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3247">HDFS-3247</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>Improve bootstrapStandby behavior when original NN is not active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3244">HDFS-3244</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Remove dead writable code from hdfs/protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3240">HDFS-3240</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3238">HDFS-3238</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>ServerCommand and friends don't need to be writables</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3236">HDFS-3236</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>NameNode does not initialize generic conf keys when started with -initializeSharedEditsDir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3234">HDFS-3234</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Todd Lipcon (tools)<br>
+     <b>Accidentally left log message in GetConf after HDFS-3226</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3226">HDFS-3226</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (tools)<br>
+     <b>Allow GetConf tool to print arbitrary keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3222">HDFS-3222</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (hdfs client)<br>
+     <b>DFSInputStream#openInfo should not silently get the length as 0 when locations length is zero for last partial block.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3214">HDFS-3214</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>InterDatanodeProtocolServerSideTranslatorPB doesn't handle null response from initReplicaRecovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3211">HDFS-3211</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (ha , name-node)<br>
+     <b>JournalProtocol changes required for introducing epoch and fencing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3210">HDFS-3210</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>JsonUtil#toJsonMap for a DatanodeInfo should use "ipAddr" instead of "name"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3208">HDFS-3208</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Bogus entries in hosts files are incorrectly displayed in the report </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3204">HDFS-3204</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Minor modification to JournalProtocol.proto to make it generic</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3202">HDFS-3202</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (data-node)<br>
+     <b>NamespaceInfo PB translation drops build version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3199">HDFS-3199</a>.
+     Major bug reported by Eli Collins and fixed by Todd Lipcon <br>
+     <b>TestValidateConfigurationSettings is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3187">HDFS-3187</a>.
+     Minor sub-task reported by Todd Lipcon and fixed by Todd Lipcon (build)<br>
+     <b>Upgrade guava to 11.0.2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3181">HDFS-3181</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>testHardLeaseRecoveryAfterNameNodeRestart fails when length before restart is 1 byte less than CRC chunk size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3179">HDFS-3179</a>.
+     Major improvement reported by Zhanwei.Wang and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Improve the error message: DataStreamer throws an exception, "nodes.length != original.length + 1", on a single datanode cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3172">HDFS-3172</a>.
+     Trivial improvement reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>dfs.upgrade.permission is dead code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3171">HDFS-3171</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>The DatanodeID "name" field is overloaded </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3169">HDFS-3169</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (test)<br>
+     <b>TestFsck should test multiple -move operations in a row</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3167">HDFS-3167</a>.
+     Minor new feature reported by Henry Robinson and fixed by Henry Robinson (test)<br>
+     <b>CLI-based driver for MiniDFSCluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3164">HDFS-3164</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Move DatanodeInfo#hostName to DatanodeID</b><br>
+     <blockquote>This change modifies DatanodeID, which is part of the client to server protocol, therefore clients must be upgraded with servers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3160">HDFS-3160</a>.
+     Major bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>httpfs should exec catalina instead of forking it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3158">HDFS-3158</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>LiveNodes member of NameNodeMXBean should list non-DFS used space and capacity per DN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3156">HDFS-3156</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestDFSHAAdmin is failing post HADOOP-8202</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3155">HDFS-3155</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Clean up FSDataset implementation related code.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3148">HDFS-3148</a>.
+     Major new feature reported by Eli Collins and fixed by Eli Collins (hdfs client , performance)<br>
+     <b>The client should be able to use multiple local interfaces for data transfer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3144">HDFS-3144</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Refactor DatanodeID#getName by use</b><br>
+     <blockquote>This change modifies DatanodeID, which is part of the client to server protocol, therefore clients must be upgraded with servers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3143">HDFS-3143</a>.
+     Major bug reported by Eli Collins and fixed by Arpit Gupta (test)<br>
+     <b>TestGetBlocks.testGetBlocks is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3142">HDFS-3142</a>.
+     Blocker bug reported by Eli Collins and fixed by Brandon Li (test)<br>
+     <b>TestHDFSCLI.testAll is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3139">HDFS-3139</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Minor Datanode logging improvement</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3138">HDFS-3138</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Move DatanodeInfo#ipcPort to DatanodeID</b><br>
+     <blockquote>This change modifies DatanodeID, which is part of the client to server protocol, therefore clients must be upgraded with servers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3137">HDFS-3137</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16</b><br>
+     <blockquote>Upgrade from Hadoop versions earlier than 0.18 is not supported as of 2.0. To upgrade from an earlier release, first upgrade to 0.18, and then upgrade again from there; a rough outline follows this entry.</blockquote></li>
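+     <blockquote>A rough outline of the two-step upgrade, assuming the classic upgrade-then-finalize workflow of those releases:
+<pre>
+# Step 1: with the 0.18 binaries installed, upgrade and then finalize.
+bin/start-dfs.sh -upgrade
+bin/hadoop dfsadmin -finalizeUpgrade
+
+# Step 2: repeat with the new binaries to reach the 2.0 layout version.
+sbin/start-dfs.sh -upgrade
+</pre></blockquote>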
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3132">HDFS-3132</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Findbugs warning on HDFS trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3130">HDFS-3130</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Move FSDataset implementation to a package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3129">HDFS-3129</a>.
+     Minor test reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>NetworkTopology: add test that getLeaf should check for invalid topologies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3121">HDFS-3121</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>hdfs tests for HADOOP-8014</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3120">HDFS-3120</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Enable hsync and hflush by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3119">HDFS-3119</a>.
+     Minor bug reported by J.Andreina and fixed by Ashish Singhi (name-node)<br>
+     <b>Overreplicated block is not deleted even after the replication factor is reduced after sync followed by closing that file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3111">HDFS-3111</a>.
+     Trivial task reported by Todd Lipcon and fixed by Uma Maheswara Rao G <br>
+     <b>Missing license headers in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3109">HDFS-3109</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Remove hsqldb exclusions from pom.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3105">HDFS-3105</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Add DatanodeStorage information to block recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3102">HDFS-3102</a>.
+     Major new feature reported by Todd Lipcon and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>Add CLI tool to initialize the shared-edits dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3101">HDFS-3101</a>.
+     Major bug reported by Zhanwei.Wang and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>cannot read empty file using webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3100">HDFS-3100</a>.
+     Major bug reported by Zhanwei.Wang and fixed by Brandon Li (data-node)<br>
+     <b>failed to append data</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3099">HDFS-3099</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>SecondaryNameNode does not properly initialize metrics system</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3094">HDFS-3094</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>add -nonInteractive and -force options to the namenode -format command</b><br>
+     <blockquote>The 'namenode -format' command now supports the flags '-nonInteractive' and '-force' so that it can be run without user input; a usage sketch follows this entry.</blockquote></li>
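+     <blockquote>A minimal usage sketch of the new flags, assuming the 2.0-era 'hdfs' launcher script is on the PATH:
+<pre>
+# Format without prompting, even if the name dirs already contain data.
+hdfs namenode -format -force
+
+# Never prompt: abort instead of asking if the name dirs are non-empty.
+hdfs namenode -format -nonInteractive
+</pre></blockquote>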
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3093">HDFS-3093</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>TestAllowFormat is trying to be interactive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3091">HDFS-3091</a>.
+     Major improvement reported by Uma Maheswara Rao G and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client , name-node)<br>
+     <b>Update the usage limitations of the ReplaceDatanodeOnFailure policy in the config description for smaller clusters.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3089">HDFS-3089</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Move FSDatasetInterface and other related classes/interfaces to a package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3088">HDFS-3088</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Move FSDatasetInterface inner classes to a package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3086">HDFS-3086</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Change Datanode not to send storage list in registration - it will be sent in block report</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3084">HDFS-3084</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Todd Lipcon (ha)<br>
+     <b>FenceMethod.tryFence() and ShellCommandFencer should pass namenodeId as well as host:port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3083">HDFS-3083</a>.
+     Critical bug reported by Mingjie Lai and fixed by Aaron T. Myers (ha , security)<br>
+     <b>Cannot run an MR job with HA and security enabled when second-listed NN active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3082">HDFS-3082</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Clean up FSDatasetInterface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3071">HDFS-3071</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Todd Lipcon (ha)<br>
+     <b>haadmin failover command does not provide enough detail when the target NN is not ready to be active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3070">HDFS-3070</a>.
+     Major bug reported by Stephen Chu and fixed by Aaron T. Myers (balancer)<br>
+     <b>HDFS balancer doesn't ensure that hdfs-site.xml is loaded</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3066">HDFS-3066</a>.
+     Major improvement reported by Patrick Hunt and fixed by Patrick Hunt (scripts)<br>
+     <b>cap space usage of default log4j rolling policy (hdfs specific changes)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3062">HDFS-3062</a>.
+     Critical bug reported by Mingjie Lai and fixed by Mingjie Lai (ha , security)<br>
+     <b>Fail to submit mapred job on a secured HA HDFS: logical URI cannot be picked up by job submission.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3057">HDFS-3057</a>.
+     Major bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>httpfs and hdfs launcher scripts should honor CATALINA_HOME and HADOOP_LIBEXEC_DIR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3056">HDFS-3056</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Add an interface for DataBlockScanner logging</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3050">HDFS-3050</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>rework OEV to share more code with the NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3044">HDFS-3044</a>.
+     Major improvement reported by Eli Collins and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>fsck move should be non-destructive by default</b><br>
+     <blockquote>The fsck "move" option is no longer destructive. It copies the accessible blocks of corrupt files to lost and found as before, but no longer deletes the corrupt files after copying the blocks. The original, destructive behavior can be enabled by specifying both the "move" and "delete" options. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3038">HDFS-3038</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Add FSEditLog.metrics to findbugs exclude list</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3036">HDFS-3036</a>.
+     Trivial improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>Remove unused method DFSUtil#isDefaultNamenodeAddress</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3032">HDFS-3032</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (hdfs client)<br>
+     <b>Lease renewer tries forever even if renewal is not possible</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3030">HDFS-3030</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Remove getProtocolVersion and getProtocolSignature from translators</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3026">HDFS-3026</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , name-node)<br>
+     <b>HA: Handle failure during HA state transition</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3024">HDFS-3024</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Improve performance of stringification in addStoredBlock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3021">HDFS-3021</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Use generic type to declare FSDatasetInterface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3020">HDFS-3020</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Auto-logSync based on edit log buffer size broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3014">HDFS-3014</a>.
+     Major improvement reported by Sho Shimauchi and fixed by Sho Shimauchi (name-node)<br>
+     <b>FSEditLogOp and its subclasses should have a toString() method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3005">HDFS-3005</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>ConcurrentModificationException in FSDataset$FSVolume.getDfsUsed(..)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3004">HDFS-3004</a>.
+     Major new feature reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (tools)<br>
+     <b>Implement Recovery Mode</b><br>
+     <blockquote>This is a new feature; it is documented in hdfs_user_guide.xml. A minimal invocation sketch follows this entry.</blockquote></li>
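+     <blockquote>A minimal invocation sketch; see hdfs_user_guide.xml for the authoritative usage:
+<pre>
+# Start the NameNode in Recovery Mode to salvage as much as possible
+# from a corrupt or truncated edit log; it prompts before each repair.
+hdfs namenode -recover
+</pre></blockquote>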
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3003">HDFS-3003</a>.
+     Trivial improvement reported by Brandon Li and fixed by Brandon Li (name-node)<br>
+     <b>Remove getHostPortString() from NameNode, replace it with NetUtils.getHostPortString()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3000">HDFS-3000</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>Add a public API for setting quotas</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2995">HDFS-2995</a>.
+     Major bug reported by Todd Lipcon and fixed by Eli Collins (scripts)<br>
+     <b>start-dfs.sh should only start the 2NN for namenodes with dfs.namenode.secondary.http-address configured</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2983">HDFS-2983</a>.
+     Major improvement reported by Eli Collins and fixed by Aaron T. Myers <br>
+     <b>Relax the build version check to permit rolling upgrades within a release</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2968">HDFS-2968</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node , name-node)<br>
+     <b>Protocol translator for BlockRecoveryCommand broken when multiple blocks need recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2941">HDFS-2941</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client , name-node)<br>
+     <b>Add an administrative command to download a copy of the fsimage from the NN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2899">HDFS-2899</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Service protocol change to support multiple storages added in HDFS-2880</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2895">HDFS-2895</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Remove Writable wire protocol related code that is no longer necessary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2880">HDFS-2880</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Protocol buffer changes to add support for multiple storages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2878">HDFS-2878</a>.
+     Blocker bug reported by Eli Collins and fixed by Todd Lipcon (test)<br>
+     <b>TestBlockRecovery does not compile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2815">HDFS-2815</a>.
+     Critical bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Namenode is not coming out of safemode when we perform (NN crash + restart). Also FSCK report shows missing blocks.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2801">HDFS-2801</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Provide a method in client side translators to check for the methods supported in the underlying protocol.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2799">HDFS-2799</a>.
+     Major bug reported by Eli Collins and fixed by amith (name-node)<br>
+     <b>Trim fs.checkpoint.dir values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2768">HDFS-2768</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>BackupNode stop cannot close proxy connections because it is not a proxy instance.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2765">HDFS-2765</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestNameEditsConfigs is incorrectly swallowing IOE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2739">HDFS-2739</a>.
+     Critical bug reported by Sho Shimauchi and fixed by Jitendra Nath Pandey <br>
+     <b>SecondaryNameNode doesn't start up</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2731">HDFS-2731</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Todd Lipcon (ha)<br>
+     <b>HA: Autopopulate standby name dirs if they're empty</b><br>
+     <blockquote>The HA NameNode may now be started with the "-bootstrapStandby" flag. This causes it to copy the namespace information and most recent checkpoint from its HA pair, and save it to local storage, allowing an HA setup to be bootstrapped without use of rsync or external tools. A minimal sketch follows this entry.</blockquote></li>
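+     <blockquote>A minimal sketch, run on the new standby host with HA already configured (the start command assumes the 2.0 sbin layout):
+<pre>
+# Copy the namespace info and the most recent checkpoint from the
+# running active NN into this node's empty local name dirs.
+hdfs namenode -bootstrapStandby
+
+# Then start this NameNode as the standby.
+sbin/hadoop-daemon.sh start namenode
+</pre></blockquote>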
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2708">HDFS-2708</a>.
+     Minor improvement reported by Eli Collins and fixed by Aaron T. Myers (data-node , name-node)<br>
+     <b>Stats for the # of blocks per DN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2700">HDFS-2700</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G <br>
+     <b>TestDataNodeMultipleRegistrations is failing in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2697">HDFS-2697</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Jitendra Nath Pandey <br>
+     <b>Move RefreshAuthPolicy, RefreshUserMappings, GetUserMappings protocol to protocol buffers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2696">HDFS-2696</a>.
+     Major bug reported by Petru Dimulescu and fixed by Bruno Mah&#233; (build , fuse-dfs)<br>
+     <b>Fix the fuse-dfs build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2694">HDFS-2694</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>Removal of Avro broke non-PB NN services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2687">HDFS-2687</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Suresh Srinivas (test)<br>
+     <b>Tests are failing with ClassCastException due to new protocol changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2676">HDFS-2676</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Remove Avro RPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2669">HDFS-2669</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Enable protobuf rpc for ClientNamenodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2666">HDFS-2666</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (test)<br>
+     <b>TestBackupNode fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2663">HDFS-2663</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Optional parameters are not handled correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2661">HDFS-2661</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Enable protobuf RPC for DatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2651">HDFS-2651</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>ClientNameNodeProtocol Translators for Protocol Buffers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2650">HDFS-2650</a>.
+     Minor improvement reported by Hari Mankude and fixed by Hari Mankude <br>
+     <b>Replace @inheritDoc with @Override </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2647">HDFS-2647</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (balancer , data-node , hdfs client , name-node)<br>
+     <b>Enable protobuf RPC for InterDatanodeProtocol, ClientDatanodeProtocol, JournalProtocol and NamenodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2642">HDFS-2642</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Protobuf translators for DatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2636">HDFS-2636</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Implement protobuf service for ClientDatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2629">HDFS-2629</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node)<br>
+     <b>Implement protobuf service for InterDatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2618">HDFS-2618</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Implement protobuf service for NamenodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2597">HDFS-2597</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>ClientNameNodeProtocol in Protocol Buffers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2581">HDFS-2581</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Implement protobuf service for JournalProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2532">HDFS-2532</a>.
+     Critical bug reported by Todd Lipcon and fixed by Uma Maheswara Rao G (test)<br>
+     <b>TestDfsOverAvroRpc timing out in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2526">HDFS-2526</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client , name-node)<br>
+     <b>(Client)NamenodeProtocolTranslatorR23 do not need to keep a reference to rpcProxyWithoutRetry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2520">HDFS-2520</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node)<br>
+     <b>Protobuf - Add protobuf service for InterDatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2519">HDFS-2519</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Protobuf - Add protobuf service for DatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2518">HDFS-2518</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Protobuf - Add protobuf service for NamenodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2517">HDFS-2517</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Protobuf - Add protobuf service for JournalProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2507">HDFS-2507</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>HA: Allow saveNamespace operations to be canceled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2505">HDFS-2505</a>.
+     Minor test reported by Ravi Prakash and fixed by Ravi Prakash (test)<br>
+     <b>Add a test to verify getFileChecksum works with ViewFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2499">HDFS-2499</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Fix RPC client creation bug from HDFS-2459</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2497">HDFS-2497</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Fix TestBackupNode failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2496">HDFS-2496</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Separate datatypes for DatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2495">HDFS-2495</a>.
+     Major sub-task reported by Tomasz Nykiel and fixed by Tomasz Nykiel (name-node)<br>
+     <b>Increase granularity of write operations in ReplicationMonitor thus reducing contention for write lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2489">HDFS-2489</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Move commands Finalize and Register out of DatanodeCommand class.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2488">HDFS-2488</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node)<br>
+     <b>Separate datatypes for InterDatanodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2481">HDFS-2481</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Sanjay Radia <br>
+     <b>Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2480">HDFS-2480</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Separate datatypes for NamenodeProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2479">HDFS-2479</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>HDFS Client Data Types in Protocol Buffers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2477">HDFS-2477</a>.
+     Major sub-task reported by Tomasz Nykiel and fixed by Tomasz Nykiel (name-node)<br>
+     <b>Optimize computing the diff between a block report and the namenode state.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2476">HDFS-2476</a>.
+     Major sub-task reported by Tomasz Nykiel and fixed by Tomasz Nykiel (name-node)<br>
+     <b>More CPU efficient data structure for under-replicated/over-replicated/invalidate blocks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2459">HDFS-2459</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Separate datatypes for Journal protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2430">HDFS-2430</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>The number of failed or low-resource volumes the NN can tolerate should be configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2413">HDFS-2413</a>.
+     Major improvement reported by Todd Lipcon and fixed by Harsh J (hdfs client)<br>
+     <b>Add public APIs for safemode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2410">HDFS-2410</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node , test)<br>
+     <b>Further clean up hard-coded configuration keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2351">HDFS-2351</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Change Namenode and Datanode to register each of their protocols separately</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2337">HDFS-2337</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>DFSClient shouldn't keep multiple RPC proxy references</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2334">HDFS-2334</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly (name-node)<br>
+     <b>Add Closeable to JournalManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2303">HDFS-2303</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Mingjie Lai (build , scripts)<br>
+     <b>Unbundle jsvc</b><br>
+     <blockquote>To run secure Datanodes, users must install jsvc for their platform and set JSVC_HOME to point to the location of jsvc in their environment; a configuration sketch follows this entry.</blockquote></li>
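+     <blockquote>A minimal configuration sketch; the path below is an assumption, so point JSVC_HOME at wherever jsvc is actually installed on your platform:
+<pre>
+# In hadoop-env.sh: jsvc is only needed to start secure Datanodes,
+# which bind privileged ports before dropping to HADOOP_SECURE_DN_USER.
+export JSVC_HOME=/usr/lib/jsvc   # assumed install location; adjust as needed
+</pre></blockquote>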
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2223">HDFS-2223</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Untangle dependencies between NN components</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2181">HDFS-2181</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Separate HDFS Client wire protocol data types</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2158">HDFS-2158</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Add JournalSet to manage the set of journals.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2038">HDFS-2038</a>.
+     Critical test reported by Daryn Sharp and fixed by Kihwal Lee (test)<br>
+     <b>Update test to handle relative paths with globs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2018">HDFS-2018</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>1073: Move all journal stream management code into one place</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1765">HDFS-1765</a>.
+     Major bug reported by Hairong Kuang and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Block Replication should respect under-replication block priority</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1623">HDFS-1623</a>.
+     Major new feature reported by Sanjay Radia and fixed by  <br>
+     <b>High Availability Framework for HDFS NN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1580">HDFS-1580</a>.
+     Major improvement reported by Ivan Kelly and fixed by Jitendra Nath Pandey (name-node)<br>
+     <b>Add interface for generic Write Ahead Logging mechanisms</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-891">HDFS-891</a>.
+     Minor bug reported by Steve Loughran and fixed by Harsh J (data-node)<br>
+     <b>DataNode no longer needs to check for dfs.network.script</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-860">HDFS-860</a>.
+     Minor wish reported by Brian Bockelman and fixed by Brian Bockelman (fuse-dfs)<br>
+     <b>fuse-dfs truncate behavior causes issues with scp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-395">HDFS-395</a>.
+     Major sub-task reported by dhruba borthakur and fixed by Tomasz Nykiel (data-node , name-node)<br>
+     <b>DFS Scalability: Incremental block reports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-309">HDFS-309</a>.
+     Major improvement reported by Todd Lipcon and fixed by Sho Shimauchi <br>
+     <b>FSEditLog should log progress during replay</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-234">HDFS-234</a>.
+     Major new feature reported by Luca Telloli and fixed by Ivan Kelly <br>
+     <b>Integration with BookKeeper logging system</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-208">HDFS-208</a>.
+     Minor improvement reported by Allen Wittenauer and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>name node should warn if only one dir is listed in dfs.name.dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8619">HADOOP-8619</a>.
+     Major improvement reported by Radim Kolar and fixed by Chris Douglas (io)<br>
+     <b>WritableComparator must implement no-arg constructor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8398">HADOOP-8398</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Cleanup BlockLocation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8388">HADOOP-8388</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Remove unused BlockLocation serialization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8366">HADOOP-8366</a>.
+     Blocker improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Use ProtoBuf for RpcResponseHeader</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8359">HADOOP-8359</a>.
+     Trivial task reported by Harsh J and fixed by Anupam Seth (conf)<br>
+     <b>Clear up javadoc warnings in hadoop-common-project</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8356">HADOOP-8356</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (fs)<br>
+     <b>FileSystem service loading mechanism should print the FileSystem impl it is failing to load</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8355">HADOOP-8355</a>.
+     Minor bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>SPNEGO filter throws/logs exception when authentication fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8353">HADOOP-8353</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8350">HADOOP-8350</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (util)<br>
+     <b>Improve NetUtils.getInputStream to return a stream which has a tunable timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8349">HADOOP-8349</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (viewfs)<br>
+     <b>ViewFS doesn't work when the root of a file system is mounted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8347">HADOOP-8347</a>.
+     Major bug reported by Philip Zeyliger and fixed by Philip Zeyliger (security)<br>
+     <b>Hadoop Common logs misspell 'successful'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8343">HADOOP-8343</a>.
+     Major new feature reported by Philip Zeyliger and fixed by Alejandro Abdelnur (util)<br>
+     <b>Allow configuration of authorization for JmxJsonServlet and MetricsServlet</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8314">HADOOP-8314</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>HttpServer#hasAdminAccess should return false if authorization is enabled but user is not authenticated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8310">HADOOP-8310</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (fs)<br>
+     <b>FileContext#checkPath should handle URIs with no port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8309">HADOOP-8309</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Pseudo &amp; Kerberos AuthenticationHandler should use getType() to create token</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8296">HADOOP-8296</a>.
+     Minor bug reported by Thomas Graves and fixed by Devaraj K <br>
+     <b>hadoop/yarn daemonlog usage wrong </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8285">HADOOP-8285</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>Use ProtoBuf for RpcPayLoadHeader</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8282">HADOOP-8282</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (scripts)<br>
+     <b>start-all.sh incorrectly refers to start-dfs.sh existence when starting start-yarn.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8280">HADOOP-8280</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan (test , util)<br>
+     <b>Move VersionUtil/TestVersionUtil and GenericTestUtils from HDFS into Common.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8275">HADOOP-8275</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>Range check DelegationKey length </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8270">HADOOP-8270</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>hadoop-daemon.sh stop action should return 0 for an already stopped service </b><br>
+     <blockquote>The daemon stop action no longer returns failure when stopping an already stopped service (see the example sketch after this list).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8264">HADOOP-8264</a>.
+     Trivial bug reported by Bernd Fondermann and fixed by Bernd Fondermann <br>
+     <b>Remove irritating double double quotes in front of hostname </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8263">HADOOP-8263</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>Stringification of IPC calls not useful</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8261">HADOOP-8261</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (fs)<br>
+     <b>Har file system doesn't deal with FS URIs with a host but no port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8251">HADOOP-8251</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (security)<br>
+     <b>SecurityUtil.fetchServiceTicket broken after HADOOP-6941</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8243">HADOOP-8243</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (ha , security)<br>
+     <b>Security support broken in CLI (manual) failover controller</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8238">HADOOP-8238</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>NetUtils#getHostNameOfIP blows up if given ip:port string w/o port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8236">HADOOP-8236</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Todd Lipcon (ha)<br>
+     <b>haadmin should have configurable timeouts for failover commands</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8218">HADOOP-8218</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (ipc , test)<br>
+     <b>RPC.closeProxy shouldn't throw error when closing a mock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8214">HADOOP-8214</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>make hadoop script recognize a full set of deprecated commands</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8211">HADOOP-8211</a>.
+     Major sub-task reported by Eli Collins and fixed by Eli Collins (io , performance)<br>
+     <b>Update commons-net version to 3.1</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8210">HADOOP-8210</a>.
+     Major sub-task reported by Eli Collins and fixed by Eli Collins (io , performance)<br>
+     <b>Common side of HDFS-3148</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8206">HADOOP-8206</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>Common portion of ZK-based failover controller</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8204">HADOOP-8204</a>.
+     Major bug reported by Tom White and fixed by Todd Lipcon <br>
+     <b>TestHealthMonitor fails occasionally </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8202">HADOOP-8202</a>.
+     Minor bug reported by Hari Mankude and fixed by Hari Mankude (ipc)<br>
+     <b>stopProxy() is not closing the proxies correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8200">HADOOP-8200</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (conf)<br>
+     <b>Remove HADOOP_[JOBTRACKER|TASKTRACKER]_OPTS </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8199">HADOOP-8199</a>.
+     Major bug reported by Nishan Shetty and fixed by Devaraj K <br>
+     <b>Fix issues in start-all.sh and stop-all.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8193">HADOOP-8193</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>Refactor FailoverController/HAAdmin code to add an abstract class for "target" services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8191">HADOOP-8191</a>.
+     Major bug reported by Philip Zeyliger and fixed by Todd Lipcon (ha)<br>
+     <b>SshFenceByTcpPort uses netcat incorrectly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8189">HADOOP-8189</a>.
+     Major bug reported by Jonathan Natkins and fixed by Jonathan Natkins (security)<br>
+     <b>LdapGroupsMapping shouldn't throw away IOException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8185">HADOOP-8185</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta (documentation)<br>
+     <b>Update namenode -format documentation and add -nonInteractive and -force</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8184">HADOOP-8184</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>ProtoBuf RPC engine does not need its own reply packet - it can use the IPC layer reply packet.</b><br>
+     <blockquote>This change will affect the output of errors for some Hadoop CLI commands. Specifically, the name of the exception class will no longer appear, and instead only the text of the exception message will appear.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8183">HADOOP-8183</a>.
+     Minor improvement reported by Harsh J and fixed by Harsh J (util)<br>
+     <b>Stop using "mapred.used.genericoptionsparser" to avoid unnecessary warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8169">HADOOP-8169</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (build)<br>
+     <b>javadoc generation fails with java.lang.OutOfMemoryError: Java heap space</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8164">HADOOP-8164</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Daryn Sharp (fs)<br>
+     <b>Handle paths using backslash as path separator for Windows only</b><br>
+     <blockquote>This jira only allows providing paths using backslash as the separator on Windows. The backslash on *nix systems will be used as an escape character. The support for paths using backslash as path separator will be removed in HADOOP-8139 in release 23.3.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8163">HADOOP-8163</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>Improve ActiveStandbyElector to provide hooks for fencing old active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8159">HADOOP-8159</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>NetworkTopology: getLeaf should check for invalid topologies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8154">HADOOP-8154</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (conf)<br>
+     <b>DNS#getIPs shouldn't silently return the local host IP for bogus interface names</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8152">HADOOP-8152</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (security)<br>
+     <b>Expand public APIs for security library classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8149">HADOOP-8149</a>.
+     Major improvement reported by Patrick Hunt and fixed by Patrick Hunt (conf)<br>
+     <b>cap space usage of default log4j rolling policy </b><br>
+     <blockquote>Hadoop log files are now rolled by size instead of date (daily) by default. Tools that depend on the log file name format will need to be updated. Users who would like to maintain the previous settings of hadoop.root.logger and hadoop.security.logger can use their current log4j.properties files and update the HADOOP_ROOT_LOGGER and HADOOP_SECURITY_LOGGER environment variables to use DRFA and DRFAS respectively (see the example sketch after this list).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8142">HADOOP-8142</a>.
+     Major task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (build)<br>
+     <b>Update versions from 0.23.2 to 0.23.3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8141">HADOOP-8141</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (security)<br>
+     <b>Add method to init krb5 cipher suites</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8121">HADOOP-8121</a>.
+     Major new feature reported by Jonathan Natkins and fixed by Jonathan Natkins (security)<br>
+     <b>Active Directory Group Mapping Service</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8119">HADOOP-8119</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>Fix javac warnings in TestAuthenticationFilter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8118">HADOOP-8118</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (metrics)<br>
+     <b>Print the stack trace of InstanceAlreadyExistsException in trace level</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8117">HADOOP-8117</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (build , test)<br>
+     <b>Upgrade test build to Surefire 2.12</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8113">HADOOP-8113</a>.
+     Trivial improvement reported by Eugene Koontz and fixed by Eugene Koontz (documentation)<br>
+     <b>Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too (not just MapReduce)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8098">HADOOP-8098</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KerberosAuthenticatorHandler should use _HOST replacement to resolve principal name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8086">HADOOP-8086</a>.
+     Minor improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KerberosName silently sets defaultRealm to "" if the Kerberos config is not found, it should log a WARN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8084">HADOOP-8084</a>.
+     Major improvement reported by Devaraj Das and fixed by Devaraj Das (ipc)<br>
+     <b>Protobuf RPC engine can be optimized to not do copying for the RPC request/response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8077">HADOOP-8077</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>HA: fencing method should be able to be configured on a per-NN or per-NS basis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8070">HADOOP-8070</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (benchmarks , ipc)<br>
+     <b>Add standalone benchmark of protobuf IPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8007">HADOOP-8007</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Todd Lipcon (ha)<br>
+     <b>HA: use substitution token for fencing argument</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7994">HADOOP-7994</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Remove getProtocolVersion and getProtocolSignature from the client side translator and server side implementation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7968">HADOOP-7968</a>.
+     Minor bug reported by Todd Lipcon and fixed by Sho Shimauchi (ipc)<br>
+     <b>Errant println left in RPC.getHighestSupportedProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7965">HADOOP-7965</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (ipc)<br>
+     <b>Support for protocol version and signature in PB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7957">HADOOP-7957</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Classes deriving GetGroupsBase should be able to override proxy creation.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7940">HADOOP-7940</a>.
+     Major bug reported by Aaron and fixed by Csaba Miklos (io)<br>
+     <b>method clear() in org.apache.hadoop.io.Text does not work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7931">HADOOP-7931</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ipc)<br>
+     <b>o.a.h.ipc.WritableRpcEngine should have a way to force initialization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7920">HADOOP-7920</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (ipc)<br>
+     <b>Remove Avro RPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7913">HADOOP-7913</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>Fix bug in ProtoBufRpcEngine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7900">HADOOP-7900</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (fs)<br>
+     <b>LocalDirAllocator confChanged() accesses conf.get() twice</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7899">HADOOP-7899</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Generate proto java files as part of the build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7897">HADOOP-7897</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (ipc)<br>
+     <b>ProtobufRPCEngine client side exception mechanism is not consistent with WritableRpcEngine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7892">HADOOP-7892</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>IPC logs too verbose after "RpcKind" introduction</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7888">HADOOP-7888</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (test)<br>
+     <b>TestFailoverProxy fails intermittently on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7876">HADOOP-7876</a>.
+     Major new feature reported by Suresh Srinivas and fixed by Suresh Srinivas (ipc)<br>
+     <b>Allow access to BlockKey/DelegationKey encoded key for RPC over protobuf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7875">HADOOP-7875</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (ipc)<br>
+     <b>Add helper class to unwrap RemoteException from ServiceException thrown on protobuf based RPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7862">HADOOP-7862</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>Move the support for multiple protocols to lower layer so that Writable, PB and Avro can all use it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7833">HADOOP-7833</a>.
+     Major bug reported by John Lee and fixed by John Lee (ipc)<br>
+     <b>Inner classes of org.apache.hadoop.ipc.protobuf.HadoopRpcProtos generate findbugs warnings which result in -1 for findbugs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7806">HADOOP-7806</a>.
+     Major new feature reported by Harsh J and fixed by Harsh J (util)<br>
+     <b>Support binding to sub-interfaces</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7788">HADOOP-7788</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon (ha)<br>
+     <b>HA: Simple HealthMonitor class to watch an HAService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7776">HADOOP-7776</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>Make the Ipc-Header in an RPC-Payload an explicit header</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7773">HADOOP-7773</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (ipc)<br>
+     <b>Add support for protocol buffer based RPC engine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7729">HADOOP-7729</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>Send back valid HTTP response if user hits IPC port with HTTP GET</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7717">HADOOP-7717</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (ipc)<br>
+     <b>Move handling of concurrent client fail-overs to RetryInvocationHandler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7716">HADOOP-7716</a>.
+     Minor improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>RPC protocol registration on SS does not log the protocol name (only the class which may be different)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7695">HADOOP-7695</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (ipc)<br>
+     <b>RPC.stopProxy can throw unintended exception while logging error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7693">HADOOP-7693</a>.
+     Major improvement reported by Doug Cutting and fixed by Doug Cutting (ipc)<br>
+     <b>fix RPC.Server#addProtocol to work in AvroRpcEngine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7687">HADOOP-7687</a>.
+     Minor improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Make getProtocolSignature public </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7635">HADOOP-7635</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (ipc)<br>
+     <b>RetryInvocationHandler should release underlying resources on close</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7621">HADOOP-7621</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Aaron T. Myers (security)<br>
+     <b>alfredo config should be in a file not readable by users</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7607">HADOOP-7607</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (ipc)<br>
+     <b>Simplify the RPC proxy cleanup process</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7557">HADOOP-7557</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Make IPC header be extensible</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7549">HADOOP-7549</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (fs)<br>
+     <b>Use JDK ServiceLoader mechanism to find FileSystem implementations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7524">HADOOP-7524</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Sanjay Radia (ipc)<br>
+     <b>Change RPC to allow multiple protocols including multiple versions of the same protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7454">HADOOP-7454</a>.
+     Major new feature reported by Aaron T. Myers and fixed by  <br>
+     <b>Common side of High Availability Framework (HDFS-1623)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7358">HADOOP-7358</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>Improve log levels when exceptions caught in RPC handler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7350">HADOOP-7350</a>.
+     Major improvement reported by Tom White and fixed by Tom White (conf , io)<br>
+     <b>Use ServiceLoader to discover compression codec classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7069">HADOOP-7069</a>.
+     Major improvement reported by Jakob Homan and fixed by  (documentation)<br>
+     <b>Replace forrest with supported framework</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7030">HADOOP-7030</a>.
+     Major new feature reported by Patrick Angeles and fixed by Tom White <br>
+     <b>Add TableMapping topology implementation to read host to rack mapping from a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6941">HADOOP-6941</a>.
+     Major bug reported by Stephen Watt and fixed by Devaraj Das <br>
+     <b>Support non-SUN JREs in UserGroupInformation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6924">HADOOP-6924</a>.
+     Major bug reported by Stephen Watt and fixed by Devaraj Das <br>
+     <b>Build fails with non-Sun JREs due to different pathing to the operating system architecture shared libraries</b><br>
+     <blockquote></blockquote></li>
+</ul>
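+<p><i>A minimal sketch of the jsvc setup described in the HDFS-2303 note above, assuming an illustrative install location (the actual path depends on how jsvc was installed for your platform):</i></p>
+<pre>
+# Assumed example location; install jsvc for your platform first.
+export JSVC_HOME=/opt/jsvc
+# Secure DataNodes started via the stock scripts now look for jsvc under
+# $JSVC_HOME instead of a copy bundled with Hadoop.
+</pre>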
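+<p><i>A minimal sketch of the HADOOP-8270 behavior change noted above, using the stock hadoop-daemon.sh; the service name is just an example:</i></p>
+<pre>
+hadoop-daemon.sh stop datanode   # stops the running DataNode
+hadoop-daemon.sh stop datanode   # already stopped: now a no-op
+echo $?                          # prints 0 instead of a failure code
+</pre>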
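+<p><i>A minimal sketch of restoring daily log rolling per the HADOOP-8149 note above; the "INFO," level prefix is an assumed, typical value for these variables:</i></p>
+<pre>
+# Point the root and security loggers back at the daily rolling appenders
+# (DRFA/DRFAS) defined in the stock log4j.properties.
+export HADOOP_ROOT_LOGGER="INFO,DRFA"
+export HADOOP_SECURITY_LOGGER="INFO,DRFAS"
+</pre>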
+</body></html>
+<html><head>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop 0.23.2 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop 0.23.2 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 0.23.1</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4043">MAPREDUCE-4043</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , security)<br>
+     <b>Secret keys set in Credentials are not seen by tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4034">MAPREDUCE-4034</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Unable to view task logs on history server with mapreduce.job.acl-view-job=*</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4025">MAPREDUCE-4025</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mr-am , mrv2)<br>
+     <b>AM can crash if task attempt reports bogus progress value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4006">MAPREDUCE-4006</a>.
+     Major bug reported by Jason Lowe and fixed by Siddharth Seth (jobhistoryserver , mrv2)<br>
+     <b>history server container log web UI sometimes combines stderr/stdout/syslog contents together</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4005">MAPREDUCE-4005</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>AM container logs URL is broken for completed apps when log aggregation is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3982">MAPREDUCE-3982</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>TestEmptyJob fails with FileNotFound</b><br>
+     <blockquote>Fixed FileOutputCommitter to not err out for an 'empty-job' whose tasks don't write any outputs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3977">MAPREDUCE-3977</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , nodemanager)<br>
+     <b>LogAggregationService leaks log aggregator objects</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3976">MAPREDUCE-3976</a>.
+     Major bug reported by Bikas Saha and fixed by Jason Lowe (mrv2)<br>
+     <b>TestRMContainerAllocator failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3975">MAPREDUCE-3975</a>.
+     Blocker bug reported by Eric Payne and fixed by Eric Payne (mrv2)<br>
+     <b>Default value not set for Configuration parameter mapreduce.job.local.dir</b><br>
+     <blockquote>Exporting mapreduce.job.local.dir for mapreduce tasks to use as job-level shared scratch space.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3964">MAPREDUCE-3964</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , resourcemanager)<br>
+     <b>ResourceManager does not have JVM metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3961">MAPREDUCE-3961</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Map/ReduceSlotMillis computation incorrect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3960">MAPREDUCE-3960</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>web proxy doesn't forward request to AM with configured hostname/IP</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3954">MAPREDUCE-3954</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Clean up passing HEAPSIZE to yarn and mapred commands.</b><br>
+     <blockquote>Added new environment variables to allow setting heap sizes separately for the different daemons started via the bin scripts.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3944">MAPREDUCE-3944</a>.
+     Blocker sub-task reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>JobHistory web services are slower than the UI and can easily overload the JH</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3931">MAPREDUCE-3931</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR tasks failing due to changing timestamps on Resources to download</b><br>
+     <blockquote>Changed PB implementation of LocalResource to take locks so that race conditions don't fail tasks by inadvertently changing the timestamps.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3930">MAPREDUCE-3930</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>The AM page for a Reducer that has not been launched causes an NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3929">MAPREDUCE-3929</a>.
+     Major bug reported by John George and fixed by John George (mrv2)<br>
+     <b>output of mapred -showacl is not clear</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3922">MAPREDUCE-3922</a>.
+     Minor improvement reported by Eugene Koontz and fixed by Hitesh Shah (build , mrv2)<br>
+     <b>Fix the potential problem compiling 32-bit binaries on an x86_64 host.</b><br>
+     <blockquote>Fixed the build to not compile the 32-bit container-executor binary by default on all platforms.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3920">MAPREDUCE-3920</a>.
+     Major bug reported by Dave Thompson and fixed by Dave Thompson (nodemanager , resourcemanager)<br>
+     <b>Revise yarn default port number selection</b><br>
+     <blockquote>Port number changes for the ResourceManager and NodeManager.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3918">MAPREDUCE-3918</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>proc_historyserver no longer in command line arguments for HistoryServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3913">MAPREDUCE-3913</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , webapps)<br>
+     <b>RM application webpage is unresponsive after 2000 jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3910">MAPREDUCE-3910</a>.
+     Blocker bug reported by John George and fixed by John George (mrv2)<br>
+     <b>user not allowed to submit jobs even though queue -showacls shows it is allowed</b><br>
+     <blockquote>Fixed a bug in CapacityScheduler LeafQueue which was causing app-submission to fail.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3904">MAPREDUCE-3904</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>[NPE] Job history produced with mapreduce.cluster.acls.enabled false can not be viewed with mapreduce.cluster.acls.enabled true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3903">MAPREDUCE-3903</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>no admin override to view jobs on mr app master and job history server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3901">MAPREDUCE-3901</a>.
+     Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (jobhistoryserver , mrv2)<br>
+     <b>lazy load JobHistory Task and TaskAttempt details</b><br>
+     <blockquote>Modified JobHistory records in YARN to lazily load job and task reports so as to improve UI response times.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3897">MAPREDUCE-3897</a>.
+     Critical bug reported by Thomas Graves and fixed by Eric Payne (mrv2)<br>
+     <b>capacity scheduler - maxActiveApplicationsPerUser calculation can be wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3896">MAPREDUCE-3896</a>.
+     Blocker bug reported by John George and fixed by Vinod Kumar Vavilapalli (jobhistoryserver , mrv2)<br>
+     <b>pig job through oozie hangs </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3884">MAPREDUCE-3884</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>PWD should be first in the classpath of MR tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3878">MAPREDUCE-3878</a>.
+     Critical bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Null user on filtered jobhistory job page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3877">MAPREDUCE-3877</a>.
+     Minor test reported by Steve Loughran and fixed by Steve Loughran (mrv2)<br>
+     <b>Add a test to formalise the current state transitions of the yarn lifecycle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3866">MAPREDUCE-3866</a>.
+     Minor bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>bin/yarn prints the command line unnecessarily</b><br>
+     <blockquote>Fixed the bin/yarn script to not print the command line unnecessarily.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3864">MAPREDUCE-3864</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (documentation , security)<br>
+     <b>Fix cluster setup docs for correct SNN HTTPS parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3862">MAPREDUCE-3862</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , nodemanager)<br>
+     <b>Nodemanager can appear to hang on shutdown due to lingering DeletionService threads</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3852">MAPREDUCE-3852</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>test TestLinuxResourceCalculatorPlugin failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3849">MAPREDUCE-3849</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>Change TokenCache's reading of the binary token file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3816">MAPREDUCE-3816</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>capacity scheduler web ui bar graphs for used capacity wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3798">MAPREDUCE-3798</a>.
+     Major test reported by Ravi Prakash and fixed by Ravi Prakash (test)<br>
+     <b>TestJobCleanup testCustomCleanup is failing</b><br>
+     <blockquote>Fixed the failing TestJobCleanup.testCustomCleanup() and moved it to the Maven build.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3792">MAPREDUCE-3792</a>.
+     Critical bug reported by Ramya Sunil and fixed by Jason Lowe (mrv2)<br>
+     <b>job -list displays only the jobs submitted by a particular user</b><br>
+     <blockquote>Fix "bin/mapred job -list" to display all jobs instead of only the jobs owned by the user. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3790">MAPREDUCE-3790</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (contrib/streaming , mrv2)<br>
+     <b>Broken pipe on streaming job can lead to truncated output for a successful job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3738">MAPREDUCE-3738</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , nodemanager)<br>
+     <b>NM can hang during shutdown if AppLogAggregatorImpl thread dies unexpectedly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3730">MAPREDUCE-3730</a>.
+     Minor improvement reported by Jason Lowe and fixed by Jason Lowe (mrv2 , resourcemanager)<br>
+     <b>Allow restarted NM to rejoin cluster before RM expires it</b><br>
+     <blockquote>Modified the RM to allow restarted NMs to rejoin the cluster without waiting for expiry.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3706">MAPREDUCE-3706</a>.
+     Critical bug reported by Thomas Graves and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>HTTP Circular redirect error on the job attempts page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3687">MAPREDUCE-3687</a>.
+     Major bug reported by David Capwell and fixed by Ravi Prakash (mrv2)<br>
+     <b>If AM dies before it returns new tracking URL, proxy redirects to http://N/A/ and doesn't return error code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3686">MAPREDUCE-3686</a>.
+     Critical bug reported by Thomas Graves and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>history server web ui - job counter values for map/reduce not shown properly</b><br>
+     <blockquote>Fixed two bugs in Counters that caused the web app to display zero counter values for framework counters.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3680">MAPREDUCE-3680</a>.
+     Major bug reported by Thomas Graves and fixed by  (mrv2)<br>
+     <b>FifoScheduler web service rest API can print out invalid JSON</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3634">MAPREDUCE-3634</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>All daemons should crash instead of hanging around when their EventHandlers get exceptions</b><br>
+     <blockquote>Fixed all daemons to crash instead of hanging around when their EventHandlers get exceptions.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3614">MAPREDUCE-3614</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>finalState UNDEFINED if AM is killed by hand</b><br>
+     <blockquote>Fixed MR AM to close history file quickly and send a correct final state to the RM when it is killed.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3583">MAPREDUCE-3583</a>.
+     Critical bug reported by Ted Yu and fixed by Ted Yu <br>
+     <b>ProcfsBasedProcessTree#constructProcessInfo() may throw NumberFormatException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3497">MAPREDUCE-3497</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (documentation , mrv2)<br>
+     <b>missing documentation for yarn cli and subcommands - similar to commands_manual.html</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3034">MAPREDUCE-3034</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>NM should act on a REBOOT command from RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3009">MAPREDUCE-3009</a>.
+     Major bug reported by chackaravarthy and fixed by chackaravarthy (jobhistoryserver , mrv2)<br>
+     <b>RM UI -&gt; Applications -&gt; Application(Job History) -&gt; Map Tasks -&gt; Task ID -&gt; Node link is not working</b><br>
+     <blockquote>Fixed node link on JobHistory webapp.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2855">MAPREDUCE-2855</a>.
+     Major bug reported by Todd Lipcon and fixed by Siddharth Seth <br>
+     <b>ResourceBundle lookup during counter name resolution takes a lot of time</b><br>
+     <blockquote>Passing a cached class-loader to the ResourceBundle creator to minimize counter name lookup time.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2793">MAPREDUCE-2793</a>.
+     Critical bug reported by Ramya Sunil and fixed by Bikas Saha (mrv2)<br>
+     <b>[MR-279] Maintain consistency in naming appIDs, jobIDs and attemptIDs </b><br>
+     <blockquote>Corrected AppIDs, JobIDs, and TaskAttemptIDs to be of the correct format on the web pages.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3853">HDFS-3853</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>Port MiniDFSCluster enableManagedDfsDirsRedundancy option to branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3104">HDFS-3104</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Add tests for mkdir -p</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3101">HDFS-3101</a>.
+     Major bug reported by Zhanwei.Wang and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>cannot read empty file using webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3098">HDFS-3098</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Update FsShell tests for quoted metachars</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3060">HDFS-3060</a>.
+     Minor test reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>Bump TestDistributedUpgrade#testDistributedUpgrade timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3012">HDFS-3012</a>.
+     Critical bug reported by Ramya Sunil and fixed by Robert Joseph Evans <br>
+     <b>Exception while renewing delegation token</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3008">HDFS-3008</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (hdfs client)<br>
+     <b>Negative caching of local addrs doesn't work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3006">HDFS-3006</a>.
+     Major bug reported by bc Wong and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Webhdfs "SETOWNER" call returns incorrect content-type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2985">HDFS-2985</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Improve logging when replicas are marked as corrupt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2981">HDFS-2981</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>The default value of dfs.client.block.write.replace-datanode-on-failure.enable should be true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2969">HDFS-2969</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>ExtendedBlock.equals is incorrectly implemented</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2950">HDFS-2950</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Secondary NN HTTPS address should be listed as a NAMESERVICE_SPECIFIC_KEY</b><br>
+     <blockquote>The configuration dfs.secondary.https.port has been renamed to dfs.namenode.secondary.https-port for consistency. The old configuration is still supported via a deprecation path.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2944">HDFS-2944</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client)<br>
+     <b>Typo in hdfs-default.xml causes dfs.client.block.write.replace-datanode-on-failure.enable to be mistakenly disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2943">HDFS-2943</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>Expose last checkpoint time and transaction stats as JMX metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2938">HDFS-2938</a>.
+     Major bug reported by Suresh Srinivas and fixed by Hari Mankude (name-node)<br>
+     <b>Recursive delete of a large directory makes namenode unresponsive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2931">HDFS-2931</a>.
+     Minor task reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>Switch the DataNode's BlockVolumeChoosingPolicy to be a private-audience interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2907">HDFS-2907</a>.
+     Minor improvement reported by Sanjay Radia and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Make FSDataset in Datanode Pluggable</b><br>
+     <blockquote>Add a private conf property dfs.datanode.fsdataset.factory to make FSDataset in Datanode pluggable.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2887">HDFS-2887</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Define a FSVolume interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2764">HDFS-2764</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node , test)<br>
+     <b>TestBackupNode is racy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2725">HDFS-2725</a>.
+     Major bug reported by Prashant Sharma and fixed by  (hdfs client)<br>
+     <b>hdfs script usage information is missing the information about "dfs" command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2506">HDFS-2506</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , name-node)<br>
+     <b>Umbrella jira for tracking separation of wire protocol datatypes from the implementation types</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1217">HDFS-1217</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Laxman (name-node)<br>
+     <b>Some methods in the NameNode should not be public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-776">HDFS-776</a>.
+     Critical bug reported by Owen O'Malley and fixed by Uma Maheswara Rao G (balancer)<br>
+     <b>Fix exception handling in Balancer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8176">HADOOP-8176</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Disambiguate the destination of FsShell copies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8175">HADOOP-8175</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Add mkdir -p flag</b><br>
+     <blockquote>FsShell mkdir now accepts a -p flag.  Like unix, mkdir -p will not fail if the directory already exists.  Unlike unix, intermediate directories are always created, regardless of the flag, to avoid incompatibilities at this time.</blockquote></li>
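For example (paths illustrative):

    hadoop fs -mkdir -p /user/alice/logs/2012/01   # intermediate directories are created
    hadoop fs -mkdir -p /user/alice/logs/2012/01   # re-running succeeds: no error if the path exists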
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8173">HADOOP-8173</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>FsShell needs to handle quoted metachars</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8157">HADOOP-8157</a>.
+     Major test reported by Eli Collins and fixed by Todd Lipcon <br>
+     <b>TestRPCCallBenchmark#testBenchmarkWithWritable fails with RTE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8146">HADOOP-8146</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>FsShell commands cannot be interrupted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8140">HADOOP-8140</a>.
+     Major bug reported by arkady borkovsky and fixed by Daryn Sharp <br>
+     <b>dfs -getmerge should process its arguments better</b><br>
+     <blockquote></blockquote></li>
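For reference, the shape of the command in question (paths illustrative):

    # Concatenate the files under an HDFS directory into a single local file
    hadoop fs -getmerge /user/alice/job-output merged.txt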
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8137">HADOOP-8137</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Thomas Graves (documentation)<br>
+     <b>Site side links for commands manual (MAPREDUCE-3497)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8131">HADOOP-8131</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>FsShell put doesn't correctly handle a non-existent dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8123">HADOOP-8123</a>.
+     Critical bug reported by Jonathan Eagles and fixed by Jonathan Eagles (build)<br>
+     <b>hadoop-project invalid pom warnings prevent transitive dependency resolution</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8083">HADOOP-8083</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>javadoc generation for some modules is not done under target/</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8082">HADOOP-8082</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>add hadoop-client and hadoop-minicluster to the dependency-management section</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8074">HADOOP-8074</a>.
+     Trivial bug reported by Eli Collins and fixed by Colin Patrick McCabe (scripts)<br>
+     <b>Small bug in hadoop error message for unknown commands</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8071">HADOOP-8071</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>Avoid an extra packet in client code when nagling is disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8066">HADOOP-8066</a>.
+     Major bug reported by Aaron T. Myers and fixed by Andrew Bayer (build)<br>
+     <b>The full docs build intermittently fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8064">HADOOP-8064</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (build)<br>
+     <b>Remove unnecessary dependency on w3c.org in document processing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8057">HADOOP-8057</a>.
+     Major bug reported by Vinay and fixed by Vinay (scripts)<br>
+     <b>hadoop-setup-conf.sh not working because of some extra spaces.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8051">HADOOP-8051</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (documentation)<br>
+     <b>HttpFS documentation is not wired to the generated site</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8050">HADOOP-8050</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee (metrics)<br>
+     <b>Deadlock in metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8048">HADOOP-8048</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (util)<br>
+     <b>Allow merging of Credentials</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8046">HADOOP-8046</a>.
+     Minor bug reported by Steve Loughran and fixed by Steve Loughran <br>
+     <b>Revert StaticMapping semantics to the existing ones, add DNS mapping diagnostics in progress</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8042">HADOOP-8042</a>.
+     Critical bug reported by Kevin J. Price and fixed by Daryn Sharp (fs)<br>
+     <b>When copying a file out of HDFS, modifying it, and uploading it back into HDFS, the put fails due to a CRC mismatch</b><br>
+     <blockquote></blockquote></li>
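A rough reconstruction of the failing round-trip (paths illustrative; a stale local .crc sidecar written by the checksummed local filesystem is what triggered the mismatch):

    hadoop fs -get /data/report.txt .              # local copy may carry a hidden .report.txt.crc sidecar
    echo "edited" >> report.txt                    # edited outside Hadoop, so the sidecar goes stale
    hadoop fs -put report.txt /data/report-v2.txt  # formerly failed with a CRC mismatch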
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8036">HADOOP-8036</a>.
+     Major bug reported by Eli Collins and fixed by Colin Patrick McCabe (fs , test)<br>
+     <b>TestViewFsTrash assumes the user's home directory is 2 levels deep</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8035">HADOOP-8035</a>.
+     Minor bug reported by Andrew Bayer and fixed by Andrew Bayer (build)<br>
+     <b>Hadoop Maven site is inefficient and runs phases redundantly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8032">HADOOP-8032</a>.
+     Major wish reported by Ravi Prakash and fixed by Ravi Prakash (build , documentation)<br>
+     <b>mvn site:stage-deploy should be able to use the scp protocol to stage documents</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6502">HADOOP-6502</a>.
+     Critical bug reported by Hairong Kuang and fixed by Sharad Agarwal (util)<br>
+     <b>DistributedFileSystem#listStatus is very slow when listing a directory with a size of 1300</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  0.23.1 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  0.23.1 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 0.23.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3858">MAPREDUCE-3858</a>.
+     Critical bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>Task attempt failure during commit results in task never completing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3856">MAPREDUCE-3856</a>.
+     Critical bug reported by Eric Payne and fixed by Eric Payne (mrv2)<br>
+     <b>Instances of RunningJob class give incorrect job tracking urls when multiple jobs are submitted from the same client jvm.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3854">MAPREDUCE-3854</a>.
+     Major test reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>Reinstate environment variable tests in TestMiniMRChildTask</b><br>
+     <blockquote>Fixed and reenabled tests related to MR child JVM's environmental variables in TestMiniMRChildTask.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3846">MAPREDUCE-3846</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Restarted+Recovered AM hangs in some corner cases</b><br>
+     <blockquote>Addressed MR AM hanging issues during AM restart and the subsequent recovery.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3843">MAPREDUCE-3843</a>.
+     Critical bug reported by Anupam Seth and fixed by Anupam Seth (jobhistoryserver , mrv2)<br>
+     <b>Job summary log file found missing on the RM host</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3840">MAPREDUCE-3840</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>JobEndNotifier doesn't use the proxyToUse during connecting</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3834">MAPREDUCE-3834</a>.
+     Critical bug reported by Siddharth Seth and fixed by Siddharth Seth (mr-am , mrv2)<br>
+     <b>If multiple hosts for a split belong to the same rack, the rack is added multiple times in the AM request table</b><br>
+     <blockquote>Changed MR AM to not add the same rack entry multiple times into the container request table when multiple hosts for a split happen to be on the same rack</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3833">MAPREDUCE-3833</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Capacity scheduler queue refresh doesn't recompute queue capacities properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3828">MAPREDUCE-3828</a>.
+     Major bug reported by Ahmed Radwan and fixed by Siddharth Seth (mrv2)<br>
+     <b>Broken urls: AM tracking url and jobhistory url in a single node setup.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3827">MAPREDUCE-3827</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , performance)<br>
+     <b>Counters aggregation slowed down significantly after MAPREDUCE-3749</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3826">MAPREDUCE-3826</a>.
+     Major bug reported by Arpit Gupta and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM UI when loaded shows a DataTables warning and then the column sorting stops working</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3823">MAPREDUCE-3823</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , performance)<br>
+     <b>Counters are getting calculated twice at job-finish and delaying clients.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3822">MAPREDUCE-3822</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>TestJobCounters is failing intermittently on trunk and 0.23.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3817">MAPREDUCE-3817</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Gupta (mrv2)<br>
+     <b>bin/mapred command cannot run distcp and archive jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3815">MAPREDUCE-3815</a>.
+     Critical sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Data Locality suffers if the AM asks for containers using IPs instead of hostnames</b><br>
+     <blockquote>Fixed MR AM to always use hostnames and never IPs when requesting containers so that scheduler can give off data local containers correctly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3814">MAPREDUCE-3814</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv1 , mrv2)<br>
+     <b>MR1 compile fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3813">MAPREDUCE-3813</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , performance)<br>
+     <b>RackResolver should maintain a cache to avoid repetitive lookups.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3811">MAPREDUCE-3811</a>.
+     Critical task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Make the Client-AM IPC retry count configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3810">MAPREDUCE-3810</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , performance)<br>
+     <b>MR AM's ContainerAllocator is assigning the allocated containers very slowly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3809">MAPREDUCE-3809</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Tasks may take up to 3 seconds to exit after completion</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3808">MAPREDUCE-3808</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>NPE in FileOutputCommitter when running a 0 reduce job</b><br>
+     <blockquote>Fixed an NPE in FileOutputCommitter for jobs with maps but no reduces.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3804">MAPREDUCE-3804</a>.
+     Major bug reported by Dave Thompson and fixed by Dave Thompson (jobhistoryserver , mrv2 , resourcemanager)<br>
+     <b>yarn webapp interface vulnerable to cross scripting attacks</b><br>
+     <blockquote>fix cross scripting attacks vulnerability through webapp interface.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3803">MAPREDUCE-3803</a>.
+     Major test reported by Ravi Prakash and fixed by Ravi Prakash (build)<br>
+     <b>HDFS-2864 broke ant compilation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3802">MAPREDUCE-3802</a>.
+     Critical sub-task reported by Robert Joseph Evans and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>If an MR AM dies twice it looks like the process freezes</b><br>
+     <blockquote>Added test to validate that AM can crash multiple times and still can recover successfully after MAPREDUCE-3846.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3795">MAPREDUCE-3795</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>"job -status" command line output is malformed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3794">MAPREDUCE-3794</a>.
+     Major bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>Support mapred.Task.Counter and mapred.JobInProgress.Counter enums for compatibility</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3791">MAPREDUCE-3791</a>.
+     Major bug reported by Roman Shaposhnik and fixed by Mahadev konar (documentation , mrv2)<br>
+     <b>can't build site in hadoop-yarn-server-common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3787">MAPREDUCE-3787</a>.
+     Major improvement reported by Amar Kamat and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>[Gridmix] Improve STRESS mode</b><br>
+     <blockquote>JobMonitor can now deploy multiple threads for faster job-status polling. Use 'gridmix.job-monitor.thread-count' to set the number of threads. Stress mode now relies on the updates from the job monitor instead of polling for job status. Failures in job submission now get reported to the statistics module and ultimately reported to the user via summary.</blockquote></li>
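A hypothetical invocation showing where the new knob goes; only the property name comes from the note above, while the jar name and trailing arguments are placeholders:

    hadoop jar hadoop-gridmix.jar org.apache.hadoop.mapred.gridmix.Gridmix \
        -Dgridmix.job-monitor.thread-count=8 <ioPath> <trace>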
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3784">MAPREDUCE-3784</a>.
+     Major bug reported by Ramya Sunil and fixed by Arun C Murthy (mrv2)<br>
+     <b>maxActiveApplications(|PerUser) per queue is too low for small clusters</b><br>
+     <blockquote>Fixed CapacityScheduler so that maxActiveApplication and maxActiveApplicationsPerUser per queue are not too low for small clusters. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3780">MAPREDUCE-3780</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Hitesh Shah (mrv2)<br>
+     <b>RM assigns containers to killed applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3775">MAPREDUCE-3775</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Change MiniYarnCluster to escape special chars in testname</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3774">MAPREDUCE-3774</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>yarn-default.xml should be moved to hadoop-yarn-common.</b><br>
+     <blockquote>MAPREDUCE-3774. Moved yarn-default.xml to hadoop-yarn-common from hadoop-server-common.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3771">MAPREDUCE-3771</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Port MAPREDUCE-1735 to trunk/0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3770">MAPREDUCE-3770</a>.
+     Critical bug reported by Amar Kamat and fixed by Amar Kamat (tools/rumen)<br>
+     <b>[Rumen] Zombie.getJobConf() results into NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3765">MAPREDUCE-3765</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>FifoScheduler does not respect yarn.scheduler.fifo.minimum-allocation-mb setting</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3764">MAPREDUCE-3764</a>.
+     Critical bug reported by Siddharth Seth and fixed by Arun C Murthy (mrv2)<br>
+     <b>AllocatedGB etc metrics incorrect if min-allocation-mb isn't a multiple of 1GB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3762">MAPREDUCE-3762</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Resource Manager fails to come up with default capacity scheduler configs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3760">MAPREDUCE-3760</a>.
+     Major bug reported by Ramya Sunil and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Blacklisted NMs should not appear in Active nodes list</b><br>
+     <blockquote>Changed active nodes list to not contain unhealthy nodes on the webUI and metrics.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3759">MAPREDUCE-3759</a>.
+     Major bug reported by Ramya Sunil and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>ClassCastException thrown in -list-active-trackers when there are a few unhealthy nodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3756">MAPREDUCE-3756</a>.
+     Major improvement reported by Arun C Murthy and fixed by Hitesh Shah (mrv2)<br>
+     <b>Make single shuffle limit configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3754">MAPREDUCE-3754</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , webapps)<br>
+     <b>RM webapp should have pages filtered based on App-state</b><br>
+     <blockquote>Modified RM UI to filter applications based on state of the applications.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3752">MAPREDUCE-3752</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>Headroom should be capped by queue max-cap</b><br>
+     <blockquote>Modified application limits to include queue max-capacities besides the usual user limits.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3749">MAPREDUCE-3749</a>.
+     Blocker bug reported by Tom White and fixed by Tom White (mrv2)<br>
+     <b>ConcurrentModificationException in counter groups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3748">MAPREDUCE-3748</a>.
+     Minor bug reported by Ramya Sunil and fixed by Ramya Sunil (mrv2)<br>
+     <b>Move CS related nodeUpdate log messages to DEBUG</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3747">MAPREDUCE-3747</a>.
+     Major bug reported by Ramya Sunil and fixed by Arun C Murthy (mrv2)<br>
+     <b>Memory Total is not refreshed until an app is launched</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3744">MAPREDUCE-3744</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>Unable to retrieve application logs via "yarn logs" or "mapred job -logs"</b><br>
+     <blockquote></blockquote></li>
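The two commands from the title, with illustrative application and job ids:

    yarn logs -applicationId application_1326821518301_0005
    mapred job -logs job_1326821518301_0005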
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3742">MAPREDUCE-3742</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>"yarn logs" command fails with ClassNotFoundException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3737">MAPREDUCE-3737</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>The Web Application Proxy is not documented very well</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3735">MAPREDUCE-3735</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Add distcp jar to the distribution (tar)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3733">MAPREDUCE-3733</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Add Apache License Header to hadoop-distcp/pom.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3732">MAPREDUCE-3732</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2 , resourcemanager , scheduler)<br>
+     <b>CS should only use 'activeUsers with pending requests' for computing user-limits</b><br>
+     <blockquote>Modified CapacityScheduler to use only users with pending requests for computing user-limits.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3727">MAPREDUCE-3727</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>jobtoken location property in jobconf refers to wrong jobtoken file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3723">MAPREDUCE-3723</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2 , test , webapps)<br>
+     <b>TestAMWebServicesJobs &amp; TestHSWebServicesJobs incorrectly asserting tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3721">MAPREDUCE-3721</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Race in shuffle can cause it to hang</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3720">MAPREDUCE-3720</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (client , mrv2)<br>
+     <b>Command line listJobs should not visit each AM</b><br>
+     <blockquote>Changed bin/mapred job -list to not print job-specific information not available at RM.
+
+Very minor incompatibility in cmd-line output, inevitable due to MRv2 architecture.</blockquote></li>
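In practice (job id illustrative):

    mapred job -list                              # reports only RM-side information, no per-AM round trips
    mapred job -status job_1326821518301_0005     # full details still require a job-specific query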
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3718">MAPREDUCE-3718</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , performance)<br>
+     <b>Default AM heartbeat interval should be one second</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3717">MAPREDUCE-3717</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>JobClient test jar has missing files to run all the test programs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3716">MAPREDUCE-3716</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>java.io.File.createTempFile fails in map/reduce tasks</b><br>
+     <blockquote>Fixed YARN+MR to allow MR jobs to use java.io.File.createTempFile to create temporary files as part of their tasks.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3714">MAPREDUCE-3714</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , task)<br>
+     <b>Reduce hangs in a corner case</b><br>
+     <blockquote>Fixed EventFetcher and Fetcher threads to shut-down properly so that reducers don't hang in corner cases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3713">MAPREDUCE-3713</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Arun C Murthy (mrv2 , resourcemanager)<br>
+     <b>Incorrect headroom reported to jobs</b><br>
+     <blockquote>Fixed the way head-room is allocated to applications by CapacityScheduler so that it deducts current-usage per user and not per-application.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3712">MAPREDUCE-3712</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Mahadev konar (mrv2)<br>
+     <b>The mapreduce tar does not contain the hadoop-mapreduce-client-jobclient-tests.jar. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3711">MAPREDUCE-3711</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>AppMaster recovery for Medium to large jobs take long time</b><br>
+     <blockquote>Fixed MR AM recovery so that only a single selected task's output is recovered, reducing the unnecessarily bloated recovery time.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3710">MAPREDUCE-3710</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv1 , mrv2)<br>
+     <b>last split generated by FileInputFormat.getSplits may not have the best locality</b><br>
+     <blockquote>Improved FileInputFormat to return better locality for the last split.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3709">MAPREDUCE-3709</a>.
+     Major bug reported by Eli Collins and fixed by Hitesh Shah (mrv2 , test)<br>
+     <b>TestDistributedShell is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3708">MAPREDUCE-3708</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Metrics: Incorrect Apps Submitted Count</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3705">MAPREDUCE-3705</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>ant build fails on 0.23 branch </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3703">MAPREDUCE-3703</a>.
+     Critical bug reported by Eric Payne and fixed by Eric Payne (mrv2 , resourcemanager)<br>
+     <b>ResourceManager should provide node lists in JMX output</b><br>
+     <blockquote>New JMX Bean in ResourceManager to provide list of live node managers:
+
+Hadoop:service=ResourceManager,name=RMNMInfo LiveNodeManagers</blockquote></li>
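The bean can be read through the JMX JSON servlet; a sketch assuming the default RM web port 8088:

    curl 'http://<rm-host>:8088/jmx?qry=Hadoop:service=ResourceManager,name=RMNMInfo'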
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3702">MAPREDUCE-3702</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>internal server error trying access application master via proxy with filter enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3701">MAPREDUCE-3701</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Delete HadoopYarnRPC from 0.23 branch.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3699">MAPREDUCE-3699</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2)<br>
+     <b>Default RPC handlers are very low for YARN servers</b><br>
+     <blockquote>Increased RPC handlers for all YARN servers to reasonable values for working at scale.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3698">MAPREDUCE-3698</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Mahadev konar (mrv2)<br>
+     <b>Client cannot talk to the history server in secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3697">MAPREDUCE-3697</a>.
+     Blocker bug reported by John George and fixed by Mahadev konar (mrv2)<br>
+     <b>Hadoop Counters API limits Oozie's working across different hadoop versions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3696">MAPREDUCE-3696</a>.
+     Blocker bug reported by John George and fixed by John George (mrv2)<br>
+     <b>MR job via oozie does not work on hadoop 23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3693">MAPREDUCE-3693</a>.
+     Minor improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (mrv2)<br>
+     <b>Add admin env to mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3692">MAPREDUCE-3692</a>.
+     Blocker improvement reported by Eli Collins and fixed by Eli Collins (mrv2)<br>
+     <b>yarn-resourcemanager out and log files can get big</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3691">MAPREDUCE-3691</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>webservices add support to compress response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3689">MAPREDUCE-3689</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>RM web UI doesn't handle newline in job name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3684">MAPREDUCE-3684</a>.
+     Major bug reported by Tom White and fixed by Tom White (client)<br>
+     <b>LocalDistributedCacheManager does not shut down its thread pool</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3683">MAPREDUCE-3683</a>.
+     Blocker bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>Capacity scheduler LeafQueues maximum capacity calculation issues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3681">MAPREDUCE-3681</a>.
+     Critical bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>capacity scheduler LeafQueues calculate used capacity wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3679">MAPREDUCE-3679</a>.
+     Major improvement reported by Mahadev konar and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>AM logs and others should not automatically refresh after every 1 second.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3669">MAPREDUCE-3669</a>.
+     Blocker bug reported by Thomas Graves and fixed by Mahadev konar (mrv2)<br>
+     <b>Getting a lot of PriviledgedActionException / SaslException when running a job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3664">MAPREDUCE-3664</a>.
+     Minor bug reported by praveen sripati and fixed by Brandon Li (documentation)<br>
+     <b>HDFS Federation Documentation has incorrect configuration example</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3657">MAPREDUCE-3657</a>.
+     Minor bug reported by Jason Lowe and fixed by Jason Lowe (build , mrv2)<br>
+     <b>State machine visualize build fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3656">MAPREDUCE-3656</a>.
+     Blocker bug reported by Karam Singh and fixed by Siddharth Seth (applicationmaster , mrv2 , resourcemanager)<br>
+     <b>Sort job on 350 scale is consistently failing with latest MRV2 code </b><br>
+     <blockquote>Fixed a race condition in MR AM which is failing the sort benchmark consistently.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3652">MAPREDUCE-3652</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>org.apache.hadoop.mapred.TestWebUIAuthorization.testWebUIAuthorization fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3651">MAPREDUCE-3651</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>TestQueueManagerRefresh fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3649">MAPREDUCE-3649</a>.
+     Blocker bug reported by Mahadev konar and fixed by Ravi Prakash (mrv2)<br>
+     <b>Job End notification gives an error on calling back.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3648">MAPREDUCE-3648</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>TestJobConf failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3646">MAPREDUCE-3646</a>.
+     Major bug reported by Ramya Sunil and fixed by Jonathan Eagles (client , mrv2)<br>
+     <b>Remove redundant URL info from "mapred job" output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3645">MAPREDUCE-3645</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv1)<br>
+     <b>TestJobHistory fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3641">MAPREDUCE-3641</a>.
+     Blocker sub-task reported by Arun C Murthy and fixed by Arun C Murthy (mrv2 , scheduler)<br>
+     <b>CapacityScheduler should be more conservative assigning off-switch requests</b><br>
+     <blockquote>Making CapacityScheduler more conservative so as to assign only one off-switch container in a single scheduling iteration.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3640">MAPREDUCE-3640</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Arun C Murthy (mrv2)<br>
+     <b>AMRecovery should pick completed tasks from partial JobHistory files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3639">MAPREDUCE-3639</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>TokenCache likely broken for FileSystems which don't issue delegation tokens</b><br>
+     <blockquote>Fixed TokenCache to work with absent FileSystem canonical service-names.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3630">MAPREDUCE-3630</a>.
+     Critical task reported by Amol Kekre and fixed by Mahadev konar (mrv2)<br>
+     <b>NullPointerException running teragen</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3625">MAPREDUCE-3625</a>.
+     Critical bug reported by Arun C Murthy and fixed by Jason Lowe (mrv2)<br>
+     <b>CapacityScheduler web-ui display of queue's used capacity is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3624">MAPREDUCE-3624</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>bin/yarn script adds jdk tools.jar to the classpath.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3618">MAPREDUCE-3618</a>.
+     Major sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2 , performance)<br>
+     <b>TaskHeartbeatHandler holds a global lock for all task-updates</b><br>
+     <blockquote>Fixed TaskHeartbeatHandler to not hold a global lock for all task-updates.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3617">MAPREDUCE-3617</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Remove yarn default values for resource manager and nodemanager principal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3616">MAPREDUCE-3616</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2 , performance)<br>
+     <b>Thread pool for launching containers in MR AM not expanding as expected</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3610">MAPREDUCE-3610</a>.
+     Minor improvement reported by Sho Shimauchi and fixed by Sho Shimauchi <br>
+     <b>Some parts in MR use old property dfs.block.size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3608">MAPREDUCE-3608</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>MAPREDUCE-3522 commit causes compilation to fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3604">MAPREDUCE-3604</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy (contrib/streaming)<br>
+     <b>Streaming's check for local mode is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3597">MAPREDUCE-3597</a>.
+     Major improvement reported by Ravi Gummadi and fixed by Ravi Gummadi (tools/rumen)<br>
+     <b>Provide a way to access other info of history file from Rumentool</b><br>
+     <blockquote>Rumen now provides {{Parsed*}} objects. These objects provide extra information that are not provided by {{Logged*}} objects.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3596">MAPREDUCE-3596</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>Sort benchmark hangs after completion of 99% of the map phase</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3595">MAPREDUCE-3595</a>.
+     Major test reported by Tom White and fixed by Tom White (test)<br>
+     <b>Add missing TestCounters#testCounterValue test from branch 1 to 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3588">MAPREDUCE-3588</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>bin/yarn broken after MAPREDUCE-3366</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3586">MAPREDUCE-3586</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2)<br>
+     <b>Lots of AMs hanging around in PIG testing</b><br>
+     <blockquote>Modified CompositeService to avoid duplicate stop operations thereby solving race conditions in MR AM shutdown.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3582">MAPREDUCE-3582</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2 , test)<br>
+     <b>Move successfully passing MR1 tests to MR2 maven tree.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3579">MAPREDUCE-3579</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (mrv2)<br>
+     <b>ConverterUtils should not include a port in a path for a URL with no port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3572">MAPREDUCE-3572</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2 , performance)<br>
+     <b>MR AM's dispatcher is blocked by heartbeats to ResourceManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3569">MAPREDUCE-3569</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2 , performance)<br>
+     <b>TaskAttemptListener holds a global lock for all task-updates</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3568">MAPREDUCE-3568</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2 , performance)<br>
+     <b>Optimize Job's progress calculations in MR AM</b><br>
+     <blockquote>Optimized Job's progress calculations in MR AM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3567">MAPREDUCE-3567</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2 , performance)<br>
+     <b>Extraneous JobConf objects in AM heap</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3566">MAPREDUCE-3566</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2)<br>
+     <b>MR AM slows down due to repeatedly constructing ContainerLaunchContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3564">MAPREDUCE-3564</a>.
+     Blocker bug reported by Mahadev konar and fixed by Siddharth Seth (mrv2)<br>
+     <b>TestStagingCleanup and TestJobEndNotifier are failing on trunk.</b><br>
+     <blockquote>Fixed failures in TestStagingCleanup and TestJobEndNotifier tests.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3563">MAPREDUCE-3563</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>LocalJobRunner doesn't handle Jobs using o.a.h.mapreduce.OutputCommitter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3560">MAPREDUCE-3560</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2 , resourcemanager , test)<br>
+     <b>TestRMNodeTransitions is failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3557">MAPREDUCE-3557</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>MR1 tests fail to compile because of missing hadoop-archives dependency</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3553">MAPREDUCE-3553</a>.
+     Minor sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>Add support for data returned when exceptions thrown from web service apis to be in either xml or in JSON</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3549">MAPREDUCE-3549</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>write api documentation for web service apis for RM, NM, mapreduce app master, and job history server</b><br>
+     <blockquote>new files added: A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebServicesIntro.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm
+A      hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm
+
+The hadoop-project/src/site/site.xml change is split into a separate patch.</blockquote></li>
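The documented endpoints can be exercised directly; a sketch against the ResourceManager, assuming the default web port 8088:

    curl 'http://<rm-host>:8088/ws/v1/cluster/info'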
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3548">MAPREDUCE-3548</a>.
+     Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>write unit tests for web services for mapreduce app master and job history server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3547">MAPREDUCE-3547</a>.
+     Critical sub-task reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>finish unit tests for web services for RM and NM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3544">MAPREDUCE-3544</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build , tools/rumen)<br>
+     <b>gridmix build is broken, requires hadoop-archives to be added as ivy dependency</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3542">MAPREDUCE-3542</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>Support "FileSystemCounter" legacy counter group name for compatibility</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3541">MAPREDUCE-3541</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Fix broken TestJobQueueClient test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3537">MAPREDUCE-3537</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>DefaultContainerExecutor has a race condition with multiple concurrent containers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3534">MAPREDUCE-3534</a>.
+     Blocker sub-task reported by Vinay Kumar Thota and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Compression benchmark run-time increased by 13% in 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3532">MAPREDUCE-3532</a>.
+     Critical bug reported by Karam Singh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2 , nodemanager)<br>
+     <b>When 0 is provided as the port number in yarn.nodemanager.webapp.address, the NM's webserver component picks a random port but the NM keeps reporting port 0 to the RM</b><br>
+     <blockquote>Modified NM to report correct http address when an ephemeral web port is configured.</blockquote></li>
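An illustrative yarn-site.xml fragment using an ephemeral port:

    <property>
      <name>yarn.nodemanager.webapp.address</name>
      <value>0.0.0.0:0</value>   <!-- port 0: bind an ephemeral port; the NM now reports the actual one -->
    </property>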
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3531">MAPREDUCE-3531</a>.
+     Blocker bug reported by Karam Singh and fixed by Robert Joseph Evans (mrv2 , resourcemanager , scheduler)<br>
+     <b>Sometimes java.lang.IllegalArgumentException: Invalid key to HMAC computation in NODE_UPDATE also causing RM to stop scheduling </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3530">MAPREDUCE-3530</a>.
+     Blocker bug reported by Karam Singh and fixed by Arun C Murthy (mrv2 , resourcemanager , scheduler)<br>
+     <b>Sometimes NODE_UPDATE to the scheduler throws an NPE causing the scheduling to stop</b><br>
+     <blockquote>Fixed an NPE occurring during scheduling in the ResourceManager.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3529">MAPREDUCE-3529</a>.
+     Critical bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>TokenCache does not cache viewfs credentials correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3528">MAPREDUCE-3528</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mr-am , mrv2)<br>
+     <b>The task timeout check interval should be configurable independent of mapreduce.task.timeout</b><br>
+     <blockquote>Fixed TaskHeartBeatHandler to use a new configuration for the thread loop interval separate from task-timeout configuration property.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3527">MAPREDUCE-3527</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>Fix minor API incompatibilities between 1.0 and 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3525">MAPREDUCE-3525</a>.
+     Blocker sub-task reported by Karam Singh and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Shuffle benchmark is nearly 1.5x slower in 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3522">MAPREDUCE-3522</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Capacity Scheduler ACLs not inherited by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3521">MAPREDUCE-3521</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Hadoop Streaming ignores unknown parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3519">MAPREDUCE-3519</a>.
+     Blocker sub-task reported by Ravi Gummadi and fixed by Ravi Gummadi (mrv2 , nodemanager)<br>
+     <b>Deadlock in LocalDirsHandlerService and ShuffleHandler</b><br>
+     <blockquote>Fixed a deadlock in NodeManager LocalDirectories's handling service.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3518">MAPREDUCE-3518</a>.
+     Critical bug reported by Jonathan Eagles and fixed by Jonathan Eagles (client , mrv2)<br>
+     <b>mapred queue -info &lt;queue&gt; -showJobs throws NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3513">MAPREDUCE-3513</a>.
+     Trivial bug reported by Mahadev konar and fixed by chackaravarthy (mrv2)<br>
+     <b>Capacity Scheduler web UI has a spelling mistake for Memory.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3512">MAPREDUCE-3512</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mr-am , mrv2)<br>
+     <b>Batch jobHistory disk flushes</b><br>
+     <blockquote>Batched JobHistory flushes to DFS so that the AM no longer flushes for every event, which slowed it down.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3511">MAPREDUCE-3511</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Vinod Kumar Vavilapalli (mr-am , mrv2)<br>
+     <b>Counters occupy a good part of AM heap</b><br>
+     <blockquote>Removed a multitude of cloned/duplicate counters in the AM thereby reducing the AM heap size and preventing full GCs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3510">MAPREDUCE-3510</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (capacity-sched , mrv2)<br>
+     <b>Capacity Scheduler inherited ACLs not displayed by mapred queue -showacls</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3505">MAPREDUCE-3505</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Ahmed Radwan (mrv2)<br>
+     <b>yarn APPLICATION_CLASSPATH needs to be overridable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3500">MAPREDUCE-3500</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>MRJobConfig creates an LD_LIBRARY_PATH using the platform ARCH</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3499">MAPREDUCE-3499</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by John George (mrv2 , test)<br>
+     <b>New MiniMR does not setup proxyuser configuration correctly, thus tests using doAs do not work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3496">MAPREDUCE-3496</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Yarn initializes ACL operations from capacity scheduler config in a non-deterministic order</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3490">MAPREDUCE-3490</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Sharad Agarwal (mr-am , mrv2)<br>
+     <b>RMContainerAllocator counts failed maps towards Reduce ramp up</b><br>
+     <blockquote>Fixed MapReduce AM to count failed maps also towards Reduce ramp up.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3488">MAPREDUCE-3488</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Streaming jobs are failing because the main class isn't set in the pom files.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3487">MAPREDUCE-3487</a>.
+     Critical bug reported by Thomas Graves and fixed by Jason Lowe (mrv2)<br>
+     <b>jobhistory web ui task counters no longer link to the singletaskcounter page</b><br>
+     <blockquote>Fixed JobHistory web-UI to display links to single task's counters' page.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3485">MAPREDUCE-3485</a>.
+     Major sub-task reported by Hitesh Shah and fixed by Ravi Gummadi (mrv2)<br>
+     <b>DISKS_FAILED -101 error code should be defined in same location as ABORTED_CONTAINER_EXIT_STATUS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3484">MAPREDUCE-3484</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mr-am , mrv2)<br>
+     <b>JobEndNotifier is getting interrupted before completing all its retries.</b><br>
+     <blockquote>Fixed JobEndNotifier to not get interrupted before completing all its retries.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3481">MAPREDUCE-3481</a>.
+     Major improvement reported by Amar Kamat and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>[Gridmix] Improve STRESS mode locking</b><br>
+     <blockquote>Modified Gridmix STRESS mode locking structure. The submitting thread and the polling thread now run simultaneously without blocking each other.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3479">MAPREDUCE-3479</a>.
+     Major bug reported by Tom White and fixed by Tom White (client)<br>
+     <b>JobClient#getJob cannot find local jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3478">MAPREDUCE-3478</a>.
+     Minor bug reported by Andrew Bayer and fixed by Tom White (mrv2)<br>
+     <b>Cannot build against ZooKeeper 3.4.0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3477">MAPREDUCE-3477</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Jonathan Eagles (documentation , mrv2)<br>
+     <b>Hadoop site documentation cannot be built anymore on trunk and branch-0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3468">MAPREDUCE-3468</a>.
+     Major task reported by Siddharth Seth and fixed by Siddharth Seth <br>
+     <b>Change version to 0.23.1 for ant builds on the 23 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3465">MAPREDUCE-3465</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>org.apache.hadoop.yarn.util.TestLinuxResourceCalculatorPlugin fails on 0.23 </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3464">MAPREDUCE-3464</a>.
+     Trivial bug reported by Dave Vronay and fixed by Dave Vronay <br>
+     <b>mapreduce jsp pages missing DOCTYPE [post-split branches]</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3463">MAPREDUCE-3463</a>.
+     Blocker bug reported by Karam Singh and fixed by Siddharth Seth (applicationmaster , mrv2)<br>
+     <b>Second AM fails to recover properly when first AM is killed with java.lang.IllegalArgumentException causing lost job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3462">MAPREDUCE-3462</a>.
+     Blocker bug reported by Amar Kamat and fixed by Ravi Prakash (mrv2 , test)<br>
+     <b>Job submission failing in JUnit tests</b><br>
+     <blockquote>Fixed failing JUnit tests in Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3460">MAPREDUCE-3460</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Robert Joseph Evans (mr-am , mrv2)<br>
+     <b>MR AM can hang if containers are allocated on a node blacklisted by the AM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3458">MAPREDUCE-3458</a>.
+     Major bug reported by Arun C Murthy and fixed by Devaraj K (mrv2)<br>
+     <b>Fix findbugs warnings in hadoop-examples</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3456">MAPREDUCE-3456</a>.
+     Blocker bug reported by Eric Payne and fixed by Eric Payne (mrv2)<br>
+     <b>$HADOOP_PREFIX/bin/yarn should set defaults for $HADOOP_*_HOME</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3454">MAPREDUCE-3454</a>.
+     Major bug reported by Amar Kamat and fixed by Hitesh Shah (contrib/gridmix)<br>
+     <b>[Gridmix] TestDistCacheEmulation is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3453">MAPREDUCE-3453</a>.
+     Major bug reported by Thomas Graves and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM web ui application details page shows the RM cluster 'About' information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3452">MAPREDUCE-3452</a>.
+     Major bug reported by Thomas Graves and fixed by Jonathan Eagles (mrv2)<br>
+     <b>fifoscheduler web ui page always shows 0% used for the queue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3450">MAPREDUCE-3450</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mr-am , mrv2)<br>
+     <b>NM port info no longer available in JobHistory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3448">MAPREDUCE-3448</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>TestCombineOutputCollector javac unchecked warning on mocked generics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3447">MAPREDUCE-3447</a>.
+     Blocker bug reported by Thomas Graves and fixed by Mahadev konar (mrv2)<br>
+     <b>mapreduce examples not working</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3444">MAPREDUCE-3444</a>.
+     Blocker bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>trunk/0.23 builds broken </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3443">MAPREDUCE-3443</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Oozie jobs are running as oozie user even though they create the jobclient as doAs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3437">MAPREDUCE-3437</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles (build , mrv2)<br>
+     <b>Branch 23 fails to build with Failure to find org.apache.hadoop:hadoop-project:pom:0.24.0-SNAPSHOT</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3436">MAPREDUCE-3436</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Ahmed Radwan (mrv2 , webapps)<br>
+     <b>JobHistory webapp address should use the host from the jobhistory address</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3434">MAPREDUCE-3434</a>.
+     Blocker bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Nightly build broken </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3433">MAPREDUCE-3433</a>.
+     Major sub-task reported by Tom White and fixed by Tom White (client , mrv2)<br>
+     <b>Finding counters by legacy group name returns empty counters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3427">MAPREDUCE-3427</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Hitesh Shah (contrib/streaming , mrv2)<br>
+     <b>streaming tests fail with MR2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3426">MAPREDUCE-3426</a>.
+     Blocker sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>uber-jobs tried to write outputs into wrong dir</b><br>
+     <blockquote>Fixed MR AM in uber mode to write map intermediate outputs in the correct directory to work properly in secure mode.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3422">MAPREDUCE-3422</a>.
+     Major bug reported by Tom White and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Counter display names are not being picked up</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3420">MAPREDUCE-3420</a>.
+     Major bug reported by Hitesh Shah and fixed by  (mrv2)<br>
+     <b>[Umbrella ticket] Make uber jobs functional</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3417">MAPREDUCE-3417</a>.
+     Blocker bug reported by Thomas Graves and fixed by Jonathan Eagles (mrv2)<br>
+     <b>job access controls not working in app master and job history UIs</b><br>
+     <blockquote>Fixed job-access-controls to work with MR AM and JobHistoryServer web-apps.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3415">MAPREDUCE-3415</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>improve MiniMRYarnCluster &amp; DistributedShell JAR resolution </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3413">MAPREDUCE-3413</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM web ui applications not sorted in any order by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3412">MAPREDUCE-3412</a>.
+     Major bug reported by Amar Kamat and fixed by Amar Kamat <br>
+     <b>'ant docs' is broken</b><br>
+     <blockquote>Fixes 'ant docs' by removing stale references to capacity-scheduler docs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3411">MAPREDUCE-3411</a>.
+     Minor improvement reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Performance Upgrade for jQuery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3408">MAPREDUCE-3408</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (mrv2 , nodemanager , resourcemanager)<br>
+     <b>yarn-daemon.sh unconditionally sets yarn.root.logger</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3407">MAPREDUCE-3407</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Wrong jar getting used in TestMR*Jobs* for MiniMRYarnCluster</b><br>
+     <blockquote>Fixed pom files to refer to the correct MR app-jar needed by the integration tests.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3404">MAPREDUCE-3404</a>.
+     Critical bug reported by patrick white and fixed by Eric Payne (job submission , mrv2)<br>
+     <b>Speculative Execution: speculative map tasks launched even if -Dmapreduce.map.speculative=false</b><br>
+     <blockquote>Corrected MR AM to honor speculative configuration and enable speculating either maps or reduces.</blockquote></li>
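+     <p>As an illustration of the corrected behavior, the two speculation knobs can now be honored independently per job, set in mapred-site.xml or via equivalent -D options on submission (the value choices below are only an example):</p>
+     <pre>
+&lt;property&gt;
+  &lt;name&gt;mapreduce.map.speculative&lt;/name&gt;
+  &lt;value&gt;false&lt;/value&gt;  &lt;!-- launch no speculative map attempts --&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;mapreduce.reduce.speculative&lt;/name&gt;
+  &lt;value&gt;true&lt;/value&gt;   &lt;!-- reduces may still be speculated --&gt;
+&lt;/property&gt;
+     </pre>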
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3402">MAPREDUCE-3402</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>AMScalability test of Sleep job with 100K 1-sec maps regressed into running very slowly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3399">MAPREDUCE-3399</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2 , nodemanager)<br>
+     <b>ContainerLocalizer should request new resources after completing the current one</b><br>
+     <blockquote>Modified ContainerLocalizer to send a heartbeat to NM immediately after downloading a resource instead of always waiting for a second.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3398">MAPREDUCE-3398</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2 , nodemanager)<br>
+     <b>Log Aggregation broken in Secure Mode</b><br>
+     <blockquote>Fixed log aggregation to work correctly in secure mode. Contributed by Siddharth Seth.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3392">MAPREDUCE-3392</a>.
+     Blocker sub-task reported by John George and fixed by John George <br>
+     <b>Cluster.getDelegationToken() throws NPE if client.getDelegationToken() returns null.</b><br>
+     <blockquote>Fixed the Cluster#getDelegationToken API to return null when there isn't a supported token.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3391">MAPREDUCE-3391</a>.
+     Minor bug reported by Subroto Sanyal and fixed by Subroto Sanyal (applicationmaster)<br>
+     <b>Connecting to CM is logged as Connecting to RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3389">MAPREDUCE-3389</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (mrv2)<br>
+     <b>MRApps loads the 'mrapp-generated-classpath' file with classpath from the build machine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3387">MAPREDUCE-3387</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>A tracking URL of N/A before the app master is launched breaks oozie</b><br>
+     <blockquote>Fixed AM's tracking URL to always go through the proxy, even before the job started, so that it works properly with oozie throughout the job execution.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3382">MAPREDUCE-3382</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Ravi Prakash (applicationmaster , mrv2)<br>
+     <b>Network ACLs can prevent AMs from pinging the Job-end notification URL</b><br>
+     <blockquote>Enhanced MR AM to use a proxy to ping the job-end notification URL.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3380">MAPREDUCE-3380</a>.
+     Blocker sub-task reported by Alejandro Abdelnur and fixed by Mahadev konar (mr-am , mrv2)<br>
+     <b>Token infrastructure for running clients which are not kerberos authenticated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3379">MAPREDUCE-3379</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2 , nodemanager)<br>
+     <b>LocalResourceTracker should not track deleted cache entries</b><br>
+     <blockquote>Fixed LocalResourceTracker in NodeManager to remove deleted cache entries correctly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3376">MAPREDUCE-3376</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Subroto Sanyal (mrv1 , mrv2)<br>
+     <b>Old mapred API combiner uses NULL reporter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3375">MAPREDUCE-3375</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota <br>
+     <b>Memory Emulation system tests.</b><br>
+     <blockquote>Added system tests to test the memory emulation feature in Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3372">MAPREDUCE-3372</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; <br>
+     <b>HADOOP_PREFIX cannot be overridden</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3371">MAPREDUCE-3371</a>.
+     Minor improvement reported by Ravi Prakash and fixed by Ravi Prakash (documentation , mrv2)<br>
+     <b>Review and improve the yarn-api javadocs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3370">MAPREDUCE-3370</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2 , test)<br>
+     <b>MiniMRYarnCluster uses a hard coded path location for the MapReduce application jar</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3369">MAPREDUCE-3369</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv1 , mrv2 , test)<br>
+     <b>Migrate MR1 tests to run on MR2 using the new interfaces introduced in MAPREDUCE-3169</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3368">MAPREDUCE-3368</a>.
+     Critical bug reported by Ramya Sunil and fixed by Hitesh Shah (build , mrv2)<br>
+     <b>compile-mapred-test fails</b><br>
+     <blockquote>Fixed ant test compilation.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3366">MAPREDUCE-3366</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (mrv2)<br>
+     <b>Mapreduce component should use consistent directory structure layout as HDFS/common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3360">MAPREDUCE-3360</a>.
+     Critical improvement reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Provide information about lost nodes in the UI.</b><br>
+     <blockquote>Added information about lost/rebooted/decommissioned nodes on the webapps.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3355">MAPREDUCE-3355</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>AM scheduling hangs frequently with sort job on 350 nodes</b><br>
+     <blockquote>Fixed MR AM's ContainerLauncher to handle node-command timeouts correctly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3354">MAPREDUCE-3354</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Jonathan Eagles (jobhistoryserver , mrv2)<br>
+     <b>JobHistoryServer should be started by bin/mapred and not by bin/yarn</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3349">MAPREDUCE-3349</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Amar Kamat (mrv2)<br>
+     <b>No rack-name logged in JobHistory for unsuccessful tasks</b><br>
+     <blockquote>Unsuccessful tasks now log hostname and rackname to job history. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3346">MAPREDUCE-3346</a>.
+     Blocker bug reported by Karam Singh and fixed by Amar Kamat (tools/rumen)<br>
+     <b>Rumen LoggedTaskAttempt  getHostName call returns hostname as null</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3345">MAPREDUCE-3345</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , resourcemanager)<br>
+     <b>Race condition in ResourceManager causing TestContainerManagerSecurity to fail sometimes</b><br>
+     <blockquote>Fixed a race condition in ResourceManager that was causing TestContainerManagerSecurity to fail sometimes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3344">MAPREDUCE-3344</a>.
+     Major bug reported by Brock Noland and fixed by Brock Noland <br>
+     <b>o.a.h.mapreduce.Reducer since 0.21 blindly casts to ReduceContext.ValueIterator</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3342">MAPREDUCE-3342</a>.
+     Critical bug reported by Thomas Graves and fixed by Jonathan Eagles (jobhistoryserver , mrv2)<br>
+     <b>JobHistoryServer doesn't show job queue</b><br>
+     <blockquote>Fixed JobHistoryServer to also show the job's queue name.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3341">MAPREDUCE-3341</a>.
+     Major improvement reported by Anupam Seth and fixed by Anupam Seth (mrv2)<br>
+     <b>Enhance logging of initialized queue limit values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3339">MAPREDUCE-3339</a>.
+     Blocker bug reported by Ramgopal N and fixed by Siddharth Seth (mrv2)<br>
+     <b>Job hangs indefinitely if the child processes are killed on the NM; the KILL_CONTAINER event type is continuously sent to containers that no longer exist</b><br>
+     <blockquote>Fixed MR AM to stop considering node blacklisting after the number of nodes blacklisted crosses a threshold.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3336">MAPREDUCE-3336</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>com.google.inject.internal.Preconditions not public api - shouldn't be using it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3333">MAPREDUCE-3333</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>MR AM for sort-job going out of memory</b><br>
+     <blockquote>Fixed bugs in ContainerLauncher of MR AppMaster due to which per-container connections to NodeManager were lingering long enough to hit the ulimits on number of processes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3331">MAPREDUCE-3331</a>.
+     Minor improvement reported by Anupam Seth and fixed by Anupam Seth (mrv2)<br>
+     <b>Improvement to single node cluster setup documentation for 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3329">MAPREDUCE-3329</a>.
+     Blocker bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>capacity scheduler maximum-capacity allowed to be less than capacity</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3328">MAPREDUCE-3328</a>.
+     Critical bug reported by Thomas Graves and fixed by Ravi Prakash (mrv2)<br>
+     <b>mapred queue -list output inconsistent and missing child queues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3327">MAPREDUCE-3327</a>.
+     Critical bug reported by Thomas Graves and fixed by Anupam Seth (mrv2)<br>
+     <b>RM web ui scheduler link doesn't show correct max value for queues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3326">MAPREDUCE-3326</a>.
+     Critical bug reported by Thomas Graves and fixed by Jason Lowe (mrv2)<br>
+     <b>RM web UI scheduler link not as useful as it should be</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3325">MAPREDUCE-3325</a>.
+     Major improvement reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>Improvements to CapacityScheduler doc</b><br>
+     <blockquote>Documentation changes only.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3324">MAPREDUCE-3324</a>.
+     Critical bug reported by Jonathan Eagles and fixed by Jonathan Eagles (jobhistoryserver , mrv2 , nodemanager)<br>
+     <b>Not All HttpServer tools links (stacks,logs,config,metrics) are accessible through all UI servers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3312">MAPREDUCE-3312</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Make MR AM not send a stopContainer w/o corresponding start container</b><br>
+     <blockquote>Modified MR AM to not send a stop-container request for a container that isn't launched at all.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3299">MAPREDUCE-3299</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Add AMInfo table to the AM job page</b><br>
+     <blockquote>Added AMInfo table to the MR AM job pages to list all the job-attempts when AM restarts and recovers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3297">MAPREDUCE-3297</a>.
+     Major task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Move Log Related components from yarn-server-nodemanager to yarn-common</b><br>
+     <blockquote>Moved log related components into yarn-common so that HistoryServer and clients can use them without depending on the yarn-server-nodemanager module.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3291">MAPREDUCE-3291</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>App fail to launch due to delegation token not found in cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3280">MAPREDUCE-3280</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>MR AM should not read the username from configuration</b><br>
+     <blockquote>Removed the unnecessary job user-name configuration in mapred-site.xml.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3265">MAPREDUCE-3265</a>.
+     Blocker improvement reported by Todd Lipcon and fixed by Arun C Murthy (mrv2)<br>
+     <b>Reduce log level on MR2 IPC construction, etc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3251">MAPREDUCE-3251</a>.
+     Critical task reported by Anupam Seth and fixed by Anupam Seth (mrv2)<br>
+     <b>Network ACLs can prevent some clients from talking to the MR ApplicationMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3243">MAPREDUCE-3243</a>.
+     Major bug reported by Ramya Sunil and fixed by Jonathan Eagles (contrib/streaming , mrv2)<br>
+     <b>Invalid tracking URL for streaming jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3238">MAPREDUCE-3238</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Small cleanup in SchedulerApp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3221">MAPREDUCE-3221</a>.
+     Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test)<br>
+     <b>ant test TestSubmitJob failing on trunk</b><br>
+     <blockquote>Fixed a bug in TestSubmitJob.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3219">MAPREDUCE-3219</a>.
+     Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2 , test)<br>
+     <b>ant test TestDelegationToken failing on trunk</b><br>
+     <blockquote>Reenabled and fixed bugs in the failing test TestDelegationToken.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3217">MAPREDUCE-3217</a>.
+     Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test)<br>
+     <b>ant test TestAuditLogger fails on trunk</b><br>
+     <blockquote>Reenabled and fixed bugs in the failing ant test TestAuditLogger.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3215">MAPREDUCE-3215</a>.
+     Minor sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>org.apache.hadoop.mapreduce.TestNoJobSetupCleanup failing on trunk</b><br>
+     <blockquote>Reenabled and fixed bugs in the failing test TestNoJobSetupCleanup.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3194">MAPREDUCE-3194</a>.
+     Major bug reported by Siddharth Seth and fixed by Jason Lowe (mrv2)<br>
+     <b>"mapred mradmin" command is broken in mrv2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3169">MAPREDUCE-3169</a>.
+     Major improvement reported by Todd Lipcon and fixed by Ahmed Radwan (mrv1 , mrv2 , test)<br>
+     <b>Create a new MiniMRCluster equivalent which only provides client APIs cross MR1 and MR2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3147">MAPREDUCE-3147</a>.
+     Major improvement reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Handle leaf queues with the same name properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3121">MAPREDUCE-3121</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Ravi Gummadi (mrv2 , nodemanager)<br>
+     <b>DFIP aka 'NodeManager should handle Disk-Failures In Place'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3102">MAPREDUCE-3102</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , security)<br>
+     <b>NodeManager should fail fast with wrong configuration or permissions for LinuxContainerExecutor</b><br>
+     <blockquote>Changed NodeManager to fail fast when LinuxContainerExecutor has wrong configuration or permissions.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3045">MAPREDUCE-3045</a>.
+     Minor bug reported by Ramya Sunil and fixed by Jonathan Eagles (jobhistoryserver , mrv2)<br>
+     <b>Elapsed time filter on jobhistory server displays incorrect table entries</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2950">MAPREDUCE-2950</a>.
+     Major bug reported by Amar Kamat and fixed by Ravi Gummadi (contrib/gridmix)<br>
+     <b>[Gridmix] TestUserResolve fails in trunk</b><br>
+     <blockquote>Fixes bug in TestUserResolve.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2863">MAPREDUCE-2863</a>.
+     Blocker improvement reported by Arun C Murthy and fixed by Thomas Graves (mrv2 , nodemanager , resourcemanager)<br>
+     <b>Support web-services for RM &amp; NM</b><br>
+     <blockquote>Support for web-services in YARN and MR components.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2784">MAPREDUCE-2784</a>.
+     Major bug reported by Amar Kamat and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>[Gridmix] TestGridmixSummary fails with NPE when run in DEBUG mode.</b><br>
+     <blockquote>Fixed bugs in ExecutionSummarizer and ResourceUsageMatcher.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2765">MAPREDUCE-2765</a>.
+     Major new feature reported by Mithun Radhakrishnan and fixed by Mithun Radhakrishnan (distcp , mrv2)<br>
+     <b>DistCp Rewrite</b><br>
+     <blockquote>DistCpV2 added to hadoop-tools.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2733">MAPREDUCE-2733</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota <br>
+     <b>Gridmix v3 cpu emulation system tests.</b><br>
+     <blockquote>Adds system tests for the CPU emulation feature in Gridmix3.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2450">MAPREDUCE-2450</a>.
+     Major bug reported by Matei Zaharia and fixed by Rajesh Balamohan <br>
+     <b>Calls from running tasks to TaskTracker methods sometimes fail and incur a 60s timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1744">MAPREDUCE-1744</a>.
+     Major bug reported by Dick King and fixed by Dick King <br>
+     <b>DistributedCache creates its own FileSystem instance when adding a file/archive to the path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-778">MAPREDUCE-778</a>.
+     Major new feature reported by Hong Tang and fixed by Amar Kamat (tools/rumen)<br>
+     <b>[Rumen] Need a standalone JobHistory log anonymizer</b><br>
+     <blockquote>Added an anonymizer tool to Rumen. Anonymizer takes a Rumen trace file and/or topology as input. It supports persistence and plugins to override the default behavior.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2923">HDFS-2923</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Namenode IPC handler count uses the wrong configuration key</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2893">HDFS-2893</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>The start/stop scripts don't start/stop the 2NN when using the default configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2889">HDFS-2889</a>.
+     Major bug reported by Gregory Chanan and fixed by Gregory Chanan (hdfs client)<br>
+     <b>getNumCurrentReplicas is package private but should be public on 0.23 (see HDFS-2408)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2879">HDFS-2879</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Change FSDataset to package private</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2869">HDFS-2869</a>.
+     Minor bug reported by Harsh J and fixed by Harsh J (webhdfs)<br>
+     <b>Error in Webhdfs documentation for mkdir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2868">HDFS-2868</a>.
+     Minor improvement reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>Add number of active transfer threads to the DataNode status</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2864">HDFS-2864</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Remove redundant methods and a constant from FSDataset</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2840">HDFS-2840</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur (test)<br>
+     <b>TestHostnameFilter should work with localhost or localhost.localdomain </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2837">HDFS-2837</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>mvn javadoc:javadoc not seeing LimitedPrivate class </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2836">HDFS-2836</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans <br>
+     <b>HttpFSServer still has 2 javadoc warnings in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2835">HDFS-2835</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (tools)<br>
+     <b>Fix org.apache.hadoop.hdfs.tools.GetConf$Command Findbug issue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2827">HDFS-2827</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Cannot save namespace after renaming a directory above a file with an open lease</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2826">HDFS-2826</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node , test)<br>
+     <b>Test case for HDFS-1476 (safemode can initialize repl queues before exiting)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2825">HDFS-2825</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Add test hook to turn off the writer preferring its local DN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2822">HDFS-2822</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (ha , name-node)<br>
+     <b>processMisReplicatedBlock incorrectly identifies under-construction blocks as under-replicated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2818">HDFS-2818</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Devaraj K (name-node)<br>
+     <b>dfshealth.jsp missing space between role and node name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2817">HDFS-2817</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>Combine the two TestSafeMode test suites</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2816">HDFS-2816</a>.
+     Trivial bug reported by Hitesh Shah and fixed by Hitesh Shah <br>
+     <b>Fix missing license header in hadoop-hdfs-project/hadoop-hdfs-httpfs/dev-support/findbugsExcludeFile.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2814">HDFS-2814</a>.
+     Minor improvement reported by Hitesh Shah and fixed by Hitesh Shah <br>
+     <b>NamenodeMXBean does not account for svn revision in the version information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2810">HDFS-2810</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>Leases not properly getting renewed by clients</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2803">HDFS-2803</a>.
+     Minor improvement reported by Jimmy Xiang and fixed by Jimmy Xiang (name-node)<br>
+     <b>Adding logging to LeaseRenewer for better lease expiration triage.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2791">HDFS-2791</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node , name-node)<br>
+     <b>If block report races with closing of file, replica is incorrectly marked corrupt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2790">HDFS-2790</a>.
+     Minor bug reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>FSNamesystem.setTimes throws exception with wrong configuration name in the message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2788">HDFS-2788</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2786">HDFS-2786</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Kihwal Lee (name-node , security)<br>
+     <b>Fix host-based token incompatibilities in DFSUtil</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2785">HDFS-2785</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Robert Joseph Evans (webhdfs)<br>
+     <b>Update webhdfs and httpfs for host-based token support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2784">HDFS-2784</a>.
+     Major sub-task reported by Daryn Sharp and fixed by Kihwal Lee (hdfs client , name-node , security)<br>
+     <b>Update hftp and hdfs for host-based token support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2761">HDFS-2761</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build , hdfs client , scripts)<br>
+     <b>Improve Hadoop subcomponent integration in Hadoop 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2751">HDFS-2751</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Datanode drops OS cache behind reads even for short reads</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2729">HDFS-2729</a>.
+     Minor improvement reported by Harsh J and fixed by Harsh J (name-node)<br>
+     <b>Update BlockManager's comments regarding the invalid block set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2726">HDFS-2726</a>.
+     Major improvement reported by Michael Bieniosek and fixed by Harsh J <br>
+     <b>"Exception in createBlockOutputStream" shouldn't delete exception stack trace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2722">HDFS-2722</a>.
+     Major bug reported by Harsh J and fixed by Harsh J (hdfs client)<br>
+     <b>HttpFs shouldn't be using an int for block size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2710">HDFS-2710</a>.
+     Critical bug reported by Siddharth Seth and fixed by  <br>
+     <b>HDFS part of MAPREDUCE-3529, HADOOP-7933</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2707">HDFS-2707</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>HttpFS should read the hadoop-auth secret from a file instead of inline from the configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2706">HDFS-2706</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Use configuration for blockInvalidateLimit if it is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2705">HDFS-2705</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS server should check that upload requests have correct content-type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2675">HDFS-2675</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Reduce verbosity when double-closing edit logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2658">HDFS-2658</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS introduced 70 javadoc warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2657">HDFS-2657</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur <br>
+     <b>TestHttpFSServer and TestServerWebApp are failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2654">HDFS-2654</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Make BlockReaderLocal not extend RemoteBlockReader2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2653">HDFS-2653</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>DFSClient should cache whether addrs are non-local when short-circuiting is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2649">HDFS-2649</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (build)<br>
+     <b>eclipse:eclipse build fails for hadoop-hdfs-httpfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2646">HDFS-2646</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Alejandro Abdelnur <br>
+     <b>Hadoop HttpFS introduced 4 findbug warnings.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2640">HDFS-2640</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>Javadoc generation hangs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2614">HDFS-2614</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop dist tarball is missing hdfs headers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2606">HDFS-2606</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (webhdfs)<br>
+     <b>webhdfs client filesystem impl must set the content-type header for create/append</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2604">HDFS-2604</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Add a log message to show if WebHDFS is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2596">HDFS-2596</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (data-node , test)<br>
+     <b>TestDirectoryScanner doesn't test parallel scans</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2590">HDFS-2590</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Some links in WebHDFS forrest doc do not work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2588">HDFS-2588</a>.
+     Trivial bug reported by Dave Vronay and fixed by Dave Vronay (scripts)<br>
+     <b>hdfs jsp pages missing DOCTYPE [post-split branches]</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2587">HDFS-2587</a>.
+     Major task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Add WebHDFS apt doc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2575">HDFS-2575</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>DFSTestUtil may create empty files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2574">HDFS-2574</a>.
+     Trivial task reported by Joe Crobak and fixed by Joe Crobak (documentation)<br>
+     <b>remove references to deprecated properties in hdfs-site.xml template and hdfs-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2572">HDFS-2572</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>Unnecessary double-check in DN#getHostName</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2570">HDFS-2570</a>.
+     Trivial improvement reported by Eli Collins and fixed by Eli Collins (documentation)<br>
+     <b>Add descriptions for dfs.*.https.address in hdfs-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2568">HDFS-2568</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>Use a set to manage child sockets in XceiverServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2567">HDFS-2567</a>.
+     Major bug reported by Harsh J and fixed by Harsh J (name-node)<br>
+     <b>When 0 DNs are available, show a proper error when trying to browse DFS via web UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2566">HDFS-2566</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Move BPOfferService to be a non-inner class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2563">HDFS-2563</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Some cleanup in BPOfferService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2562">HDFS-2562</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Refactor DN configuration variables out of DataNode class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2560">HDFS-2560</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Refactor BPOfferService to be a static inner class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2553">HDFS-2553</a>.
+     Critical bug reported by Todd Lipcon and fixed by Uma Maheswara Rao G (data-node)<br>
+     <b>BlockPoolSliceScanner spinning in loop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2552">HDFS-2552</a>.
+     Major task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Add WebHdfs Forrest doc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2545">HDFS-2545</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Webhdfs: Support multiple namenodes in federation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2544">HDFS-2544</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (scripts)<br>
+     <b>Hadoop scripts unconditionally source "$bin"/../libexec/hadoop-config.sh.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2543">HDFS-2543</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (scripts)<br>
+     <b>HADOOP_PREFIX cannot be overriden</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2541">HDFS-2541</a>.
+     Major bug reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>For a sufficiently large number of blocks, the DN Scanner may request a random number with a negative seed value.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2536">HDFS-2536</a>.
+     Trivial improvement reported by Aaron T. Myers and fixed by Harsh J (name-node)<br>
+     <b>Remove unused imports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2533">HDFS-2533</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance)<br>
+     <b>Remove needless synchronization on FSDataSet.getBlockFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2511">HDFS-2511</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Alejandro Abdelnur (build)<br>
+     <b>Add dev script to generate HDFS protobufs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2502">HDFS-2502</a>.
+     Minor improvement reported by Eli Collins and fixed by Harsh J (documentation)<br>
+     <b>hdfs-default.xml should include dfs.name.dir.restore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2454">HDFS-2454</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Harsh J (data-node)<br>
+     <b>Move maxXceiverCount check to before starting the thread in dataXceiver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2397">HDFS-2397</a>.
+     Major improvement reported by Todd Lipcon and fixed by Eli Collins (name-node)<br>
+     <b>Undeprecate SecondaryNameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2349">HDFS-2349</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (data-node)<br>
+     <b>DN should log a WARN, not an INFO when it detects a corruption during block transfer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2335">HDFS-2335</a>.
+     Major improvement reported by Eli Collins and fixed by Uma Maheswara Rao G (data-node , name-node)<br>
+     <b>DataNodeCluster and NNStorage always pull fresh entropy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2246">HDFS-2246</a>.
+     Major improvement reported by Sanjay Radia and fixed by Jitendra Nath Pandey <br>
+     <b>Shortcut a local client's reads to a datanode's files directly</b><br>
+     <blockquote>1. New configurations:

+a. dfs.block.local-path-access.user is the key in the datanode configuration that specifies the user allowed to do short circuit reads.

+b. dfs.client.read.shortcircuit is the key that enables short circuit reads in the client side configuration.

+c. dfs.client.read.shortcircuit.skip.checksum is the key that bypasses the checksum check at the client side.

+2. By default none of the above are enabled, so short circuit reads will not kick in.

+3. If security is on, the feature can be used only by users that have Kerberos credentials at the client, so MapReduce tasks cannot benefit from it in general.
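+
+For illustration, a minimal hdfs-site.xml sketch wiring the three keys together (the user name "mapred" is a placeholder chosen for this example, not a default):
+
+<pre>
+&lt;property&gt;
+  &lt;name&gt;dfs.block.local-path-access.user&lt;/name&gt;
+  &lt;value&gt;mapred&lt;/value&gt;   &lt;!-- datanode side: user allowed to read block files directly --&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;dfs.client.read.shortcircuit&lt;/name&gt;
+  &lt;value&gt;true&lt;/value&gt;     &lt;!-- client side: enable short circuit reads --&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;dfs.client.read.shortcircuit.skip.checksum&lt;/name&gt;
+  &lt;value&gt;false&lt;/value&gt;    &lt;!-- client side: keep checksum verification on --&gt;
+&lt;/property&gt;
+</pre>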

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2178">HDFS-2178</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>HttpFS - a read/write Hadoop file system proxy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2130">HDFS-2130</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>Switch default checksum to CRC32C</b><br>
+     <blockquote>The default checksum algorithm used on HDFS is now CRC32C. Data from previous versions of Hadoop can still be read backwards-compatibly.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2129">HDFS-2129</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client , performance)<br>
+     <b>Simplify BlockReader to not inherit from FSInputChecker</b><br>
+     <blockquote>BlockReader has been reimplemented to use direct byte buffers. If you use a custom socket factory, it must generate sockets that have associated Channels.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2080">HDFS-2080</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client , performance)<br>
+     <b>Speed up DFS read path by lessening checksum overhead</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1314">HDFS-1314</a>.
+     Minor bug reported by Karim Saadah and fixed by Sho Shimauchi <br>
+     <b>dfs.blocksize accepts only absolute value</b><br>
+     <blockquote>The blocksize property 'dfs.blocksize' now accepts unit symbols in place of a raw byte count: values such as "10k", "128m", or "1g" may be provided instead of the exact number of bytes.</blockquote></li>
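+     <p>For example, the following hdfs-site.xml entry is now equivalent to spelling out 134217728 bytes:</p>
+     <pre>
+&lt;property&gt;
+  &lt;name&gt;dfs.blocksize&lt;/name&gt;
+  &lt;value&gt;128m&lt;/value&gt;  &lt;!-- 128 * 1024 * 1024 bytes --&gt;
+&lt;/property&gt;
+     </pre>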
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-554">HDFS-554</a>.
+     Minor improvement reported by Steve Loughran and fixed by Harsh J (name-node)<br>
+     <b>BlockInfo.ensureCapacity may get a speedup from System.arraycopy()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-442">HDFS-442</a>.
+     Minor bug reported by Ramya Sunil and fixed by Harsh J (test)<br>
+     <b>dfsthroughput in test.jar throws NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-362">HDFS-362</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>FSEditLog should not write long and short as UTF8, and should not use ArrayWritable for writing non-array items</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-69">HDFS-69</a>.
+     Minor bug reported by Ravi Phulari and fixed by Harsh J <br>
+     <b>Improve dfsadmin command line help </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8055">HADOOP-8055</a>.
+     Major bug reported by Eric Charles and fixed by Harsh J (build)<br>
+     <b>Distribution tar.gz does not contain etc/hadoop/core-site.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8054">HADOOP-8054</a>.
+     Critical bug reported by Amareshwari Sriramadasu and fixed by Daryn Sharp (fs)<br>
+     <b>NPE with FilterFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8052">HADOOP-8052</a>.
+     Major bug reported by Varun Kapoor and fixed by Varun Kapoor (metrics)<br>
+     <b>Hadoop Metrics2 should emit Float.MAX_VALUE (instead of Double.MAX_VALUE) to avoid making Ganglia's gmetad core</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8027">HADOOP-8027</a>.
+     Minor improvement reported by Harsh J and fixed by Aaron T. Myers (metrics)<br>
+     <b>Visiting /jmx on the daemon web interfaces may print unnecessary error in logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8018">HADOOP-8018</a>.
+     Major bug reported by Matt Foley and fixed by Jonathan Eagles (build , test)<br>
+     <b>Hudson auto test for HDFS has started throwing javadoc: warning - Error fetching URL: http://java.sun.com/javase/6/docs/api/package-list</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8015">HADOOP-8015</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>ChRootFileSystem should extend FilterFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8013">HADOOP-8013</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>ViewFileSystem does not honor setVerifyChecksum</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8012">HADOOP-8012</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (scripts)<br>
+     <b>hadoop-daemon.sh and yarn-daemon.sh are trying to mkdir and chown log/pid dirs, which can fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8009">HADOOP-8009</a>.
+     Critical improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Create hadoop-client and hadoop-minicluster artifacts for downstream projects </b><br>
+     <blockquote>Generate integration artifacts "org.apache.hadoop:hadoop-client" and "org.apache.hadoop:hadoop-minicluster" containing all the jars needed to use Hadoop client APIs and to run Hadoop MiniClusters, respectively. These artifacts are pushed to the maven repository on mvn deploy, along with the existing artifacts.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8006">HADOOP-8006</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Daryn Sharp (fs)<br>
+     <b>TestFSInputChecker is failing in trunk.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8002">HADOOP-8002</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>SecurityUtil acquired token message should be a debug rather than info</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8001">HADOOP-8001</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>ChecksumFileSystem's rename doesn't correctly handle checksum files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8000">HADOOP-8000</a>.
+     Critical bug reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>fetchdt command not available in bin/hadoop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7999">HADOOP-7999</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (scripts)<br>
+     <b>"hadoop archive" fails with ClassNotFoundException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7998">HADOOP-7998</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>ChecksumFileSystem does not correctly honor setVerifyChecksum</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7993">HADOOP-7993</a>.
+     Major bug reported by Anupam Seth and fixed by Anupam Seth (conf)<br>
+     <b>Hadoop ignores old-style config options for enabling compressed output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7988">HADOOP-7988</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Uppercase in the hostname part of principals doesn't work with Kerberos.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7987">HADOOP-7987</a>.
+     Major improvement reported by Devaraj Das and fixed by Jitendra Nath Pandey (security)<br>
+     <b>Support setting the run-as user in unsecure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7986">HADOOP-7986</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Add config for History Server protocol in hadoop-policy for service level authorization.</b><br>
+     <blockquote>Adding config for MapReduce History Server protocol in hadoop-policy.xml for service level authorization.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7982">HADOOP-7982</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (security)<br>
+     <b>UserGroupInformation fails to login if thread's context classloader can't load HadoopLoginModule</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7981">HADOOP-7981</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (io)<br>
+     <b>Improve documentation for org.apache.hadoop.io.compress.Decompressor.getRemaining</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7975">HADOOP-7975</a>.
+     Minor bug reported by Harsh J and fixed by Harsh J <br>
+     <b>Add entry to XML defaults for new LZ4 codec</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7974">HADOOP-7974</a>.
+     Major bug reported by Eli Collins and fixed by Harsh J (fs , test)<br>
+     <b>TestViewFsTrash incorrectly determines the user's home directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7971">HADOOP-7971</a>.
+     Blocker bug reported by Thomas Graves and fixed by Prashant Sharma <br>
+     <b>hadoop &lt;job/queue/pipes&gt; removed - should be added back, but deprecated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7964">HADOOP-7964</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Daryn Sharp (security , util)<br>
+     <b>Deadlock in class init.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7963">HADOOP-7963</a>.
+     Blocker bug reported by Thomas Graves and fixed by Siddharth Seth <br>
+     <b>test failures: TestViewFileSystemWithAuthorityLocalFileSystem and TestViewFileSystemLocalFileSystem</b><br>
+     <blockquote>Fix ViewFS to catch a null canonical service-name and pass tests TestViewFileSystem*</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7949">HADOOP-7949</a>.
+     Trivial bug reported by Eli Collins and fixed by Eli Collins (ipc)<br>
+     <b>Updated maxIdleTime default in the code to match core-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7948">HADOOP-7948</a>.
+     Minor bug reported by Michajlo Matijkiw and fixed by Michajlo Matijkiw (build)<br>
+     <b>Shell scripts created by hadoop-dist/pom.xml to build tar do not properly propagate failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7939">HADOOP-7939</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build , conf , documentation , scripts)<br>
+     <b>Improve Hadoop subcomponent integration in Hadoop 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7936">HADOOP-7936</a>.
+     Major bug reported by Eli Collins and fixed by Alejandro Abdelnur (build)<br>
+     <b>There's a Hoop README in the root dir of the tarball</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7934">HADOOP-7934</a>.
+     Critical improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Normalize dependencies versions across all modules</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7933">HADOOP-7933</a>.
+     Critical bug reported by Siddharth Seth and fixed by Siddharth Seth (viewfs)<br>
+     <b>Viewfs changes for MAPREDUCE-3529</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7919">HADOOP-7919</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (documentation)<br>
+     <b>[Doc] Remove hadoop.logfile.* properties.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7917">HADOOP-7917</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>compilation of protobuf files fails in windows/cygwin</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7914">HADOOP-7914</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (build)<br>
+     <b>duplicate declaration of hadoop-hdfs test-jar</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7912">HADOOP-7912</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (build)<br>
+     <b>test-patch should run eclipse:eclipse to verify that it does not break again</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7910">HADOOP-7910</a>.
+     Minor improvement reported by Sho Shimauchi and fixed by Sho Shimauchi (conf)<br>
+     <b>add configuration methods to handle human readable size values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7907">HADOOP-7907</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop-tools JARs are not part of the distro</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7902">HADOOP-7902</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Alejandro Abdelnur <br>
+     <b>Skipping the name-rules setting (if already set) should be done on UGI initialization only</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7898">HADOOP-7898</a>.
+     Minor bug reported by Suresh Srinivas and fixed by Suresh Srinivas (security)<br>
+     <b>Fix javadoc warnings in AuthenticationToken.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7890">HADOOP-7890</a>.
+     Trivial improvement reported by Koji Noguchi and fixed by Koji Noguchi (scripts)<br>
+     <b>Redirect hadoop script's deprecation message to stderr</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7887">HADOOP-7887</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KerberosAuthenticatorHandler is not setting KerberosName name rules from configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7878">HADOOP-7878</a>.
+     Minor bug reported by Steve Loughran and fixed by Steve Loughran (util)<br>
+     <b>Regression HADOOP-7777 switch changes break HDFS tests when the isSingleSwitch() predicate is used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7877">HADOOP-7877</a>.
+     Major task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (documentation)<br>
+     <b>Federation: update Balancer documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7874">HADOOP-7874</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>native libs should be under lib/native/ dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7870">HADOOP-7870</a>.
+     Major bug reported by Jonathan Hsieh and fixed by Jonathan Hsieh <br>
+     <b>fix SequenceFile#createWriter with boolean createParent arg to respect createParent.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7864">HADOOP-7864</a>.
+     Major bug reported by Andrew Bayer and fixed by Andrew Bayer (build)<br>
+     <b>Building mvn site with Maven &lt; 3.0.2 causes OOM errors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7859">HADOOP-7859</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>TestViewFsHdfs.testgetFileLinkStatus is failing an assert</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7858">HADOOP-7858</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Drop some info logging to DEBUG level in IPC, metrics, and HTTP</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7854">HADOOP-7854</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>UGI getCurrentUser is not synchronized</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7853">HADOOP-7853</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (security)<br>
+     <b>multiple javax security configurations cause conflicts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7851">HADOOP-7851</a>.
+     Major bug reported by Amar Kamat and fixed by Uma Maheswara Rao G (conf)<br>
+     <b>Configuration.getClasses() never returns the default value.</b><br>
+     <blockquote>Fixed Configuration.getClasses() API to return the default value if the key is not set.</blockquote></li>
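A short illustration of the fixed behavior described in the entry above, assuming the varargs Configuration.getClasses(String, Class...) signature; the key name is hypothetical:

    import org.apache.hadoop.conf.Configuration;

    public class GetClassesDefaultDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "my.codec.classes" is a hypothetical key that is not set, so the
        // supplied defaults are returned rather than null after this fix.
        Class<?>[] classes =
            conf.getClasses("my.codec.classes", String.class, Integer.class);
        for (Class<?> c : classes) {
          System.out.println(c.getName());  // java.lang.String, java.lang.Integer
        }
      }
    }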
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7843">HADOOP-7843</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>compilation failing because workDir not initialized in RunJar.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7841">HADOOP-7841</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (build)<br>
+     <b>Run tests with non-secure random</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7837">HADOOP-7837</a>.
+     Major bug reported by Steve Loughran and fixed by Eli Collins (conf)<br>
+     <b>no NullAppender in the log4j config</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7813">HADOOP-7813</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (build , test)<br>
+     <b>test-patch +1 patches that introduce javadoc and findbugs warnings in some cases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7811">HADOOP-7811</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (security , test)<br>
+     <b>TestUserGroupInformation#testGetServerSideGroups test fails in chroot</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7810">HADOOP-7810</a>.
+     Blocker bug reported by John George and fixed by John George <br>
+     <b>move hadoop archive to core from tools</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7808">HADOOP-7808</a>.
+     Major new feature reported by Daryn Sharp and fixed by Daryn Sharp (fs , security)<br>
+     <b>Port token service changes from 205</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7804">HADOOP-7804</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>enable hadoop config generator to set dfs.block.local-path-access.user to enable short circuit read</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7802">HADOOP-7802</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; <br>
+     <b>Hadoop scripts unconditionally source "$bin"/../libexec/hadoop-config.sh.</b><br>
+     <blockquote>Here is a patch to enable this behavior

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7801">HADOOP-7801</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (build)<br>
+     <b>HADOOP_PREFIX cannot be overridden</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7787">HADOOP-7787</a>.
+     Major bug reported by Bruno Mah&#233; and fixed by Bruno Mah&#233; (build)<br>
+     <b>Make source tarball use conventional name.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7761">HADOOP-7761</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (io , performance , util)<br>
+     <b>Improve performance of raw comparisons</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7758">HADOOP-7758</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (fs)<br>
+     <b>Make GlobFilter class public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7736">HADOOP-7736</a>.
+     Trivial improvement reported by Harsh J and fixed by Harsh J (fs)<br>
+     <b>Remove duplicate call of Path#normalizePath during initialization.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7657">HADOOP-7657</a>.
+     Major improvement reported by Bert Sanders and fixed by Binglin Chang <br>
+     <b>Add support for LZ4 compression</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7590">HADOOP-7590</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Mavenize streaming and MR examples</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7574">HADOOP-7574</a>.
+     Trivial improvement reported by XieXianshan and fixed by XieXianshan (fs)<br>
+     <b>Improvement for FsShell -stat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7504">HADOOP-7504</a>.
+     Trivial improvement reported by Eli Collins and fixed by Harsh J (metrics)<br>
+     <b>hadoop-metrics.properties missing some Ganglia31 options </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7470">HADOOP-7470</a>.
+     Minor improvement reported by Steve Loughran and fixed by Enis Soztutar (util)<br>
+     <b>move up to Jackson 1.8.8</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7424">HADOOP-7424</a>.
+     Major improvement reported by Eli Collins and fixed by Uma Maheswara Rao G <br>
+     <b>Log an error if the topology script doesn't handle multiple args</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7348">HADOOP-7348</a>.
+     Major improvement reported by XieXianshan and fixed by XieXianshan (fs)<br>
+     <b>Modify the option of FsShell getmerge from [addnl] to [-nl] for consistency</b><br>
+     <blockquote>The 'fs -getmerge' tool now uses a -nl flag to control whether a newline is added at the end of each file, replacing the 'addnl' boolean flag that was used earlier. (See the sketch after this entry.)</blockquote></li>
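The sketch below drives the renamed flag programmatically through FsShell, equivalent to 'hadoop fs -getmerge -nl /user/demo/parts merged.txt' on the command line; the paths are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class GetMergeDemo {
      public static void main(String[] args) throws Exception {
        // -nl appends a newline after each merged file (hypothetical paths).
        int rc = ToolRunner.run(new Configuration(), new FsShell(),
            new String[] {"-getmerge", "-nl", "/user/demo/parts", "merged.txt"});
        System.exit(rc);
      }
    }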
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6886">HADOOP-6886</a>.
+     Minor improvement reported by Nicolas Spiegelberg and fixed by Nicolas Spiegelberg (fs)<br>
+     <b>LocalFileSystem Needs createNonRecursive API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6840">HADOOP-6840</a>.
+     Minor improvement reported by Nicolas Spiegelberg and fixed by Nicolas Spiegelberg (fs , io)<br>
+     <b>Support non-recursive create() in FileSystem &amp; SequenceFile.Writer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6614">HADOOP-6614</a>.
+     Minor improvement reported by Steve Loughran and fixed by Jonathan Hsieh (util)<br>
+     <b>RunJar should provide more diags when it can't create a temp file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6490">HADOOP-6490</a>.
+     Minor bug reported by Zheng Shao and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>Path.normalize should use StringUtils.replace in favor of String.replace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-4515">HADOOP-4515</a>.
+     Minor improvement reported by Abhijit Bagri and fixed by Sho Shimauchi <br>
+     <b>conf.getBoolean must be case insensitive</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  0.23.0 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  0.23.0 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 1.0.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3332">MAPREDUCE-3332</a>.
+     Trivial bug reported by Hitesh Shah and fixed by Hitesh Shah (contrib/raid)<br>
+     <b>contrib/raid compile breaks due to changes in hdfs/protocol/datatransfer/Sender#writeBlock related to checksum handling </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3322">MAPREDUCE-3322</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Create a better index.html for maven docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3321">MAPREDUCE-3321</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Disable some failing legacy tests for MRv2 builds to go through</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3317">MAPREDUCE-3317</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (tools/rumen)<br>
+     <b>Rumen TraceBuilder is emitting null as hostname</b><br>
+     <blockquote>Fixes Rumen to get correct hostName that includes rackName in attempt info.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3316">MAPREDUCE-3316</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (resourcemanager)<br>
+     <b>Rebooted link is not working properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3313">MAPREDUCE-3313</a>.
+     Blocker bug reported by Ravi Gummadi and fixed by Hitesh Shah (mrv2 , test)<br>
+     <b>TestResourceTrackerService failing in trunk some times</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3306">MAPREDUCE-3306</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , nodemanager)<br>
+     <b>Cannot run apps after MAPREDUCE-2989</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3304">MAPREDUCE-3304</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2 , test)<br>
+     <b>TestRMContainerAllocator#testBlackListedNodes fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3296">MAPREDUCE-3296</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (build)<br>
+     <b>Pending(9) findBugs warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3295">MAPREDUCE-3295</a>.
+     Critical bug reported by Mahadev konar and fixed by  <br>
+     <b>TestAMAuthorization failing on branch 0.23.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3292">MAPREDUCE-3292</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>In secure mode job submission fails with Provider org.apache.hadoop.mapreduce.security.token.JobTokenIndentifier$Renewer not found.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3290">MAPREDUCE-3290</a>.
+     Major bug reported by Ramya Sunil and fixed by Arun C Murthy (mrv2)<br>
+     <b>list-active-trackers throws NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3288">MAPREDUCE-3288</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>Mapreduce 23 builds failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3285">MAPREDUCE-3285</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Siddharth Seth (mrv2)<br>
+     <b>Tests on branch-0.23 failing </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3284">MAPREDUCE-3284</a>.
+     Major bug reported by Ramya Sunil and fixed by Arun C Murthy (mrv2)<br>
+     <b>bin/mapred queue fails with JobQueueClient ClassNotFoundException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3282">MAPREDUCE-3282</a>.
+     Critical bug reported by Ramya Sunil and fixed by Arun C Murthy (mrv2)<br>
+     <b>bin/mapred job -list throws exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3281">MAPREDUCE-3281</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (test)<br>
+     <b>TestLinuxContainerExecutorWithMocks failing on trunk.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3279">MAPREDUCE-3279</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>TestJobHistoryParsing broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3275">MAPREDUCE-3275</a>.
+     Critical improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (documentation , mrv2)<br>
+     <b>Add docs for WebAppProxy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3274">MAPREDUCE-3274</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (applicationmaster , mrv2)<br>
+     <b>Race condition in MR App Master Preemption can cause a deadlock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3269">MAPREDUCE-3269</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>Jobsummary logs not being moved to a separate file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3264">MAPREDUCE-3264</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Arun C Murthy (mrv2)<br>
+     <b>mapreduce.job.user.name needs to be set automatically</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3263">MAPREDUCE-3263</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Hitesh Shah (build , mrv2)<br>
+     <b>compile-mapred-test target fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3262">MAPREDUCE-3262</a>.
+     Critical bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2 , nodemanager)<br>
+     <b>A few events are not handled by the NodeManager in failure scenarios</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3261">MAPREDUCE-3261</a>.
+     Major bug reported by Chris Riccomini and fixed by  (applicationmaster)<br>
+     <b>AM unable to release containers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3259">MAPREDUCE-3259</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee (mrv2 , nodemanager)<br>
+     <b>ContainerLocalizer should get the proper java.library.path from LinuxContainerExecutor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3258">MAPREDUCE-3258</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Job counters missing from AM and history UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3257">MAPREDUCE-3257</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2 , resourcemanager , security)<br>
+     <b>Authorization checks needed for AM-&gt;RM protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3256">MAPREDUCE-3256</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2 , nodemanager , security)<br>
+     <b>Authorization checks needed for AM-&gt;NM protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3254">MAPREDUCE-3254</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Arun C Murthy (contrib/streaming , mrv2)<br>
+     <b>Streaming jobs failing with PipeMapRunner ClassNotFoundException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3253">MAPREDUCE-3253</a>.
+     Blocker bug reported by Daniel Dai and fixed by Arun C Murthy (mrv2)<br>
+     <b>ContextFactory throws NoSuchFieldException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3252">MAPREDUCE-3252</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (mrv2 , task)<br>
+     <b>MR2: Map tasks rewrite data once even if output fits in sort buffer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3250">MAPREDUCE-3250</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>When AM restarts, client keeps reconnecting to the new AM and prints a lot of logs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3249">MAPREDUCE-3249</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>Recovery of MR AMs with reduces fails the subsequent generation of the job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3248">MAPREDUCE-3248</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Vinod Kumar Vavilapalli (test)<br>
+     <b>Log4j logs from unit tests are lost</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3242">MAPREDUCE-3242</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Trunk compilation broken with bad interaction from MAPREDUCE-3070 and MAPREDUCE-3239.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3241">MAPREDUCE-3241</a>.
+     Major bug reported by Devaraj K and fixed by Amar Kamat <br>
+     <b>(Rumen)TraceBuilder throws IllegalArgumentException</b><br>
+     <blockquote>Rumen is fixed to ignore the AMRestartedEvent.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3240">MAPREDUCE-3240</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Hitesh Shah (mrv2 , nodemanager)<br>
+     <b>NM should send a SIGKILL for completed containers also</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3239">MAPREDUCE-3239</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Use new createSocketAddr API in MRv2 to give better error messages on misconfig</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3237">MAPREDUCE-3237</a>.
+     Major improvement reported by Tom White and fixed by Tom White (client)<br>
+     <b>Move LocalJobRunner to hadoop-mapreduce-client-core module</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3233">MAPREDUCE-3233</a>.
+     Blocker sub-task reported by Karam Singh and fixed by Mahadev konar (mrv2)<br>
+     <b>AM fails to restart when first AM is killed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3228">MAPREDUCE-3228</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>MR AM hangs when one node goes bad</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3226">MAPREDUCE-3226</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , task)<br>
+     <b>Few reduce tasks hanging in a gridmix-run</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3220">MAPREDUCE-3220</a>.
+     Minor sub-task reported by Hitesh Shah and fixed by Devaraj K (mrv2 , test)<br>
+     <b>ant test TestCombineOutputCollector failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3212">MAPREDUCE-3212</a>.
+     Minor bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Bhallamudi Venkata Siva Kamesh (mrv2)<br>
+     <b>Messages displayed while executing yarn commands should be proper</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3209">MAPREDUCE-3209</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (build , mrv2)<br>
+     <b>Jenkins reports 160 FindBugs warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3208">MAPREDUCE-3208</a>.
+     Minor bug reported by liangzhaowang and fixed by liangzhaowang (mrv2)<br>
+     <b>NPE while flushing TaskLogAppender</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3205">MAPREDUCE-3205</a>.
+     Blocker improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2 , nodemanager)<br>
+     <b>MR2 memory limits should be pmem, not vmem</b><br>
+     <blockquote>Resource limits are now expressed and enforced in terms of physical memory, rather than virtual memory. The virtual memory limit is set as a configurable multiple of the physical limit. The NodeManager's memory usage is now configured in units of MB rather than GB.</blockquote></li>
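A configuration sketch of the model described in the entry above; the property names (yarn.nodemanager.resource.memory-mb and yarn.nodemanager.vmem-pmem-ratio) and the values are assumptions commonly associated with this change, not taken from the note itself:

    import org.apache.hadoop.conf.Configuration;

    public class NodeManagerMemoryConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed property name: physical memory for containers, now in MB.
        conf.setInt("yarn.nodemanager.resource.memory-mb", 8192);
        // Assumed property name: virtual memory allowed as a configurable
        // multiple of the physical limit.
        conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 2.1f);
        System.out.println(conf.get("yarn.nodemanager.resource.memory-mb"));
      }
    }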
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3204">MAPREDUCE-3204</a>.
+     Major bug reported by Suresh Srinivas and fixed by Alejandro Abdelnur (build)<br>
+     <b>mvn site:site fails on MapReduce</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3203">MAPREDUCE-3203</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Fix some javac warnings in MRAppMaster.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3199">MAPREDUCE-3199</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , test)<br>
+     <b>TestJobMonitorAndPrint is broken on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3198">MAPREDUCE-3198</a>.
+     Trivial bug reported by Hitesh Shah and fixed by Arun C Murthy (mrv2)<br>
+     <b>Change mode for hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor to 755 </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3197">MAPREDUCE-3197</a>.
+     Major bug reported by Anupam Seth and fixed by Mahadev konar (mrv2)<br>
+     <b>TestMRClientService failing on building clean checkout of branch 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3196">MAPREDUCE-3196</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>TestLinuxContainerExecutorWithMocks fails on Mac OSX</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3192">MAPREDUCE-3192</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Fix Javadoc warning in JobClient.java and Cluster.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3190">MAPREDUCE-3190</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>bin/yarn should barf early if HADOOP_COMMON_HOME or HADOOP_HDFS_HOME are not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3189">MAPREDUCE-3189</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Add link decoration back to MR2's CSS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3188">MAPREDUCE-3188</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Lots of errors in logs when daemon startup fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3187">MAPREDUCE-3187</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Add names for various unnamed threads in MR2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3186">MAPREDUCE-3186</a>.
+     Blocker bug reported by Ramgopal N and fixed by Eric Payne (mrv2)<br>
+     <b>User jobs hang if the ResourceManager process goes down and comes back up while a job is executing.</b><br>
+     <blockquote>New Yarn configuration property:

+Name: yarn.app.mapreduce.am.scheduler.connection.retries

+Description: Number of times the AM should retry contacting the RM if the connection is lost.</blockquote></li>
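A minimal sketch of setting the property named above from Java; the retry count of 3 is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class AmRmRetryConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Times the AM retries contacting the RM after a lost connection.
        conf.setInt("yarn.app.mapreduce.am.scheduler.connection.retries", 3);
        System.out.println(
            conf.getInt("yarn.app.mapreduce.am.scheduler.connection.retries", 1));
      }
    }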
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3185">MAPREDUCE-3185</a>.
+     Critical bug reported by Mahadev konar and fixed by Jonathan Eagles (mrv2)<br>
+     <b>RM Web UI does not sort the columns in some cases.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3183">MAPREDUCE-3183</a>.
+     Trivial bug reported by Hitesh Shah and fixed by Hitesh Shah (build)<br>
+     <b>hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml missing license header</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3181">MAPREDUCE-3181</a>.
+     Blocker bug reported by Anupam Seth and fixed by Arun C Murthy (mrv2)<br>
+     <b>Terasort fails with Kerberos exception on secure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3179">MAPREDUCE-3179</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2 , test)<br>
+     <b>Incorrect exit code for hadoop-mapreduce-test tests when exception thrown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3176">MAPREDUCE-3176</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Hitesh Shah (mrv2 , test)<br>
+     <b>ant mapreduce tests are timing out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3175">MAPREDUCE-3175</a>.
+     Blocker sub-task reported by Thomas Graves and fixed by Jonathan Eagles (mrv2)<br>
+     <b>Yarn http servers not created with access control lists</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3171">MAPREDUCE-3171</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>normalize nodemanager native code compilation with common/hdfs native</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3170">MAPREDUCE-3170</a>.
+     Critical bug reported by Mahadev konar and fixed by Hitesh Shah (build , mrv1 , mrv2)<br>
+     <b>Trunk nightly commit builds are failing.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3167">MAPREDUCE-3167</a>.
+     Minor bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>container-executor is not being packaged with the assembly target.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3166">MAPREDUCE-3166</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (tools/rumen)<br>
+     <b>Make Rumen use job history api instead of relying on current history file name format</b><br>
+     <blockquote>Makes Rumen use job history api instead of relying on current history file name format.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3165">MAPREDUCE-3165</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Todd Lipcon (applicationmaster , mrv2)<br>
+     <b>Ensure logging option is set on child command line</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3163">MAPREDUCE-3163</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Mahadev konar (job submission , mrv2)<br>
+     <b>JobClient spews errors when killing MR2 job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3162">MAPREDUCE-3162</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2 , nodemanager)<br>
+     <b>Separate application-init and container-init event types in NM's ApplicationImpl FSM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3161">MAPREDUCE-3161</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>Improve javadoc and fix some typos in MR2 code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3159">MAPREDUCE-3159</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (mrv2)<br>
+     <b>DefaultContainerExecutor removes appcache dir on every localization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3158">MAPREDUCE-3158</a>.
+     Major bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Fix trunk build failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3157">MAPREDUCE-3157</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (tools/rumen)<br>
+     <b>Rumen TraceBuilder is skipping analyzing 0.20 history files</b><br>
+     <blockquote>Fixes TraceBuilder to handle 0.20 history file names also.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3154">MAPREDUCE-3154</a>.
+     Major improvement reported by Abhijit Suresh Shingate and fixed by Abhijit Suresh Shingate (client , mrv2)<br>
+     <b>Validate the Job's Output Specification as the first statement in the JobSubmitter.submitJobInternal(Job, Cluster) method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3153">MAPREDUCE-3153</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Mahadev konar (mrv2 , test)<br>
+     <b>TestFileOutputCommitter.testFailAbort() is failing on trunk on Jenkins</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3148">MAPREDUCE-3148</a>.
+     Blocker sub-task reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>Port MAPREDUCE-2702 to old mapred api</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3146">MAPREDUCE-3146</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2 , nodemanager)<br>
+     <b>Add a MR specific command line to dump logs for a given TaskAttemptID</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3144">MAPREDUCE-3144</a>.
+     Critical sub-task reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2)<br>
+     <b>Augment JobHistory to include information needed for serving aggregated logs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3143">MAPREDUCE-3143</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by  (mrv2 , nodemanager)<br>
+     <b>Complete aggregation of user-logs spit out by containers onto DFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3141">MAPREDUCE-3141</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2 , security)<br>
+     <b>Yarn+MR secure mode is broken, uncovered after MAPREDUCE-3056</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3140">MAPREDUCE-3140</a>.
+     Major bug reported by Bhallamudi Venkata Siva Kamesh and fixed by Subroto Sanyal (mrv2)<br>
+     <b>Invalid JobHistory URL for failed applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3138">MAPREDUCE-3138</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Owen O'Malley (client , mrv2)<br>
+     <b>Allow for applications to deal with MAPREDUCE-954</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3137">MAPREDUCE-3137</a>.
+     Trivial sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Fix broken merge of MR-2719 to 0.23 branch for the distributed shell test case </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3136">MAPREDUCE-3136</a>.
+     Blocker sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Add docs for setting up real-world MRv2 clusters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3134">MAPREDUCE-3134</a>.
+     Blocker sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2 , scheduler)<br>
+     <b>Add documentation for CapacityScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3133">MAPREDUCE-3133</a>.
+     Major improvement reported by Jonathan Eagles and fixed by Jonathan Eagles (build)<br>
+     <b>Running a set of methods in a Single Test Class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3127">MAPREDUCE-3127</a>.
+     Blocker sub-task reported by Amol Kekre and fixed by Arun C Murthy (mrv2 , resourcemanager)<br>
+     <b>Unable to restrict users based on resourcemanager.admin.acls value set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3126">MAPREDUCE-3126</a>.
+     Blocker bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>mr job stuck because reducers using all slots and mapper isn't scheduled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3125">MAPREDUCE-3125</a>.
+     Critical bug reported by Thomas Graves and fixed by Hitesh Shah (mrv2)<br>
+     <b>app master web UI shows reduce task progress 100% even though reducers not complete and state running/scheduled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3124">MAPREDUCE-3124</a>.
+     Blocker bug reported by Thomas Graves and fixed by John George (mrv2)<br>
+     <b>mapper failed with failed to load native libs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3123">MAPREDUCE-3123</a>.
+     Blocker bug reported by Thomas Graves and fixed by Hitesh Shah (mrv2)<br>
+     <b>Symbolic links with special chars causing container/task.sh to fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3114">MAPREDUCE-3114</a>.
+     Major bug reported by Subroto Sanyal and fixed by Subroto Sanyal (mrv2)<br>
+     <b>Invalid ApplicationMaster URL in Applications Page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3113">MAPREDUCE-3113</a>.
+     Minor improvement reported by XieXianshan and fixed by XieXianshan (mrv2)<br>
+     <b>the scripts yarn-daemon.sh and yarn are not working properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3112">MAPREDUCE-3112</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (contrib/streaming)<br>
+     <b>Calling hadoop cli inside mapreduce job leads to errors</b><br>
+     <blockquote>Removed inheritance of certain server environment variables (HADOOP_OPTS and HADOOP_ROOT_LOGGER) in task attempt process.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3110">MAPREDUCE-3110</a>.
+     Major bug reported by Devaraj K and fixed by Vinod Kumar Vavilapalli (mrv2 , test)<br>
+     <b>TestRPC.testUnknownCall() is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3104">MAPREDUCE-3104</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , resourcemanager , security)<br>
+     <b>Implement Application ACLs, Queue ACLs and their interaction</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3099">MAPREDUCE-3099</a>.
+     Major sub-task reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Add docs for setting up a single node MRv2 cluster.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3098">MAPREDUCE-3098</a>.
+     Blocker sub-task reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Report Application status as well as ApplicationMaster status in GetApplicationReportResponse </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3095">MAPREDUCE-3095</a>.
+     Major bug reported by John George and fixed by John George (mrv2)<br>
+     <b>fairscheduler ivy including wrong version for hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3092">MAPREDUCE-3092</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>Remove JOB_ID_COMPARATOR usage in JobHistory.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3090">MAPREDUCE-3090</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy (applicationmaster , mrv2)<br>
+     <b>Change MR AM to use ApplicationAttemptId rather than &lt;applicationId, startCount&gt; everywhere</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3087">MAPREDUCE-3087</a>.
+     Critical bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>CLASSPATH not the same after MAPREDUCE-2880</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3081">MAPREDUCE-3081</a>.
+     Major bug reported by vitthal (Suhas) Gogate and fixed by  (contrib/vaidya)<br>
+     <b>Change the name format for hadoop core and vaidya jar to be hadoop-{core/vaidya}-{version}.jar in vaidya.sh</b><br>
+     <blockquote>contrib/vaidya/bin/vaidya.sh script fixed to use appropriate jars and classpath </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3078">MAPREDUCE-3078</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2 , resourcemanager)<br>
+     <b>Application's progress isn't updated from AM to RM.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3073">MAPREDUCE-3073</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Build failure for MRv1 caused due to changes to MRConstants.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3071">MAPREDUCE-3071</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>app master configuration web UI link under the Job menu opens up application menu</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3070">MAPREDUCE-3070</a>.
+     Blocker bug reported by Ravi Teja Ch N V and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>NM not able to register with RM after NM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3068">MAPREDUCE-3068</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Chris Riccomini (mrv2)<br>
+     <b>Should set MALLOC_ARENA_MAX for all YARN daemons and AMs/Containers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3067">MAPREDUCE-3067</a>.
+     Blocker bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Container exit status not set properly to launched process's exit code on successful completion of process</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3066">MAPREDUCE-3066</a>.
+     Major bug reported by Chris Riccomini and fixed by Chris Riccomini (mrv2 , nodemanager)<br>
+     <b>YARN NM fails to start</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3064">MAPREDUCE-3064</a>.
+     Blocker bug reported by Thomas Graves and fixed by Venu Gopala Rao <br>
+     <b>27 unit test failures with Invalid "mapreduce.jobtracker.address" configuration value for JobTracker: "local"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3062">MAPREDUCE-3062</a>.
+     Major bug reported by Chris Riccomini and fixed by Chris Riccomini (mrv2 , nodemanager , resourcemanager)<br>
+     <b>YARN NM/RM fail to start</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3059">MAPREDUCE-3059</a>.
+     Blocker bug reported by Karam Singh and fixed by Devaraj K (mrv2)<br>
+     <b>QueueMetrics do not have metrics for aggregate containers-allocated and aggregate containers-released</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3058">MAPREDUCE-3058</a>.
+     Critical bug reported by Karam Singh and fixed by Vinod Kumar Vavilapalli (contrib/gridmix , mrv2)<br>
+     <b>Sometimes task keeps on running while its Syslog says that it is shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3057">MAPREDUCE-3057</a>.
+     Blocker bug reported by Karam Singh and fixed by Eric Payne (jobhistoryserver , mrv2)<br>
+     <b>Job History Server runs out of memory with 1200 Jobs and Heap Size set to 10 GB</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3056">MAPREDUCE-3056</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (applicationmaster , mrv2)<br>
+     <b>Jobs are failing when those are submitted by other users</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3055">MAPREDUCE-3055</a>.
+     Minor bug reported by Hitesh Shah and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Simplify parameter passing to Application Master from Client. Simplify the approach to pass info such as appId, ClusterTimestamp and failcount required by App Master.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3054">MAPREDUCE-3054</a>.
+     Blocker bug reported by Siddharth Seth and fixed by Mahadev konar (mrv2)<br>
+     <b>Unable to kill submitted jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3053">MAPREDUCE-3053</a>.
+     Major bug reported by Chris Riccomini and fixed by Vinod Kumar Vavilapalli (mrv2 , resourcemanager)<br>
+     <b>YARN Protobuf RPC Failures in RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3050">MAPREDUCE-3050</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2 , resourcemanager)<br>
+     <b>YarnScheduler needs to expose Resource Usage Information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3048">MAPREDUCE-3048</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (build)<br>
+     <b>Fix test-patch to run tests via "mvn clean install test"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3044">MAPREDUCE-3044</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>Pipes jobs stuck without making progress</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3042">MAPREDUCE-3042</a>.
+     Major bug reported by Chris Riccomini and fixed by Chris Riccomini (mrv2 , resourcemanager)<br>
+     <b>YARN RM fails to start</b><br>
+     <blockquote>Simple typo fix to allow ResourceManager to start instead of failing</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3041">MAPREDUCE-3041</a>.
+     Blocker bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>Enhance YARN Client-RM protocol to provide access to information such as cluster's Min/Max Resource capabilities similar to that of AM-RM protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3040">MAPREDUCE-3040</a>.
+     Major bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>TestMRJobs, TestMRJobsWithHistoryService, TestMROldApiJobs fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3038">MAPREDUCE-3038</a>.
+     Blocker bug reported by Thomas Graves and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>job history server not starting because conf() missing HsController</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3036">MAPREDUCE-3036</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Some of the Resource Manager memory metrics go negative.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3035">MAPREDUCE-3035</a>.
+     Critical bug reported by Karam Singh and fixed by chackaravarthy (mrv2)<br>
+     <b>MR V2 jobhistory does not contain rack information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3033">MAPREDUCE-3033</a>.
+     Blocker bug reported by Karam Singh and fixed by Hitesh Shah (job submission , mrv2)<br>
+     <b>JobClient requires mapreduce.jobtracker.address config even when mapreduce.framework.name is set to yarn</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3032">MAPREDUCE-3032</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Devaraj K (applicationmaster , mrv2)<br>
+     <b>JobHistory doesn't have error information from failed tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3031">MAPREDUCE-3031</a>.
+     Blocker bug reported by Karam Singh and fixed by Siddharth Seth (mrv2)<br>
+     <b>Job Client goes into infinite loop when we kill AM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3030">MAPREDUCE-3030</a>.
+     Blocker bug reported by Devaraj K and fixed by Devaraj K (mrv2 , resourcemanager)<br>
+     <b>RM is not processing heartbeat and continuously giving the message 'Node not found rebooting'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3028">MAPREDUCE-3028</a>.
+     Blocker bug reported by Mohammad Kamrul Islam and fixed by Ravi Prakash (mrv2)<br>
+     <b>Support job end notification in .next /0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3023">MAPREDUCE-3023</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Queue state is not being translated properly (is always assumed to be running)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3021">MAPREDUCE-3021</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>all yarn webapps use same base name of "yarn/"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3020">MAPREDUCE-3020</a>.
+     Major bug reported by chackaravarthy and fixed by chackaravarthy (jobhistoryserver)<br>
+     <b>Node link in reduce task attempt page is not working [Job History Page]</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3018">MAPREDUCE-3018</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Streaming jobs with -file option fail to run.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3017">MAPREDUCE-3017</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>The Web UI shows FINISHED for killed/successful/failed jobs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3014">MAPREDUCE-3014</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Rename and invert logic of '-cbuild' profile to 'native' and off by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3013">MAPREDUCE-3013</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2 , security)<br>
+     <b>Remove YarnConfiguration.YARN_SECURITY_INFO</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3007">MAPREDUCE-3007</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (jobhistoryserver , mrv2)<br>
+     <b>JobClient cannot talk to JobHistory server in secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3006">MAPREDUCE-3006</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>MapReduce AM exits prematurely before completely writing and closing the JobHistory file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3005">MAPREDUCE-3005</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Arun C Murthy (mrv2)<br>
+     <b>MR app hangs because of a NPE in ResourceManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3004">MAPREDUCE-3004</a>.
+     Minor bug reported by Hitesh Shah and fixed by Hitesh Shah (mrv2)<br>
+     <b>sort example fails in shuffle/reduce stage as it assumes a local job by default </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3003">MAPREDUCE-3003</a>.
+     Major bug reported by Tom White and fixed by Alejandro Abdelnur (build)<br>
+     <b>Publish MR JARs to Maven snapshot repository</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3001">MAPREDUCE-3001</a>.
+     Blocker improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (jobhistoryserver , mrv2)<br>
+     <b>Map Reduce JobHistory and AppMaster UI should have ability to display task specific counters.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2999">MAPREDUCE-2999</a>.
+     Critical bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>hadoop.http.filter.initializers not working properly on yarn UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2998">MAPREDUCE-2998</a>.
+     Critical bug reported by Jeffrey Naisbitt and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Failing to contact Am/History for jobs: java.io.EOFException in DataInputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2997">MAPREDUCE-2997</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (applicationmaster , mrv2)<br>
+     <b>MR task fails before launch itself with an NPE in ContainerLauncher</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2996">MAPREDUCE-2996</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Jonathan Eagles (jobhistoryserver , mrv2)<br>
+     <b>Log uberized information into JobHistory and use the same via CompletedJob</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2995">MAPREDUCE-2995</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>MR AM crashes when a container-launch hangs on a faulty NM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2994">MAPREDUCE-2994</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2 , resourcemanager)<br>
+     <b>Parse Error is coming for App ID when we click application link on the RM UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2991">MAPREDUCE-2991</a>.
+     Major bug reported by Priyo Mustafi and fixed by Priyo Mustafi (scheduler)<br>
+     <b>queueinfo.jsp fails to show queue status if any Capacity scheduler queue name has a dash/hyphen in it.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2990">MAPREDUCE-2990</a>.
+     Blocker improvement reported by Mahadev konar and fixed by Subroto Sanyal (mrv2)<br>
+     <b>Health Report on Resource Manager UI is null if the NMs are all healthy.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2989">MAPREDUCE-2989</a>.
+     Critical sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>JobHistory should link to task logs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2988">MAPREDUCE-2988</a>.
+     Critical sub-task reported by Eric Payne and fixed by Robert Joseph Evans (mrv2 , security , test)<br>
+     <b>Reenable TestLinuxContainerExecutor reflecting the current NM code. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2987">MAPREDUCE-2987</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>RM UI display logged in user as null</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2986">MAPREDUCE-2986</a>.
+     Critical task reported by Anupam Seth and fixed by Anupam Seth (mrv2 , test)<br>
+     <b>Multiple node managers support for the MiniYARNCluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2985">MAPREDUCE-2985</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>findbugs error in ResourceLocalizationService.handle(LocalizationEvent)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2984">MAPREDUCE-2984</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (mrv2 , nodemanager)<br>
+     <b>Throwing NullPointerException when we open the container page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2979">MAPREDUCE-2979</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>Remove ClientProtocolProvider configuration under mapreduce-client-core</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2977">MAPREDUCE-2977</a>.
+     Blocker sub-task reported by Owen O'Malley and fixed by Arun C Murthy (mrv2 , resourcemanager , security)<br>
+     <b>ResourceManager needs to renew and cancel tokens associated with a job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2975">MAPREDUCE-2975</a>.
+     Blocker bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>ResourceManager Delegate is not getting initialized with yarn-site.xml as default configuration.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2971">MAPREDUCE-2971</a>.
+     Blocker bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>ant build mapreduce fails: protected access jc.displayJobList(jobs);</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2970">MAPREDUCE-2970</a>.
+     Major bug reported by Venu Gopala Rao and fixed by Venu Gopala Rao (job submission , mrv2)<br>
+     <b>Null Pointer Exception while submitting a Job, if mapreduce.framework.name property is not set.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2966">MAPREDUCE-2966</a>.
+     Major improvement reported by Abhijit Suresh Shingate and fixed by Abhijit Suresh Shingate (applicationmaster , jobhistoryserver , nodemanager , resourcemanager)<br>
+     <b>Add ShutDown hooks for MRV2 processes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2965">MAPREDUCE-2965</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2)<br>
+     <b>Streamline hashCode(), equals(), compareTo() and toString() for all IDs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2963">MAPREDUCE-2963</a>.
+     Critical bug reported by Mahadev konar and fixed by Siddharth Seth <br>
+     <b>TestMRJobs hangs waiting to connect to history server.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2961">MAPREDUCE-2961</a>.
+     Blocker improvement reported by Mahadev konar and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Increase the default threadpool size for container launching in the application master.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2958">MAPREDUCE-2958</a>.
+     Critical bug reported by Thomas Graves and fixed by Arun C Murthy (mrv2)<br>
+     <b>mapred-default.xml not merged from mr279</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2954">MAPREDUCE-2954</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2)<br>
+     <b>Deadlock in NM with threads racing for ApplicationAttemptId</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2953">MAPREDUCE-2953</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Thomas Graves (mrv2 , resourcemanager)<br>
+     <b>JobClient fails due to a race in RM, removes staged files and in turn crashes MR AM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2952">MAPREDUCE-2952</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Arun C Murthy (mrv2 , resourcemanager)<br>
+     <b>Application failure diagnostics are not consumed in a couple of cases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2949">MAPREDUCE-2949</a>.
+     Major bug reported by Ravi Teja Ch N V and fixed by Ravi Teja Ch N V (mrv2 , nodemanager)<br>
+     <b>NodeManager in an inconsistent state if a service startup fails.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2948">MAPREDUCE-2948</a>.
+     Major bug reported by Milind Bhandarkar and fixed by Mahadev konar (contrib/streaming)<br>
+     <b>Hadoop streaming test failure, post MR-2767</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2947">MAPREDUCE-2947</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Sort fails on YARN+MR with lots of task failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2938">MAPREDUCE-2938</a>.
+     Trivial bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2 , scheduler)<br>
+     <b>Missing log statement for app submission failures in CS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2937">MAPREDUCE-2937</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Errors in Application failures are not shown in the client trace.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2936">MAPREDUCE-2936</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Contrib Raid compilation broken after HDFS-1620</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2933">MAPREDUCE-2933</a>.
+     Blocker sub-task reported by Arun C Murthy and fixed by Arun C Murthy (applicationmaster , mrv2 , nodemanager , resourcemanager)<br>
+     <b>Change allocate call to return ContainerStatus for completed containers rather than Container </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2930">MAPREDUCE-2930</a>.
+     Major improvement reported by Sharad Agarwal and fixed by Binglin Chang (mrv2)<br>
+     <b>Generate state graph from the State Machine Definition</b><br>
+     <blockquote>Generate state graph from State Machine Definition</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2925">MAPREDUCE-2925</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mrv2)<br>
+     <b>job -status &lt;JOB_ID&gt; continuously gives info messages for completed jobs on the console</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2917">MAPREDUCE-2917</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2 , resourcemanager)<br>
+     <b>Corner case in container reservations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2916">MAPREDUCE-2916</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Ivy build for MRv1 fails with bad organization for common daemon.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2913">MAPREDUCE-2913</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Jonathan Eagles (mrv2 , test)<br>
+     <b>TestMRJobs.testFailingMapper does not assert the correct thing.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2909">MAPREDUCE-2909</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Docs for remaining records in yarn-api</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2908">MAPREDUCE-2908</a>.
+     Critical bug reported by Mahadev konar and fixed by Vinod Kumar Vavilapalli (mrv2)<br>
+     <b>Fix findbugs warnings in Map Reduce.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2907">MAPREDUCE-2907</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2 , resourcemanager)<br>
+     <b>ResourceManager logs filled with [INFO] debug messages from org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2904">MAPREDUCE-2904</a>.
+     Major bug reported by Sharad Agarwal and fixed by Sharad Agarwal <br>
+     <b>HDFS jars added incorrectly to yarn classpath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2899">MAPREDUCE-2899</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (mrv2 , resourcemanager)<br>
+     <b>Replace major parts of ApplicationSubmissionContext with a ContainerLaunchContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2898">MAPREDUCE-2898</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Docs for core protocols in yarn-api - ContainerManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2897">MAPREDUCE-2897</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Docs for core protocols in yarn-api - ClientRMProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2896">MAPREDUCE-2896</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>Remove all apis other than getters and setters in all org/apache/hadoop/yarn/api/records/*</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2894">MAPREDUCE-2894</a>.
+     Blocker improvement reported by Arun C Murthy and fixed by  (mrv2)<br>
+     <b>Improvements to YARN apis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2893">MAPREDUCE-2893</a>.
+     Trivial improvement reported by Liang-Chi Hsieh and fixed by Liang-Chi Hsieh (client)<br>
+     <b>Removing duplicate service provider in hadoop-mapreduce-client-jobclient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2891">MAPREDUCE-2891</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Arun C Murthy (documentation , mrv2)<br>
+     <b>Docs for core protocols in yarn-api - AMRMProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2889">MAPREDUCE-2889</a>.
+     Critical sub-task reported by Arun C Murthy and fixed by Hitesh Shah (documentation , mrv2)<br>
+     <b>Add docs for writing new application frameworks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2886">MAPREDUCE-2886</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Fix Javadoc warnings in MapReduce.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2885">MAPREDUCE-2885</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>mapred-config.sh doesn't look for $HADOOP_COMMON_HOME/libexec/hadoop-config.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2882">MAPREDUCE-2882</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>TestLineRecordReader depends on ant jars</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2881">MAPREDUCE-2881</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Giridharan Kesavan (build)<br>
+     <b>mapreduce ant compilation fails "java.lang.IllegalStateException: impossible to get artifacts"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2880">MAPREDUCE-2880</a>.
+     Blocker improvement reported by Luke Lu and fixed by Arun C Murthy (mrv2)<br>
+     <b>Fix classpath construction for MRv2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2879">MAPREDUCE-2879</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Change mrv2 version to be 0.23.0-SNAPSHOT</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2877">MAPREDUCE-2877</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Add missing Apache license header in some files in MR and also add the rat plugin to the poms.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2876">MAPREDUCE-2876</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Anupam Seth (mrv2)<br>
+     <b>ContainerAllocationExpirer appears to use the incorrect configs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2874">MAPREDUCE-2874</a>.
+     Major bug reported by Thomas Graves and fixed by Eric Payne (mrv2)<br>
+     <b>ApplicationId printed in 2 different formats and has 2 different toString routines that are used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2868">MAPREDUCE-2868</a>.
+     Major bug reported by Thomas Graves and fixed by Mahadev konar (build)<br>
+     <b>ant build broken in hadoop-mapreduce dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2867">MAPREDUCE-2867</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Remove Unused TestApplicaitonCleanup in resourcemanager/applicationsmanager.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2864">MAPREDUCE-2864</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (jobhistoryserver , mrv2 , nodemanager , resourcemanager)<br>
+     <b>Renaming of configuration property names in yarn</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2860">MAPREDUCE-2860</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar (mrv2)<br>
+     <b>Fix log4j logging in the maven test cases.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2859">MAPREDUCE-2859</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Giridharan Kesavan <br>
+     <b>mapreduce trunk is broken with eclipse plugin contrib</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2858">MAPREDUCE-2858</a>.
+     Blocker sub-task reported by Luke Lu and fixed by Robert Joseph Evans (applicationmaster , mrv2 , security)<br>
+     <b>MRv2 WebApp Security</b><br>
+     <blockquote>A new server has been added to yarn. It is a web proxy that sits in front of the AM web UI. The server is controlled by the yarn.web-proxy.address config. If that config is set and it points to an address different from the RM web interface, then a separate proxy server needs to be launched.
+
+This can be done by running:
+
+yarn-daemon.sh start proxyserver
+
+If a separate proxy server is needed, other configs may also need to be set, if security is enabled:
+
+yarn.web-proxy.principal
+yarn.web-proxy.keytab
+
+The proxy server is stateless and should be able to support a VIP or other load balancing sitting in front of multiple instances of this server.</blockquote></li>
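+<p>For illustration only, a minimal yarn-site.xml sketch for a standalone proxy; the property names are those from the note above, while the host, port, principal, and keytab values are hypothetical:</p>
+<pre>
+&lt;!-- hypothetical values; yarn.web-proxy.address must differ from the RM web
+     address for a standalone proxy server to be required --&gt;
+&lt;property&gt;
+  &lt;name&gt;yarn.web-proxy.address&lt;/name&gt;
+  &lt;value&gt;proxyhost.example.com:9046&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;yarn.web-proxy.principal&lt;/name&gt;
+  &lt;value&gt;yarn/_HOST@EXAMPLE.COM&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;yarn.web-proxy.keytab&lt;/name&gt;
+  &lt;value&gt;/etc/security/keytabs/yarn.keytab&lt;/value&gt;
+&lt;/property&gt;
+</pre>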
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2854">MAPREDUCE-2854</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>update INSTALL with config necessary to run mapred on yarn</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2848">MAPREDUCE-2848</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu <br>
+     <b>Upgrade avro to 1.5.2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2846">MAPREDUCE-2846</a>.
+     Blocker bug reported by Allen Wittenauer and fixed by Owen O'Malley (task , task-controller , tasktracker)<br>
+     <b>a small % of all tasks fail with DefaultTaskController</b><br>
+     <blockquote>Fixed a race condition in writing the log index file that caused tasks to 'fail'.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2844">MAPREDUCE-2844</a>.
+     Trivial bug reported by Ramya Sunil and fixed by Ravi Teja Ch N V (mrv2)<br>
+     <b>[MR-279] Incorrect node ID info </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2843">MAPREDUCE-2843</a>.
+     Major bug reported by Ramya Sunil and fixed by Abhijit Suresh Shingate (mrv2)<br>
+     <b>[MR-279] Node entries on the RM UI are not sortable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2840">MAPREDUCE-2840</a>.
+     Minor bug reported by Thomas Graves and fixed by Jonathan Eagles (mrv2)<br>
+     <b>mr279 TestUberAM.testSleepJob test fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2839">MAPREDUCE-2839</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth <br>
+     <b>MR Jobs fail on a secure cluster with viewfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2821">MAPREDUCE-2821</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>[MR-279] Missing fields in job summary logs </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2808">MAPREDUCE-2808</a>.
+     Minor bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>pull MAPREDUCE-2797 into mr279 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2807">MAPREDUCE-2807</a>.
+     Major sub-task reported by Sharad Agarwal and fixed by Sharad Agarwal (applicationmaster , mrv2 , resourcemanager)<br>
+     <b>MR-279: AM restart does not work after RM refactor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2805">MAPREDUCE-2805</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>Update RAID for HDFS-2241</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2802">MAPREDUCE-2802</a>.
+     Critical improvement reported by Ramya Sunil and fixed by Jonathan Eagles (mrv2)<br>
+     <b>[MR-279] Jobhistory filenames should have jobID to help in better parsing </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2800">MAPREDUCE-2800</a>.
+     Major bug reported by Ramya Sunil and fixed by Siddharth Seth (mrv2)<br>
+     <b>clockSplits, cpuUsages, vMemKbytes, physMemKbytes is set to -1 in jhist files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2797">MAPREDUCE-2797</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid , test)<br>
+     <b>Some java files cannot be compiled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2796">MAPREDUCE-2796</a>.
+     Major bug reported by Ramya Sunil and fixed by Devaraj K (mrv2)<br>
+     <b>[MR-279] Start time for all the apps is set to 0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2794">MAPREDUCE-2794</a>.
+     Blocker bug reported by Ramya Sunil and fixed by John George (mrv2)<br>
+     <b>[MR-279] Incorrect metrics value for AvailableGB per queue per user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2792">MAPREDUCE-2792</a>.
+     Blocker sub-task reported by Ramya Sunil and fixed by Vinod Kumar Vavilapalli (mrv2 , security)<br>
+     <b>[MR-279] Replace IP addresses with hostnames</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2791">MAPREDUCE-2791</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Devaraj K (mrv2)<br>
+     <b>[MR-279] Missing/incorrect info on job -status CLI </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2789">MAPREDUCE-2789</a>.
+     Major bug reported by Ramya Sunil and fixed by Eric Payne (mrv2)<br>
+     <b>[MR:279] Update the scheduling info on CLI</b><br>
+     <blockquote>"mapred/job -list" now contains map/reduce, container, and resource information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2788">MAPREDUCE-2788</a>.
+     Major bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>Normalize requests in FifoScheduler.allocate to prevent NPEs later</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2783">MAPREDUCE-2783</a>.
+     Critical bug reported by Thomas Graves and fixed by Eric Payne (mrv2)<br>
+     <b>mr279 job history handling after killing application</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2782">MAPREDUCE-2782</a>.
+     Major test reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>MR-279: Unit (mockito) tests for CS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2781">MAPREDUCE-2781</a>.
+     Minor bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>mr279 RM application finishtime not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2779">MAPREDUCE-2779</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma (job submission)<br>
+     <b>JobSplitWriter.java can't handle large job.split file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2776">MAPREDUCE-2776</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Fix some of the yarn findbug warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2775">MAPREDUCE-2775</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Devaraj K (mrv2)<br>
+     <b>[MR-279] Decommissioned node does not shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2774">MAPREDUCE-2774</a>.
+     Minor bug reported by Ramya Sunil and fixed by Venu Gopala Rao (mrv2)<br>
+     <b>[MR-279] Add a startup msg while starting RM/NM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2773">MAPREDUCE-2773</a>.
+     Minor bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>[MR-279] server.api.records.NodeHealthStatus renamed but not updated in client NodeHealthStatus.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2772">MAPREDUCE-2772</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: mrv2 no longer compiles against trunk after common mavenization.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2767">MAPREDUCE-2767</a>.
+     Blocker bug reported by Milind Bhandarkar and fixed by Milind Bhandarkar (security)<br>
+     <b>Remove Linux task-controller from 0.22 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2766">MAPREDUCE-2766</a>.
+     Blocker sub-task reported by Ramya Sunil and fixed by Hitesh Shah (mrv2)<br>
+     <b>[MR-279] Set correct permissions for files in dist cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2764">MAPREDUCE-2764</a>.
+     Major bug reported by Daryn Sharp and fixed by Owen O'Malley <br>
+     <b>Fix renewal of dfs delegation tokens</b><br>
+     <blockquote>Generalizes token renewal and canceling to a common interface and provides a plugin interface for adding renewers for new kinds of tokens. Hftp was changed to store the tokens as HFTP tokens and renew them over http.</blockquote></li>
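+<p>A minimal sketch of the plugin interface described above, assuming the org.apache.hadoop.security.token.TokenRenewer service-provider mechanism; the class name, token kind, and renewal period are hypothetical:</p>
+<pre>
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
+
+// Hypothetical renewer for a custom token kind; discovered at runtime when
+// listed in META-INF/services/org.apache.hadoop.security.token.TokenRenewer.
+public class MyTokenRenewer extends TokenRenewer {
+  @Override
+  public boolean handleKind(Text kind) {
+    return new Text("MY_TOKEN_KIND").equals(kind);  // hypothetical token kind
+  }
+  @Override
+  public boolean isManaged(Token&lt;?&gt; token) {
+    return true;  // this renewer knows how to renew and cancel the token
+  }
+  @Override
+  public long renew(Token&lt;?&gt; token, Configuration conf) {
+    // contact the issuing service, then report the new expiry time
+    return System.currentTimeMillis() + 24L * 60 * 60 * 1000;
+  }
+  @Override
+  public void cancel(Token&lt;?&gt; token, Configuration conf) {
+    // contact the issuing service to revoke the token (omitted)
+  }
+}
+</pre>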
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2763">MAPREDUCE-2763</a>.
+     Major bug reported by Ramya Sunil and fixed by  (mrv2)<br>
+     <b>IllegalArgumentException while using the dist cache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2762">MAPREDUCE-2762</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>[MR-279] - Cleanup staging dir after job completion</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2760">MAPREDUCE-2760</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (documentation)<br>
+     <b>mapreduce.jobtracker.split.metainfo.maxsize typoed in mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2756">MAPREDUCE-2756</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (client , mrv2)<br>
+     <b>JobControl can drop jobs if an error occurs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2754">MAPREDUCE-2754</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Ravi Teja Ch N V (mrv2)<br>
+     <b>MR-279: AM logs are incorrectly going to stderr and error messages going incorrectly to stdout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2751">MAPREDUCE-2751</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Siddharth Seth (mrv2)<br>
+     <b>[MR-279] Lots of local files left on NM after the app finishes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2749">MAPREDUCE-2749</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Thomas Graves (mrv2)<br>
+     <b>[MR-279] NM registers with RM even before it starts various servers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2747">MAPREDUCE-2747</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Robert Joseph Evans (mrv2 , nodemanager , security)<br>
+     <b>[MR-279] [Security] Cleanup LinuxContainerExecutor binary sources</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2746">MAPREDUCE-2746</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Arun C Murthy (mrv2 , security)<br>
+     <b>[MR-279] [Security] Yarn servers can't communicate with each other with hadoop.security.authorization set to true</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2741">MAPREDUCE-2741</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Make ant build system work with hadoop-common JAR generated by Maven</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2740">MAPREDUCE-2740</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>MultipleOutputs in new API creates needless TaskAttemptContexts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2738">MAPREDUCE-2738</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>Missing cluster level stats on the RM UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2737">MAPREDUCE-2737</a>.
+     Major bug reported by Ramya Sunil and fixed by Siddharth Seth (mrv2)<br>
+     <b>Update the progress of jobs on client side</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2736">MAPREDUCE-2736</a>.
+     Major task reported by Eli Collins and fixed by Eli Collins (jobtracker , tasktracker)<br>
+     <b>Remove unused contrib components dependent on MR1</b><br>
+     <blockquote>The pre-MR2 MapReduce implementation (JobTracker, TaskTracker, etc) and contrib components are no longer supported. This implementation is currently supported in the 0.20.20x releases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2735">MAPREDUCE-2735</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>MR279: finished applications should be added to an application summary log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2732">MAPREDUCE-2732</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>Some tests using FSNamesystem.LOG cannot be compiled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2727">MAPREDUCE-2727</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>MR-279: SleepJob throws divide by zero exception when count = 0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2726">MAPREDUCE-2726</a>.
+     Blocker improvement reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>MR-279: Add the jobFile to the web UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2719">MAPREDUCE-2719</a>.
+     Major new feature reported by Sharad Agarwal and fixed by Hitesh Shah (mrv2)<br>
+     <b>MR-279: Write a shell command application</b><br>
+     <blockquote>Adding a simple DistributedShell application as an alternate framework to MapReduce and to act as an illustrative example for porting applications to YARN.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2716">MAPREDUCE-2716</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>MR279: MRReliabilityTest job fails because of missing job-file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2711">MAPREDUCE-2711</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>TestBlockPlacementPolicyRaid cannot be compiled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2710">MAPREDUCE-2710</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (client)<br>
+     <b>Update DFSClient.stringifyToken(..) in JobSubmitter.printTokens(..) for HDFS-2161</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2708">MAPREDUCE-2708</a>.
+     Blocker sub-task reported by Sharad Agarwal and fixed by Sharad Agarwal (applicationmaster , mrv2)<br>
+     <b>[MR-279] Design and implement MR Application Master recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2707">MAPREDUCE-2707</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>ProtoOverHadoopRpcEngine without using TunnelProtocol over WritableRpc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2706">MAPREDUCE-2706</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>MR-279: Submitting jobs beyond the max jobs per queue limit no longer gets logged</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2705">MAPREDUCE-2705</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (tasktracker)<br>
+     <b>tasks localized and launched serially by TaskLauncher - causing other tasks to be delayed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2702">MAPREDUCE-2702</a>.
+     Blocker sub-task reported by Sharad Agarwal and fixed by Sharad Agarwal (applicationmaster , mrv2)<br>
+     <b>[MR-279] OutputCommitter changes for MR Application Master recovery</b><br>
+     <blockquote>Enhance OutputCommitter and FileOutputCommitter to allow for recovery of tasks across job restart.</blockquote></li>
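+<p>A minimal sketch of the recovery hook this change adds, assuming org.apache.hadoop.mapreduce.OutputCommitter's recoverTask method; the committer class itself is hypothetical:</p>
+<pre>
+import java.io.IOException;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+// Hypothetical committer whose output can survive an MR Application Master restart.
+public class RecoverableCommitter extends OutputCommitter {
+  @Override public void setupJob(JobContext job) throws IOException { }
+  @Override public void setupTask(TaskAttemptContext task) throws IOException { }
+  @Override public boolean needsTaskCommit(TaskAttemptContext task) throws IOException { return true; }
+  @Override public void commitTask(TaskAttemptContext task) throws IOException { /* move output into place */ }
+  @Override public void abortTask(TaskAttemptContext task) throws IOException { /* discard output */ }
+
+  @Override
+  public void recoverTask(TaskAttemptContext task) throws IOException {
+    // Invoked after a job restart for tasks that completed under the previous
+    // AM attempt: re-attach their committed output instead of rerunning them.
+  }
+}
+</pre>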
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2701">MAPREDUCE-2701</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: app/Job.java needs UGI for the user that launched it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2697">MAPREDUCE-2697</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (mrv2)<br>
+     <b>Enhance CS to cap concurrently running jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2696">MAPREDUCE-2696</a>.
+     Major sub-task reported by Arun C Murthy and fixed by Siddharth Seth (mrv2 , nodemanager)<br>
+     <b>Container logs aren't getting cleaned up when LogAggregation is disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2693">MAPREDUCE-2693</a>.
+     Critical bug reported by Amol Kekre and fixed by Hitesh Shah (mrv2)<br>
+     <b>NPE in AM causes it to lose containers which are never returned back to RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2692">MAPREDUCE-2692</a>.
+     Major new feature reported by Amol Kekre and fixed by Sharad Agarwal (mrv2)<br>
+     <b>Ensure AM Restart and Recovery-on-restart is complete</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2691">MAPREDUCE-2691</a>.
+     Major improvement reported by Amol Kekre and fixed by Siddharth Seth (mrv2)<br>
+     <b>Finish up the cleanup of distributed cache file resources and related tests.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2690">MAPREDUCE-2690</a>.
+     Major bug reported by Ramya Sunil and fixed by Eric Payne (mrv2)<br>
+     <b>Construct the web page for default scheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2689">MAPREDUCE-2689</a>.
+     Major bug reported by Ramya Sunil and fixed by  (mrv2)<br>
+     <b>InvalidStateTransisiton when AM is not assigned to a job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2687">MAPREDUCE-2687</a>.
+     Blocker bug reported by Ramya Sunil and fixed by Mahadev konar (mrv2)<br>
+     <b>Non superusers unable to launch apps in both secure and non-secure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2682">MAPREDUCE-2682</a>.
+     Trivial improvement reported by Arun C Murthy and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Add a -classpath option to bin/mapred</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2680">MAPREDUCE-2680</a>.
+     Minor improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Enhance job-client cli to show queue information for running jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2679">MAPREDUCE-2679</a>.
+     Trivial improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>MR-279: Merge MR-279 related minor patches into trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2678">MAPREDUCE-2678</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (capacity-sched)<br>
+     <b>MR-279: minimum-user-limit-percent no longer honored</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2677">MAPREDUCE-2677</a>.
+     Major bug reported by Ramya Sunil and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: 404 error while accessing pages from history server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2676">MAPREDUCE-2676</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: JobHistory Job page needs to be reformatted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2675">MAPREDUCE-2675</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: JobHistory Server main page needs to be reformatted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2672">MAPREDUCE-2672</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: JobHistory Server needs "Analyse this job" support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2670">MAPREDUCE-2670</a>.
+     Trivial bug reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Fixing spelling mistake in FairSchedulerServlet.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2668">MAPREDUCE-2668</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Thomas Graves (mrv2)<br>
+     <b>MR-279: APPLICATION_STOP is never sent to AuxServices</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2667">MAPREDUCE-2667</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>MR279: mapred job -kill leaves application in RUNNING state</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2666">MAPREDUCE-2666</a>.
+     Blocker sub-task reported by Robert Joseph Evans and fixed by Jonathan Eagles (mrv2)<br>
+     <b>MR-279: Need to retrieve shuffle port number on ApplicationMaster restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2664">MAPREDUCE-2664</a>.
+     Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Implement JobCounters for MRv2 + Fix for Map Data Locality</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2663">MAPREDUCE-2663</a>.
+     Minor bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>MR-279: Refactoring StateMachineFactory inner classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2661">MAPREDUCE-2661</a>.
+     Minor bug reported by Ahmed Radwan and fixed by Ahmed Radwan (mrv2)<br>
+     <b>MR-279: Accessing MapTaskImpl from TaskImpl</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2655">MAPREDUCE-2655</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>MR279: Audit logs for YARN </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2649">MAPREDUCE-2649</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (mrv2)<br>
+     <b>MR279: Fate of finished Applications on RM</b><br>
+     <blockquote>New config added: &lt;name&gt;yarn.server.resourcemanager.expire.applications.completed.max&lt;/name&gt;
+     (the maximum number of completed applications the RM keeps).</blockquote></li>
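+     <p>As a rough illustration (the value below is hypothetical, not a shipped default), the setting would go in the ResourceManager configuration like any other YARN property:</p>
+     <pre>
+&lt;property&gt;
+  &lt;!-- Maximum number of completed applications the RM keeps in memory --&gt;
+  &lt;name&gt;yarn.server.resourcemanager.expire.applications.completed.max&lt;/name&gt;
+  &lt;value&gt;10000&lt;/value&gt;
+&lt;/property&gt;
+     </pre>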
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2646">MAPREDUCE-2646</a>.
+     Critical bug reported by Sharad Agarwal and fixed by Sharad Agarwal (applicationmaster , mrv2)<br>
+     <b>MR-279: AM with same sized maps and reduces hangs in presence of failing maps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2644">MAPREDUCE-2644</a>.
+     Major bug reported by Josh Wills and fixed by Josh Wills (mrv2)<br>
+     <b>NodeManager fails to create containers when NM_LOG_DIR is not explicitly set in the Configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2641">MAPREDUCE-2641</a>.
+     Minor sub-task reported by Josh Wills and fixed by Josh Wills (mrv2)<br>
+     <b>Fix the ExponentiallySmoothedTaskRuntimeEstimator and its unit test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2630">MAPREDUCE-2630</a>.
+     Minor bug reported by Josh Wills and fixed by Josh Wills (mrv2)<br>
+     <b>MR-279: refreshQueues leads to NPEs when used w/FifoScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2629">MAPREDUCE-2629</a>.
+     Minor improvement reported by Eric Caspole and fixed by Eric Caspole (task)<br>
+     <b>Class loading quirk prevents inner class method compilation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2628">MAPREDUCE-2628</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>MR-279: Add compiled on date to NM and RM info/about page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2625">MAPREDUCE-2625</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>MR-279: Add Node Manager Version to NM info page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2624">MAPREDUCE-2624</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>Update RAID for HDFS-2107</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2623">MAPREDUCE-2623</a>.
+     Minor improvement reported by Jim Plush and fixed by Harsh J (test)<br>
+     <b>Update ClusterMapReduceTestCase to use MiniDFSCluster.Builder</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2622">MAPREDUCE-2622</a>.
+     Minor task reported by Harsh J and fixed by Harsh J (test)<br>
+     <b>Remove the last remaining reference to "io.sort.mb"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2620">MAPREDUCE-2620</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>Update RAID for HDFS-2087</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2618">MAPREDUCE-2618</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (mrv2)<br>
+     <b>MR-279: 0 map, 0 reduce job fails with Null Pointer Exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2615">MAPREDUCE-2615</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: KillJob should go through AM whenever possible</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2611">MAPREDUCE-2611</a>.
+     Major improvement reported by Siddharth Seth and fixed by  (mrv2)<br>
+     <b>MR 279: Metrics, finishTimes, etc in JobHistory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2606">MAPREDUCE-2606</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>Remove IsolationRunner</b><br>
+     <blockquote>IsolationRunner is no longer maintained. See MAPREDUCE-2637 for its replacement. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2603">MAPREDUCE-2603</a>.
+     Major bug reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>Gridmix system tests are failing because high-ram emulation is enabled by default for normal MR jobs in the trace, which exceeds the slot capacity.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2602">MAPREDUCE-2602</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan <br>
+     <b>Allow setting of end-of-record delimiter for TextInputFormat (for the old API)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2598">MAPREDUCE-2598</a>.
+     Minor bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: miscellaneous UI, NPE fixes for JobHistory, UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2596">MAPREDUCE-2596</a>.
+     Major improvement reported by Arun C Murthy and fixed by Amar Kamat (benchmarks , contrib/gridmix)<br>
+     <b>Gridmix should notify job failures</b><br>
+     <blockquote>Gridmix now prints a summary information after every run. It summarizes the runs w.r.t input trace details, input data statistics, cli arguments, data-gen runtime, simulation runtimes etc and also the cluster w.r.t map slots, reduce slots, jobtracker-address, hdfs-address etc.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2595">MAPREDUCE-2595</a>.
+     Minor bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>MR279: update yarn INSTALL doc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2588">MAPREDUCE-2588</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (contrib/raid)<br>
+     <b>Raid does not compile after DataTransferProtocol refactoring</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2587">MAPREDUCE-2587</a>.
+     Minor bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>MR279: Fix RM version in the cluster-&gt;about page </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2582">MAPREDUCE-2582</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Cleanup JobHistory event generation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2581">MAPREDUCE-2581</a>.
+     Trivial bug reported by Dave Syer and fixed by Tim Sell <br>
+     <b>Spelling errors in log messages (MapTask)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2580">MAPREDUCE-2580</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: RM UI should redirect finished jobs to History UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2576">MAPREDUCE-2576</a>.
+     Trivial bug reported by Sherry Chen and fixed by Tim Sell <br>
+     <b>Typo in comment in SimulatorLaunchTaskAction.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2575">MAPREDUCE-2575</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (test)<br>
+     <b>TestMiniMRDFSCaching fails if test.build.dir is set to something other than build/test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2573">MAPREDUCE-2573</a>.
+     Major bug reported by Todd Lipcon and fixed by Robert Joseph Evans <br>
+     <b>New findbugs warning after MAPREDUCE-2494</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2569">MAPREDUCE-2569</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (mrv2)<br>
+     <b>MR-279: Restarting resource manager with root capacity not equal to 100 percent should result in error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2566">MAPREDUCE-2566</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: YarnConfiguration should reloadConfiguration if instantiated with a non YarnConfiguration object</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2563">MAPREDUCE-2563</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>Gridmix high ram jobs emulation system tests.</b><br>
+     <blockquote>Adds system tests to test the High-Ram feature in Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2559">MAPREDUCE-2559</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>ant binary fails due to missing c++ lib dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2556">MAPREDUCE-2556</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: NodeStatus.getNodeHealthStatus().setBlah broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2554">MAPREDUCE-2554</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>Gridmix distributed cache emulation system tests.</b><br>
+     <blockquote>Adds distributed cache related system tests to Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2552">MAPREDUCE-2552</a>.
+     Minor bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: NPE when requesting attemptids for completed jobs </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2551">MAPREDUCE-2551</a>.
+     Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Implement JobSummaryLog</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2550">MAPREDUCE-2550</a>.
+     Blocker bug reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>bin/mapred no longer works from a source checkout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2544">MAPREDUCE-2544</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>Gridmix compression emulation system tests.</b><br>
+     <blockquote>Adds system tests for testing the compression emulation feature of Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2543">MAPREDUCE-2543</a>.
+     Major new feature reported by Amar Kamat and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>[Gridmix] Add support for HighRam jobs</b><br>
+     <blockquote>Adds High-Ram feature emulation in Gridmix.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2541">MAPREDUCE-2541</a>.
+     Critical bug reported by Binglin Chang and fixed by Binglin Chang (tasktracker)<br>
+     <b>Race Condition in IndexCache(readIndexFileToCache,removeMap) causes value of totalMemoryUsed corrupt, which may cause TaskTracker continue throw Exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2537">MAPREDUCE-2537</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (mrv2)<br>
+     <b>MR-279: The RM writes its log to yarn-mapred-resourcemanager-&lt;RM_Host&gt;.out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2536">MAPREDUCE-2536</a>.
+     Minor test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>TestMRCLI broke due to change in usage output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2534">MAPREDUCE-2534</a>.
+     Major bug reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Fix CI breaking hard coded version in jobclient pom</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2533">MAPREDUCE-2533</a>.
+     Major new feature reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Metrics for reserved resource in ResourceManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2532">MAPREDUCE-2532</a>.
+     Major new feature reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Metrics for NodeManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2531">MAPREDUCE-2531</a>.
+     Blocker bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (client)<br>
+     <b>org.apache.hadoop.mapred.jobcontrol.getAssignedJobID throws a class cast exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2529">MAPREDUCE-2529</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves (tasktracker)<br>
+     <b>Recognize Jetty bug 1342 and handle it</b><br>
+     <blockquote>Added 2 new config parameters:
+
+mapreduce.reduce.shuffle.catch.exception.stack.regex
+mapreduce.reduce.shuffle.catch.exception.message.regex</blockquote></li>
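+     <p>A minimal sketch of how these two properties might be set; the regex values here are hypothetical examples, not shipped defaults:</p>
+     <pre>
+&lt;property&gt;
+  &lt;!-- Shuffle exception stack traces matching this pattern are treated as the known Jetty bug --&gt;
+  &lt;name&gt;mapreduce.reduce.shuffle.catch.exception.stack.regex&lt;/name&gt;
+  &lt;value&gt;.*\.doRunLoop.*&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;!-- Exception messages matching this pattern are treated the same way --&gt;
+  &lt;name&gt;mapreduce.reduce.shuffle.catch.exception.message.regex&lt;/name&gt;
+  &lt;value&gt;Broken pipe&lt;/value&gt;
+&lt;/property&gt;
+     </pre>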
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2527">MAPREDUCE-2527</a>.
+     Major new feature reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Metrics for MRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2522">MAPREDUCE-2522</a>.
+     Major sub-task reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Security for JobHistory service</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2521">MAPREDUCE-2521</a>.
+     Major new feature reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>Mapreduce RPM integration project</b><br>
+     <blockquote>Created rpm and debian packages for MapReduce. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2518">MAPREDUCE-2518</a>.
+     Major bug reported by Wei Yongjun and fixed by Wei Yongjun (distcp)<br>
+     <b>missing t flag in distcp help message '-p[rbugp]'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2517">MAPREDUCE-2517</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>Porting Gridmix v3 system tests into trunk branch.</b><br>
+     <blockquote>Adds system tests to Gridmix. These system tests cover various features like job types (load and sleep), user resolvers (round-robin, submitter-user, echo) and  submission modes (stress, replay and serial).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2514">MAPREDUCE-2514</a>.
+     Trivial bug reported by Jonathan Eagles and fixed by Jonathan Eagles (tasktracker)<br>
+     <b>ReinitTrackerAction class name misspelled RenitTrackerAction in task tracker log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2509">MAPREDUCE-2509</a>.
+     Major bug reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Fix NPE in UI for pending attempts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2504">MAPREDUCE-2504</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: race in JobHistoryEventHandler stop </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2501">MAPREDUCE-2501</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Attach sources in builds</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2500">MAPREDUCE-2500</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: PB factories are not thread safe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2497">MAPREDUCE-2497</a>.
+     Trivial bug reported by Robert Henry and fixed by Eli Collins <br>
+     <b>missing spaces in error messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2495">MAPREDUCE-2495</a>.
+     Minor improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (distributed-cache)<br>
+     <b>The distributed cache cleanup thread has no monitoring to check to see if it has died for some reason</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2494">MAPREDUCE-2494</a>.
+     Major improvement reported by Robert Joseph Evans and fixed by Robert Joseph Evans (distributed-cache)<br>
+     <b>Make the distributed cache delete entries using LRU priority</b><br>
+     <blockquote>Added config option mapreduce.tasktracker.cache.local.keep.pct to the TaskTracker.  It is the target percentage of the local distributed cache that should be kept in between garbage collection runs.  In practice it will delete unused distributed cache entries in LRU order until the size of the cache is less than mapreduce.tasktracker.cache.local.keep.pct of the maximum cache size.  This is a floating point value between 0.0 and 1.0.  The default is 0.95.</blockquote></li>
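+     <p>For example, the default of 0.95 corresponds to the following entry in the TaskTracker configuration:</p>
+     <pre>
+&lt;property&gt;
+  &lt;!-- Keep the local distributed cache at or below 95% of its maximum size
+       after each cleanup run; unused entries are evicted in LRU order --&gt;
+  &lt;name&gt;mapreduce.tasktracker.cache.local.keep.pct&lt;/name&gt;
+  &lt;value&gt;0.95&lt;/value&gt;
+&lt;/property&gt;
+     </pre>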
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2492">MAPREDUCE-2492</a>.
+     Major improvement reported by Amar Kamat and fixed by Amar Kamat (task)<br>
+     <b>[MAPREDUCE] The new MapReduce API should make available task's progress to the task</b><br>
+     <blockquote>Map and Reduce task can access the attempt's overall progress via TaskAttemptContext.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2490">MAPREDUCE-2490</a>.
+     Trivial improvement reported by Jonathan Eagles and fixed by Jonathan Eagles (jobtracker)<br>
+     <b>Log blacklist debug count</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2489">MAPREDUCE-2489</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (jobtracker)<br>
+     <b>Jobsplits with random hostnames can make the queue unusable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2483">MAPREDUCE-2483</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>Clean up duplication of dependent jar files</b><br>
+     <blockquote>Removed duplicated hadoop-common library dependencies.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2480">MAPREDUCE-2480</a>.
+     Major bug reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: mr app should not depend on hard-coded version of shuffle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2478">MAPREDUCE-2478</a>.
+     Major improvement reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Improve history server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2475">MAPREDUCE-2475</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (test)<br>
+     <b>Disable IPV6 for junit tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2474">MAPREDUCE-2474</a>.
+     Minor improvement reported by Harsh J and fixed by Harsh J (documentation)<br>
+     <b>Add docs to the new API Partitioner on how to access Job Configuration data</b><br>
+     <blockquote>Improve the Partitioner interface's docs to help fetch Job Configuration objects.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2473">MAPREDUCE-2473</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (jobtracker)<br>
+     <b>MR portion of HADOOP-7214 - Hadoop /usr/bin/groups equivalent</b><br>
+     <blockquote>Introduces a new command, "mapred groups", which displays what groups are associated with a user as seen by the JobTracker.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2470">MAPREDUCE-2470</a>.
+     Major bug reported by Aaron Baff and fixed by Robert Joseph Evans (client)<br>
+     <b>Receiving NPE occasionally on RunningJob.getCounters() call</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2469">MAPREDUCE-2469</a>.
+     Major improvement reported by Amar Kamat and fixed by Amar Kamat (task)<br>
+     <b>Task counters should also report the total heap usage of the task</b><br>
+     <blockquote>Task attempt's total heap usage gets recorded and published via counters as COMMITTED_HEAP_BYTES.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2467">MAPREDUCE-2467</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (contrib/raid)<br>
+     <b>HDFS-1052 changes break the raid contrib module in MapReduce</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2466">MAPREDUCE-2466</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>TestFileInputFormat.testLocality failing after federation merge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2463">MAPREDUCE-2463</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (jobtracker)<br>
+     <b>Job History files are not moving to done folder when job history location is hdfs location</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2462">MAPREDUCE-2462</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR 279: Write job conf along with JobHistory, other minor improvements</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2460">MAPREDUCE-2460</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>TestFairSchedulerSystem failing on Hudson</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2459">MAPREDUCE-2459</a>.
+     Major improvement reported by Mac Yang and fixed by Mac Yang (harchive)<br>
+     <b>Cache HAR filesystem metadata</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2458">MAPREDUCE-2458</a>.
+     Major bug reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Rename sanitized pom.xml in build directory to work around IDE bug</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2456">MAPREDUCE-2456</a>.
+     Trivial improvement reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (jobtracker)<br>
+     <b>Show the reducer taskid and map/reduce tasktrackers for "Failed fetch notification #_ for task attempt..." log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2455">MAPREDUCE-2455</a>.
+     Major sub-task reported by Tom White and fixed by Tom White (build , client)<br>
+     <b>Remove deprecated JobTracker.State in favour of JobTrackerStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2451">MAPREDUCE-2451</a>.
+     Trivial bug reported by Thomas Graves and fixed by Thomas Graves (jobtracker)<br>
+     <b>Log the reason string of healthcheck script</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2449">MAPREDUCE-2449</a>.
+     Minor improvement reported by Jeff Zemerick and fixed by Jeff Zemerick (contrib/eclipse-plugin)<br>
+     <b>Allow for command line arguments when performing "Run on Hadoop" action.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2440">MAPREDUCE-2440</a>.
+     Major bug reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Name clashes in TypeConverter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2439">MAPREDUCE-2439</a>.
+     Major bug reported by Mahadev konar and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR-279: Fix YarnRemoteException to give more details.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2438">MAPREDUCE-2438</a>.
+     Major new feature reported by Mahadev konar and fixed by Krishna Ramachandran (mrv2)<br>
+     <b>MR-279: WebApp for Job History</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2434">MAPREDUCE-2434</a>.
+     Major new feature reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: ResourceManager metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2433">MAPREDUCE-2433</a>.
+     Blocker bug reported by Luke Lu and fixed by Mahadev konar (mrv2)<br>
+     <b>MR-279: YARNApplicationConstants hard code app master jar version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2432">MAPREDUCE-2432</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu (mrv2)<br>
+     <b>MR-279: Install sanitized poms for downstream sanity</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2430">MAPREDUCE-2430</a>.
+     Major task reported by Nigel Daley and fixed by Nigel Daley <br>
+     <b>Remove mrunit contrib</b><br>
+     <blockquote>MRUnit is now available as a separate Apache project.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2429">MAPREDUCE-2429</a>.
+     Major bug reported by Arun C Murthy and fixed by Siddharth Seth (tasktracker)<br>
+     <b>Check jvmid during task status report</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2428">MAPREDUCE-2428</a>.
+     Blocker bug reported by Tom White and fixed by Tom White <br>
+     <b>start-mapred.sh script fails if HADOOP_HOME is not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2426">MAPREDUCE-2426</a>.
+     Trivial test reported by Todd Lipcon and fixed by Todd Lipcon (contrib/fair-share)<br>
+     <b>Make TestFairSchedulerSystem fail with more verbose output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2424">MAPREDUCE-2424</a>.
+     Major improvement reported by Greg Roelofs and fixed by Greg Roelofs (mrv2)<br>
+     <b>MR-279: counters/UI/etc. for uber-AppMaster (in-cluster LocalJobRunner for MRv2)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2422">MAPREDUCE-2422</a>.
+     Major sub-task reported by Tom White and fixed by Tom White (client)<br>
+     <b>Removed unused internal methods from DistributedCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2417">MAPREDUCE-2417</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (contrib/gridmix)<br>
+     <b>In Gridmix, in RoundRobinUserResolver mode, the testing/proxy users are not associated with unique users in a trace</b><br>
+     <blockquote>Fixes Gridmix in RoundRobinUserResolver mode to map testing/proxy users to unique users in a trace.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2416">MAPREDUCE-2416</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (contrib/gridmix)<br>
+     <b>In Gridmix, in RoundRobinUserResolver, the list of groups for a user obtained from users-list-file is incorrect</b><br>
+     <blockquote>Removes the restriction of specifying group names in users-list file for Gridmix in RoundRobinUserResolver mode.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2414">MAPREDUCE-2414</a>.
+     Major improvement reported by Arun C Murthy and fixed by Siddharth Seth (mrv2)<br>
+     <b>MR-279: Use generic interfaces for protocols</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2409">MAPREDUCE-2409</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth (distributed-cache)<br>
+     <b>Distributed Cache does not differentiate between file/archive for files with the same path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2408">MAPREDUCE-2408</a>.
+     Major new feature reported by Ravi Gummadi and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>Make Gridmix emulate usage of data compression</b><br>
+     <blockquote>Emulates the MapReduce compression feature in Gridmix. By default, compression emulation is turned on. Compression emulation can be disabled by setting 'gridmix.compression-emulation.enable' to 'false'.  Use 'gridmix.compression-emulation.map-input.decompression-ratio', 'gridmix.compression-emulation.map-output.compression-ratio' and 'gridmix.compression-emulation.reduce-output.compression-ratio' to configure the compression ratios at map input, map output and reduce output side respectively. Currently, compression ratios in the range [0.07, 0.68] are supported. Gridmix auto detects whether map-input, map output and reduce output should emulate compression based on original job's compression related configuration parameters.</blockquote></li>
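+     <p>A sketch of the relevant Gridmix settings; the ratio values below are illustrative choices within the documented [0.07, 0.68] range, not defaults:</p>
+     <pre>
+&lt;!-- Compression emulation is on by default; set to false to disable --&gt;
+&lt;property&gt;
+  &lt;name&gt;gridmix.compression-emulation.enable&lt;/name&gt;
+  &lt;value&gt;true&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;gridmix.compression-emulation.map-input.decompression-ratio&lt;/name&gt;
+  &lt;value&gt;0.50&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;gridmix.compression-emulation.map-output.compression-ratio&lt;/name&gt;
+  &lt;value&gt;0.50&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;gridmix.compression-emulation.reduce-output.compression-ratio&lt;/name&gt;
+  &lt;value&gt;0.50&lt;/value&gt;
+&lt;/property&gt;
+     </pre>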
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2405">MAPREDUCE-2405</a>.
+     Major improvement reported by Mahadev konar and fixed by Greg Roelofs (mrv2)<br>
+     <b>MR-279: Implement uber-AppMaster (in-cluster LocalJobRunner for MRv2)</b><br>
+     <blockquote>An efficient implementation of small jobs by running all tasks in the MR ApplicationMaster JVM, thereby achieving lower latency.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2403">MAPREDUCE-2403</a>.
+     Major improvement reported by Mahadev konar and fixed by Krishna Ramachandran (mrv2)<br>
+     <b>MR-279: Improve job history event handling in AM to log to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2399">MAPREDUCE-2399</a>.
+     Major improvement reported by Arun C Murthy and fixed by Luke Lu <br>
+     <b>The embedded web framework for MAPREDUCE-279</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2395">MAPREDUCE-2395</a>.
+     Critical bug reported by Todd Lipcon and fixed by Ramkumar Vadali (contrib/raid)<br>
+     <b>TestBlockFixer timing out on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2381">MAPREDUCE-2381</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Philip Zeyliger <br>
+     <b>JobTracker instrumentation not consistent about error handling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2379">MAPREDUCE-2379</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (distributed-cache , documentation)<br>
+     <b>Distributed cache sizing configurations are missing from mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2367">MAPREDUCE-2367</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Allow using a file to exclude certain tests from build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2365">MAPREDUCE-2365</a>.
+     Major bug reported by Owen O'Malley and fixed by Siddharth Seth <br>
+     <b>Add counters for FileInputFormat (BYTES_READ) and FileOutputFormat (BYTES_WRITTEN)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2351">MAPREDUCE-2351</a>.
+     Major improvement reported by Tom White and fixed by Tom White <br>
+     <b>mapred.job.tracker.history.completed.location should support an arbitrary filesystem URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2331">MAPREDUCE-2331</a>.
+     Major test reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Add coverage of task graph servlet to fair scheduler system test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2326">MAPREDUCE-2326</a>.
+     Major improvement reported by Arun C Murthy and fixed by  <br>
+     <b>Port gridmix changes from hadoop-0.20.100 to trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2323">MAPREDUCE-2323</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon (contrib/fair-share)<br>
+     <b>Add metrics to the fair scheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2317">MAPREDUCE-2317</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (harchive)<br>
+     <b>HadoopArchives throwing NullPointerException while creating hadoop archives (.har files)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2311">MAPREDUCE-2311</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Scott Chen (contrib/fair-share)<br>
+     <b>TestFairScheduler failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2307">MAPREDUCE-2307</a>.
+     Minor bug reported by Devaraj K and fixed by Devaraj K (contrib/fair-share)<br>
+     <b>Exception thrown in JobTracker logs when the scheduler configured is FairScheduler.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2302">MAPREDUCE-2302</a>.
+     Major improvement reported by Scott Chen and fixed by Scott Chen (contrib/raid)<br>
+     <b>Add static factory methods in GaloisField</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2290">MAPREDUCE-2290</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (test)<br>
+     <b>TestTaskCommit missing getProtocolSignature override</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2271">MAPREDUCE-2271</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Liyin Liang (jobtracker)<br>
+     <b>TestSetupTaskScheduling failing in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2263">MAPREDUCE-2263</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang <br>
+     <b>MapReduce side of HADOOP-6904</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2260">MAPREDUCE-2260</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build)<br>
+     <b>Remove auto-generated native build files</b><br>
+     <blockquote>The native build, when run from trunk, now requires autotools, libtool and openssl dev libraries.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2258">MAPREDUCE-2258</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (task)<br>
+     <b>IFile reader closes stream and compressor in wrong order</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2254">MAPREDUCE-2254</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan <br>
+     <b>Allow setting of end-of-record delimiter for TextInputFormat</b><br>
+     <blockquote>TextInputFormat may now split lines with delimiters other than newline, by specifying a configuration parameter "textinputformat.record.delimiter"</blockquote></li>
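+     <p>For instance, to split records on a custom delimiter instead of newline (the delimiter value here is just an example):</p>
+     <pre>
+&lt;property&gt;
+  &lt;!-- Records are split wherever this string occurs, rather than at newlines --&gt;
+  &lt;name&gt;textinputformat.record.delimiter&lt;/name&gt;
+  &lt;value&gt;||&lt;/value&gt;
+&lt;/property&gt;
+     </pre>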
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2250">MAPREDUCE-2250</a>.
+     Trivial improvement reported by Ramkumar Vadali and fixed by Ramkumar Vadali (contrib/raid)<br>
+     <b>Fix logging in raid code.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2249">MAPREDUCE-2249</a>.
+     Major improvement reported by Bhallamudi Venkata Siva Kamesh and fixed by Devaraj K <br>
+     <b>Check the reflexive property of the object when overriding its equals method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2248">MAPREDUCE-2248</a>.
+     Major improvement reported by Ramkumar Vadali and fixed by Ramkumar Vadali <br>
+     <b>DistributedRaidFileSystem should unraid only the corrupt block</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2243">MAPREDUCE-2243</a>.
+     Minor improvement reported by Bhallamudi Venkata Siva Kamesh and fixed by Devaraj K (jobtracker , tasktracker)<br>
+     <b>Close all the file streams properly in a finally block to avoid leaking them.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2239">MAPREDUCE-2239</a>.
+     Major improvement reported by Scott Chen and fixed by Scott Chen (contrib/raid)<br>
+     <b>BlockPlacementPolicyRaid should call getBlockLocations only when necessary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2225">MAPREDUCE-2225</a>.
+     Blocker improvement reported by Harsh J and fixed by Harsh J (job submission)<br>
+     <b>MultipleOutputs should not require the use of 'Writable'</b><br>
+     <blockquote>MultipleOutputs should not require the use/check of 'Writable' interfaces in key and value classes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2215">MAPREDUCE-2215</a>.
+     Major bug reported by Patrick Kling and fixed by Patrick Kling (contrib/raid)<br>
+     <b>A more elegant FileSystem#listCorruptFileBlocks API (RAID changes)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2207">MAPREDUCE-2207</a>.
+     Major improvement reported by Scott Chen and fixed by Liyin Liang (jobtracker)<br>
+     <b>Task-cleanup task should not be scheduled on the node that the task just failed</b><br>
+     <blockquote>Task-cleanup task should not be scheduled on the node that the task just failed</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2206">MAPREDUCE-2206</a>.
+     Major improvement reported by Scott Chen and fixed by Scott Chen (jobtracker)<br>
+     <b>The task-cleanup tasks should be optional</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2203">MAPREDUCE-2203</a>.
+     Trivial improvement reported by Jingguo Yao and fixed by Jingguo Yao <br>
+     <b>Wrong javadoc for TaskRunner's appendJobJarClasspaths method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2202">MAPREDUCE-2202</a>.
+     Major improvement reported by Konstantin Boudnik and fixed by Konstantin Boudnik <br>
+     <b>Generalize CLITest structure and interfaces to facilitate upstream adoption (e.g. for web or system testing)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2199">MAPREDUCE-2199</a>.
+     Major bug reported by Konstantin Boudnik and fixed by Konstantin Boudnik (build)<br>
+     <b>build is broken after 0.22 branch creation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2185">MAPREDUCE-2185</a>.
+     Major bug reported by Hairong Kuang and fixed by Ramkumar Vadali (job submission)<br>
+     <b>Infinite loop at creating splits using CombineFileInputFormat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2172">MAPREDUCE-2172</a>.
+     Major bug reported by Patrick Kling and fixed by Nigel Daley <br>
+     <b>test-patch.properties contains incorrect/version-dependent values of OK_FINDBUGS_WARNINGS and OK_RELEASEAUDIT_WARNINGS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2156">MAPREDUCE-2156</a>.
+     Major improvement reported by Patrick Kling and fixed by Patrick Kling (contrib/raid)<br>
+     <b>Raid-aware FSCK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2155">MAPREDUCE-2155</a>.
+     Major improvement reported by Patrick Kling and fixed by Patrick Kling (contrib/raid)<br>
+     <b>RaidNode should optionally dispatch map reduce jobs to fix corrupt blocks (instead of fixing locally)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2153">MAPREDUCE-2153</a>.
+     Major improvement reported by Ravi Gummadi and fixed by Rajesh Balamohan (tools/rumen)<br>
+     <b>Bring in more job configuration properties in to the trace file</b><br>
+     <blockquote>Adds job configuration parameters to the job trace. The configuration parameters are stored under the 'jobProperties' field as key-value pairs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2137">MAPREDUCE-2137</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (contrib/gridmix)<br>
+     <b>Mapping between Gridmix jobs and the corresponding original MR jobs is needed</b><br>
+     <blockquote>New configuration properties gridmix.job.original-job-id and gridmix.job.original-job-name in the configuration of the simulated job are exposed/documented to the Gridmix user for mapping between the original cluster's jobs and the simulated jobs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2127">MAPREDUCE-2127</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Bruno Mah&#233; (build , pipes)<br>
+     <b>mapreduce trunk builds are failing on hudson</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2107">MAPREDUCE-2107</a>.
+     Major improvement reported by Ranjit Mathew and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>Emulate Memory Usage of Tasks in GridMix3</b><br>
+     <blockquote>Adds total heap usage emulation to Gridmix. Also, Gridmix can configure the simulated task's JVM heap options with max heap options obtained from the original task (via Rumen). Use 'gridmix.task.jvm-options.enable' to disable the task max heap options configuration. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2106">MAPREDUCE-2106</a>.
+     Major improvement reported by Ranjit Mathew and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>Emulate CPU Usage of Tasks in GridMix3</b><br>
+     <blockquote>Adds cumulative cpu usage emulation to Gridmix</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2105">MAPREDUCE-2105</a>.
+     Major improvement reported by Ranjit Mathew and fixed by Amar Kamat (contrib/gridmix)<br>
+     <b>Simulate Load Incrementally and Adaptively in GridMix3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2104">MAPREDUCE-2104</a>.
+     Major bug reported by Ranjit Mathew and fixed by Amar Kamat (tools/rumen)<br>
+     <b>Rumen TraceBuilder Does Not Emit CPU/Memory Usage Details in Traces</b><br>
+     <blockquote>Adds cpu, physical memory, virtual memory and heap usages to TraceBuilder's output.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2081">MAPREDUCE-2081</a>.
+     Major test reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>[GridMix3] Implement functionality to get the list of job traces which have different intervals.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2074">MAPREDUCE-2074</a>.
+     Minor bug reported by Koji Noguchi and fixed by Priyo Mustafi (distributed-cache)<br>
+     <b>Task should fail when symlink creation fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2053">MAPREDUCE-2053</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>[Herriot] Test Gridmix file pool for different input file sizes based on pool minimum size.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2037">MAPREDUCE-2037</a>.
+     Major new feature reported by Dick King and fixed by Dick King <br>
+     <b>Capturing interim progress times, CPU usage, and memory usage, when tasks reach certain progress thresholds</b><br>
+     <blockquote>Capture intermediate task resource consumption information:

+* Time taken so far

+* CPU load [either at the time the data are taken, or exponentially smoothed]

+* Memory load [also either at the time the data are taken, or exponentially smoothed]

+

+This would be taken at intervals that depend on the task progress plateaus. For example, reducers have three progress ranges - [0-1/3], (1/3-2/3], and (2/3-3/3] - where fundamentally different activities happen. Mappers have different boundaries that are not symmetrically placed [0-9/10], (9/10-1]. Data capture boundaries should coincide with activity boundaries. For the state information capture [CPU and memory] we should average over the covered interval.

+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2033">MAPREDUCE-2033</a>.
+     Major task reported by Vinay Kumar Thota and fixed by Vinay Kumar Thota (contrib/gridmix)<br>
+     <b>[Herriot] Gridmix generate data tests with various submission policies and different user resolvers.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-2026">MAPREDUCE-2026</a>.
+     Major improvement reported by Scott Chen and fixed by Joydeep Sen Sarma <br>
+     <b>JobTracker.getJobCounters() should not hold JobTracker lock while calling JobInProgress.getCounters()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1996">MAPREDUCE-1996</a>.
+     Trivial bug reported by Glynn Durham and fixed by Harsh J (documentation)<br>
+     <b>API: Reducer.reduce() method detail misstatement</b><br>
+     <blockquote>Fix a misleading documentation note about the usage of Reporter objects in Reducers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1978">MAPREDUCE-1978</a>.
+     Major improvement reported by Amar Kamat and fixed by Ravi Gummadi (tools/rumen)<br>
+     <b>[Rumen] TraceBuilder should provide recursive input folder scanning</b><br>
+     <blockquote>Adds -recursive option to TraceBuilder for scanning the input directories recursively.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1938">MAPREDUCE-1938</a>.
+     Blocker new feature reported by Devaraj Das and fixed by Krishna Ramachandran (job submission , task , tasktracker)<br>
+     <b>Ability for having user's classes take precedence over the system classes for tasks' classpath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1927">MAPREDUCE-1927</a>.
+     Minor test reported by Greg Roelofs and fixed by Greg Roelofs (test)<br>
+     <b>unit test for HADOOP-6835 (concatenated gzip support)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1906">MAPREDUCE-1906</a>.
+     Major improvement reported by Scott Carey and fixed by Todd Lipcon (jobtracker , performance , tasktracker)<br>
+     <b>Lower default minimum heartbeat interval for tasktracker &gt; Jobtracker</b><br>
+     <blockquote>The default minimum heartbeat interval has been dropped from 3 seconds to 300ms to increase scheduling throughput on small clusters. Users may tune mapreduce.jobtracker.heartbeats.in.second to adjust this value.</blockquote></li>
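+     <p>As a sketch, the tunable can be set in mapred-site.xml; the value below is an arbitrary example, not the default:</p>
+     <pre>
+&lt;property&gt;
+  &lt;!-- Target number of heartbeats the JobTracker should process per second --&gt;
+  &lt;name&gt;mapreduce.jobtracker.heartbeats.in.second&lt;/name&gt;
+  &lt;value&gt;100&lt;/value&gt;
+&lt;/property&gt;
+     </pre>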
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1831">MAPREDUCE-1831</a>.
+     Major improvement reported by Scott Chen and fixed by Scott Chen (contrib/raid)<br>
+     <b>BlockPlacement policy for RAID</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1811">MAPREDUCE-1811</a>.
+     Minor bug reported by Amareshwari Sriramadasu and fixed by Harsh J (client)<br>
+     <b>Job.monitorAndPrintJob() should print status of the job at completion</b><br>
+     <blockquote>Print the resultant status of a Job on completion instead of simply saying 'Complete'.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1788">MAPREDUCE-1788</a>.
+     Major bug reported by Arun C Murthy and fixed by Arun C Murthy (client)<br>
+     <b>o.a.h.mapreduce.Job shouldn't make a copy of the JobConf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1783">MAPREDUCE-1783</a>.
+     Major improvement reported by Ramkumar Vadali and fixed by Ramkumar Vadali (contrib/fair-share)<br>
+     <b>Task Initialization should be delayed till when a job can be run</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1752">MAPREDUCE-1752</a>.
+     Major improvement reported by Dmytro Molkov and fixed by Dmytro Molkov (harchive)<br>
+     <b>Implement getFileBlockLocations in HarFilesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1738">MAPREDUCE-1738</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu <br>
+     <b>MapReduce portion of HADOOP-6728 (overhaul metrics framework)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1706">MAPREDUCE-1706</a>.
+     Major improvement reported by Rodrigo Schmidt and fixed by Scott Chen (contrib/raid)<br>
+     <b>Log RAID recoveries on HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1702">MAPREDUCE-1702</a>.
+     Minor improvement reported by Jaideep and fixed by  (contrib/gridmix)<br>
+     <b>CPU/Memory emulation for GridMix3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1624">MAPREDUCE-1624</a>.
+     Major improvement reported by Devaraj Das and fixed by Devaraj Das (documentation)<br>
+     <b>Document the job credentials and associated details to do with delegation tokens (on the client side)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1461">MAPREDUCE-1461</a>.
+     Major improvement reported by Rajesh Balamohan and fixed by Rajesh Balamohan (tools/rumen)<br>
+     <b>Feature to instruct rumen-folder utility to skip jobs worth of specific duration</b><br>
+     <blockquote>Added a '-starts-after' option to Rumen's Folder utility. The time duration specified after the '-starts-after' option is an offset with respect to the submit time of the first job in the input trace. Jobs in the input trace having a submit time (relative to the first job's submit time) less than the specified offset will be ignored.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1334">MAPREDUCE-1334</a>.
+     Major bug reported by Karthik K and fixed by Karthik K (contrib/index)<br>
+     <b>contrib/index - test - TestIndexUpdater fails due to an additional presence of file _SUCCESS in hdfs </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1242">MAPREDUCE-1242</a>.
+     Trivial bug reported by Amogh Vasekar and fixed by Harsh J <br>
+     <b>Chain APIs error misleading</b><br>
+     <blockquote>Fix a misleading exception message in case the Chained Mappers have mismatch in input/output Key/Value pairs between them.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1207">MAPREDUCE-1207</a>.
+     Blocker improvement reported by Arun C Murthy and fixed by Arun C Murthy (client , mrv2)<br>
+     <b>Allow admins to set java options for map/reduce tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1159">MAPREDUCE-1159</a>.
+     Trivial improvement reported by Zheng Shao and fixed by Harsh J <br>
+     <b>Limit Job name on jobtracker.jsp to be 80 char long</b><br>
+     <blockquote>Job names on jobtracker.jsp should be 80 characters long at most.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-993">MAPREDUCE-993</a>.
+     Minor bug reported by Iyappan Srinivasan and fixed by Harsh J (jobtracker)<br>
+     <b>bin/hadoop job -events &lt;jobid&gt; &lt;from-event-#&gt; &lt;#-of-events&gt; help message is confusing</b><br>
+     <blockquote>Added a helpful description message to the `mapred job -events` command.</blockquote></li>
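+     <blockquote>For example, <code>bin/hadoop job -events job_201112081234_0001 0 100</code> (the job id is illustrative) lists up to 100 task completion events for the job, starting from event 0.</blockquote>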
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-901">MAPREDUCE-901</a>.
+     Major improvement reported by Owen O'Malley and fixed by Luke Lu (task)<br>
+     <b>Move Framework Counters into a TaskMetric structure</b><br>
+     <blockquote>Efficient implementation of MapReduce framework counters.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-587">MAPREDUCE-587</a>.
+     Minor bug reported by Steve Loughran and fixed by Amar Kamat (contrib/streaming)<br>
+     <b>Stream test TestStreamingExitStatus fails with Out of Memory</b><br>
+     <blockquote>Fixed the streaming test TestStreamingExitStatus's failure due to an OutOfMemory error by reducing the testcase's io.sort.mb.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-517">MAPREDUCE-517</a>.
+     Critical bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>The capacity-scheduler should assign multiple tasks per heartbeat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-461">MAPREDUCE-461</a>.
+     Minor new feature reported by Fredrik Hedberg and fixed by Fredrik Hedberg <br>
+     <b>Enable ServicePlugins for the JobTracker</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-279">MAPREDUCE-279</a>.
+     Major improvement reported by Arun C Murthy and fixed by  (mrv2)<br>
+     <b>Map-Reduce 2.0</b><br>
+<blockquote>MapReduce has undergone a complete overhaul in hadoop-0.23, and we now have what we call MapReduce 2.0 (MRv2).
+
+The fundamental idea of MRv2 is to split up the two major functionalities of the JobTracker, resource management and job scheduling/monitoring, into separate daemons. The idea is to have a global ResourceManager (RM) and a per-application ApplicationMaster (AM). An application is either a single job in the classical sense of Map-Reduce jobs or a DAG of jobs. The ResourceManager and the per-node slave, the NodeManager (NM), form the data-computation framework. The ResourceManager is the ultimate authority that arbitrates resources among all the applications in the system. The per-application ApplicationMaster is, in effect, a framework-specific library tasked with negotiating resources from the ResourceManager and working with the NodeManager(s) to execute and monitor the tasks.
+
+The ResourceManager has two main components:
+* Scheduler (S)
+* ApplicationsManager (ASM)
+
+The Scheduler is responsible for allocating resources to the various running applications, subject to familiar constraints of capacities, queues etc. The Scheduler is a pure scheduler in the sense that it performs no monitoring or tracking of application status, and it offers no guarantees on restarting tasks that fail due to either application or hardware failures. The Scheduler performs its scheduling function based on the resource requirements of the applications; it does so based on the abstract notion of a Resource Container, which incorporates elements such as memory, cpu, disk, network etc.
+
+The Scheduler has a pluggable policy plug-in, which is responsible for partitioning the cluster resources among the various queues, applications etc. The current Map-Reduce schedulers, such as the CapacityScheduler and the FairScheduler, would be some examples of the plug-in. The CapacityScheduler supports hierarchical queues to allow for more predictable sharing of cluster resources.
+
+The ApplicationsManager is responsible for accepting job submissions, negotiating the first container for executing the application-specific ApplicationMaster, and providing the service for restarting the ApplicationMaster container on failure.
+
+The NodeManager is the per-machine framework agent that is responsible for launching the applications' containers, monitoring their resource usage (cpu, memory, disk, network) and reporting the same to the Scheduler.
+
+The per-application ApplicationMaster has the responsibility of negotiating appropriate resource containers from the Scheduler, tracking their status and monitoring progress.
+</blockquote></li>
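+     <blockquote>As a sketch of what running MRv2 looks like in practice, a minimal <code>yarn-site.xml</code> along these lines points clients at the ResourceManager and enables the MapReduce shuffle on each NodeManager (the host/port value is illustrative, and the key names are assumed from the 0.23 YARN configuration rather than taken from this issue):
+<pre>
+&lt;property&gt;
+  &lt;name&gt;yarn.resourcemanager.address&lt;/name&gt;
+  &lt;value&gt;rmhost:8040&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;yarn.nodemanager.aux-services&lt;/name&gt;
+  &lt;value&gt;mapreduce.shuffle&lt;/value&gt;
+&lt;/property&gt;
+</pre></blockquote>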
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2540">HDFS-2540</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Change WebHdfsFileSystem to two-step create/append</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2539">HDFS-2539</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Support doAs and GETHOMEDIRECTORY in webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2528">HDFS-2528</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs rest call to a secure dn fails when a token is sent</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2527">HDFS-2527</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Remove the use of Range header from webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2522">HDFS-2522</a>.
+     Minor test reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Disable TestDfsOverAvroRpc in 0.23</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2521">HDFS-2521</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , hdfs client)<br>
+     <b>Remove custom checksum headers from data transfer protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2512">HDFS-2512</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , hdfs client)<br>
+     <b>Add textual error message to data transfer protocol responses</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2501">HDFS-2501</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>add version prefix and root methods to webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2500">HDFS-2500</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Avoid file system operations in BPOfferService thread while processing deletes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2494">HDFS-2494</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (webhdfs)<br>
+     <b>[webhdfs] When Getting the file using OP=OPEN with DN http address, ESTABLISHED sockets are growing.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2493">HDFS-2493</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Remove reference to FSNamesystem in blockmanagement classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2485">HDFS-2485</a>.
+     Trivial improvement reported by Steve Loughran and fixed by Steve Loughran (data-node)<br>
+     <b>Improve code layout and constants in UnderReplicatedBlocks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2471">HDFS-2471</a>.
+     Major new feature reported by Suresh Srinivas and fixed by Suresh Srinivas (documentation)<br>
+     <b>Add Federation feature, configuration and tools documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2467">HDFS-2467</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley <br>
+     <b>HftpFileSystem uses incorrect compare for finding delegation tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2465">HDFS-2465</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node , performance)<br>
+     <b>Add HDFS support for fadvise readahead and drop-behind</b><br>
+     <blockquote>HDFS now has the ability to use the posix_fadvise and sync_file_range syscalls to manage the OS buffer cache. This support is currently considered experimental, and may be enabled by configuring the following keys:
+dfs.datanode.drop.cache.behind.writes - set to true to drop data out of the buffer cache after writing
+dfs.datanode.drop.cache.behind.reads - set to true to drop data out of the buffer cache when performing sequential reads
+dfs.datanode.sync.behind.writes - set to true to trigger dirty page writeback immediately after writing data
+dfs.datanode.readahead.bytes - set to a non-zero value to trigger readahead for sequential reads</blockquote></li>
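+     <blockquote>For instance, a minimal <code>hdfs-site.xml</code> fragment enabling drop-behind on reads together with a 4MB readahead might look like the following (the values are illustrative, not defaults):
+<pre>
+&lt;property&gt;
+  &lt;name&gt;dfs.datanode.drop.cache.behind.reads&lt;/name&gt;
+  &lt;value&gt;true&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;dfs.datanode.readahead.bytes&lt;/name&gt;
+  &lt;value&gt;4194304&lt;/value&gt;
+&lt;/property&gt;
+</pre></blockquote>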
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2453">HDFS-2453</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>tail using a webhdfs uri throws an error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2452">HDFS-2452</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Uma Maheswara Rao G (data-node)<br>
+     <b>OutOfMemoryError in DataXceiverServer takes down the DataNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2445">HDFS-2445</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (test)<br>
+     <b>Incorrect exit code for hadoop-hdfs-test tests when exception thrown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2441">HDFS-2441</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs returns two content-type headers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2439">HDFS-2439</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs open of an invalid path leads to a 500 which states an npe; we should return a 404 with an appropriate error message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2436">HDFS-2436</a>.
+     Major bug reported by Arpit Gupta and fixed by Uma Maheswara Rao G <br>
+     <b>FSNamesystem.setTimes(..) expects the path is a file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2432">HDFS-2432</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs setreplication api should return a 403 when called on a directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2428">HDFS-2428</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs api parameter validation should be better</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2427">HDFS-2427</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs mkdirs api call creates path with 777 permission, we should default it to 755</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2422">HDFS-2422</a>.
+     Major bug reported by Jeff Bean and fixed by Aaron T. Myers (name-node)<br>
+     <b>The NN should tolerate the same number of low-resource volumes as failed volumes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2416">HDFS-2416</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Jitendra Nath Pandey (webhdfs)<br>
+     <b>distcp with a webhdfs uri on a secure cluster fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2414">HDFS-2414</a>.
+     Critical bug reported by Robert Joseph Evans and fixed by Todd Lipcon (name-node , test)<br>
+     <b>TestDFSRollback fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2412">HDFS-2412</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Add backwards-compatibility layer for FSConstants</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2411">HDFS-2411</a>.
+     Major bug reported by Arpit Gupta and fixed by Jitendra Nath Pandey (webhdfs)<br>
+     <b>with webhdfs enabled in secure mode the auth to local mappings are not being respected.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2409">HDFS-2409</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>_HOST in dfs.web.authentication.kerberos.principal.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2404">HDFS-2404</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Suresh Srinivas (webhdfs)<br>
+     <b>webhdfs liststatus json response is not correct</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2403">HDFS-2403</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>The renewer in NamenodeWebHdfsMethods.generateDelegationToken(..) is not used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2401">HDFS-2401</a>.
+     Major improvement reported by Jonathan Eagles and fixed by Jonathan Eagles (build)<br>
+     <b>Running a set of methods in a Single Test Class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2395">HDFS-2395</a>.
+     Critical sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs api's should return a root element in the json response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2385">HDFS-2385</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Support delegation token renewal in webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2371">HDFS-2371</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node)<br>
+     <b>Refactor BlockSender.java for better readability</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2368">HDFS-2368</a>.
+     Major bug reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>defaults created for web keytab and principal, these properties should not have defaults</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2366">HDFS-2366</a>.
+     Major sub-task reported by Arpit Gupta and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs throws a npe when ugi is null from getDelegationToken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2363">HDFS-2363</a>.
+     Minor sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Move datanodes size printing to BlockManager from FSNameSystem's metasave API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2361">HDFS-2361</a>.
+     Critical bug reported by Rajit Saha and fixed by Jitendra Nath Pandey (name-node)<br>
+     <b>hftp is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2356">HDFS-2356</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>webhdfs: support case insensitive query parameter names</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2355">HDFS-2355</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Federation: enable using the same configuration file across all the nodes in the cluster.</b><br>
+     <blockquote>When running multiple namenodes on different hosts, this change allows sharing the same configuration file across all the nodes in the cluster (DataNodes, NameNode, BackupNode, SecondaryNameNode) without the need to define the dfs.federation.nameservice.id parameter.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2348">HDFS-2348</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Support getContentSummary and getFileChecksum in webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2347">HDFS-2347</a>.
+     Trivial bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>checkpointTxnCount's comment still saying about editlog size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2346">HDFS-2346</a>.
+     Blocker bug reported by Uma Maheswara Rao G and fixed by Laxman (test)<br>
+     <b>TestHost2NodesMap &amp; TestReplicasMap will fail depending upon execution order of test methods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2344">HDFS-2344</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (test)<br>
+     <b>Fix the TestOfflineEditsViewer test failure in 0.23 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2340">HDFS-2340</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Support getFileBlockLocations and getDelegationToken in webhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2338">HDFS-2338</a>.
+     Major sub-task reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (webhdfs)<br>
+     <b>Configuration option to enable/disable webhdfs.</b><br>
+     <blockquote>Added a conf property dfs.webhdfs.enabled for enabling/disabling webhdfs.</blockquote></li>
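+     <blockquote>For example, webhdfs can be switched on with the new property in <code>hdfs-site.xml</code>:
+<pre>
+&lt;property&gt;
+  &lt;name&gt;dfs.webhdfs.enabled&lt;/name&gt;
+  &lt;value&gt;true&lt;/value&gt;
+&lt;/property&gt;
+</pre></blockquote>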
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2333">HDFS-2333</a>.
+     Major bug reported by Ivan Kelly and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>HDFS-2284 introduced 2 findbugs warnings on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2332">HDFS-2332</a>.
+     Major test reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>Add test for HADOOP-7629: using an immutable FsPermission as an IPC parameter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2331">HDFS-2331</a>.
+     Major bug reported by Abhijit Suresh Shingate and fixed by Abhijit Suresh Shingate (hdfs client)<br>
+     <b>Hdfs compilation fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2323">HDFS-2323</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>start-dfs.sh script fails for tarball install</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2322">HDFS-2322</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>the build fails in Windows because commons-daemon TAR cannot be fetched</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2318">HDFS-2318</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (webhdfs)<br>
+     <b>Provide authentication to webhdfs using SPNEGO</b><br>
+     <blockquote>Added two new conf properties dfs.web.authentication.kerberos.principal and dfs.web.authentication.kerberos.keytab for the SPNEGO servlet filter.</blockquote></li>
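+     <blockquote>A sketch of the corresponding <code>hdfs-site.xml</code> entries (the realm and keytab path are illustrative; _HOST substitution in the principal is per HDFS-2409 above):
+<pre>
+&lt;property&gt;
+  &lt;name&gt;dfs.web.authentication.kerberos.principal&lt;/name&gt;
+  &lt;value&gt;HTTP/_HOST@EXAMPLE.COM&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+  &lt;name&gt;dfs.web.authentication.kerberos.keytab&lt;/name&gt;
+  &lt;value&gt;/etc/security/keytabs/spnego.service.keytab&lt;/value&gt;
+&lt;/property&gt;
+</pre></blockquote>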
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2317">HDFS-2317</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Read access to HDFS using HTTP REST</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2314">HDFS-2314</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Todd Lipcon (test)<br>
+     <b>MRV1 test compilation broken after HDFS-2197</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2294">HDFS-2294</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Download of commons-daemon TAR should not be under target</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2290">HDFS-2290</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Benoy Antony (name-node)<br>
+     <b>Block with corrupt replica is not getting replicated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2289">HDFS-2289</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Alejandro Abdelnur <br>
+     <b>jsvc isn't part of the artifact</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2286">HDFS-2286</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>DataXceiverServer logs AsynchronousCloseException at shutdown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2284">HDFS-2284</a>.
+     Major sub-task reported by Sanjay Radia and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Write Http access to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2273">HDFS-2273</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Refactor BlockManager.recentInvalidateSets to a new class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2267">HDFS-2267</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>DataXceiver thread name incorrect while waiting on op during keepalive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2266">HDFS-2266</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Add a Namesystem interface to avoid directly referring to FSNamesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2265">HDFS-2265</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Remove unnecessary BlockTokenSecretManager fields/methods from BlockManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2260">HDFS-2260</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>Refactor BlockReader into an interface and implementation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2258">HDFS-2258</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Konstantin Shvachko (name-node , test)<br>
+     <b>TestLeaseRecovery2 fails as lease hard limit is not reset to default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2245">HDFS-2245</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>BlockManager.chooseTarget(..) throws NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2241">HDFS-2241</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Remove implementing FSConstants interface just to access the constants defined in the interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2240">HDFS-2240</a>.
+     Critical bug reported by Todd Lipcon and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Possible deadlock between LeaseRenewer and its factory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2239">HDFS-2239</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Reduce access levels of the fields and methods in FSNamesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2238">HDFS-2238</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>NamenodeFsck.toString() uses StringBuilder with + operator </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2237">HDFS-2237</a>.
+     Minor sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Change UnderReplicatedBlocks from public to package private</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2235">HDFS-2235</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Encode servlet paths</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2233">HDFS-2233</a>.
+     Major test reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Add WebUI tests with URI reserved chars in the path and filename</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2232">HDFS-2232</a>.
+     Blocker bug reported by Konstantin Shvachko and fixed by Plamen Jeliazkov (test)<br>
+     <b>TestHDFSCLI fails on 0.22 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2230">HDFS-2230</a>.
+     Major improvement reported by Giridharan Kesavan and fixed by Giridharan Kesavan (build)<br>
+     <b>hdfs is not resolving the latest common test jars published post common mavenization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2229">HDFS-2229</a>.
+     Blocker bug reported by Vinod Kumar Vavilapalli and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Deadlock in NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2228">HDFS-2228</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move block and datanode code from FSNamesystem to BlockManager and DatanodeManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2227">HDFS-2227</a>.
+     Major improvement reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>HDFS-2018 Part 2: getRemoteEditLogManifest should pull its information from FileJournalManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2226">HDFS-2226</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Clean up counting of operations in FSEditLogLoader</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2225">HDFS-2225</a>.
+     Major improvement reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>HDFS-2018 Part 1: Refactor file management so it's not in classes which should be generic</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2212">HDFS-2212</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Refactor double-buffering code out of EditLogOutputStreams</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2210">HDFS-2210</a>.
+     Major task reported by Eli Collins and fixed by Eli Collins (contrib/hdfsproxy)<br>
+     <b>Remove hdfsproxy</b><br>
+     <blockquote>The hdfsproxy contrib component is no longer supported.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2209">HDFS-2209</a>.
+     Minor improvement reported by Steve Loughran and fixed by Steve Loughran (test)<br>
+     <b>Make MiniDFS easier to embed in other apps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2205">HDFS-2205</a>.
+     Major improvement reported by Ravi Prakash and fixed by Ravi Prakash (hdfs client)<br>
+     <b>Log message for failed connection to datanode is not followed by a success message.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2202">HDFS-2202</a>.
+     Major new feature reported by Eric Payne and fixed by Eric Payne (balancer , data-node)<br>
+     <b>Changes to balancer bandwidth should not require datanode restart.</b><br>
+     <blockquote>New dfsadmin command added: [-setBalancerBandwidth &lt;bandwidth&gt;] where bandwidth is the maximum network bandwidth in bytes per second that the balancer is allowed to use on each datanode during balancing.
+
+This is an incompatible change in 0.23.  The versions of ClientProtocol and DatanodeProtocol are changed.</blockquote></li>
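+     <blockquote>For example, <code>hdfs dfsadmin -setBalancerBandwidth 10485760</code> would cap balancer traffic at roughly 10MB/s per datanode with no datanode restart required (the value is illustrative).</blockquote>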
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2200">HDFS-2200</a>.
+     Minor sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Set FSNamesystem.LOG to package private</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2199">HDFS-2199</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Move blockTokenSecretManager from FSNamesystem to BlockManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2198">HDFS-2198</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , hdfs client , name-node)<br>
+     <b>Remove hardcoded configuration keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2197">HDFS-2197</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Refactor RPC call implementations out of NameNode class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2196">HDFS-2196</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Make ant build system work with hadoop-common JAR generated by Maven</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2191">HDFS-2191</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move datanodeMap from FSNamesystem to DatanodeManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2187">HDFS-2187</a>.
+     Major improvement reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>HDFS-1580: Make EditLogInputStream act like an iterator over FSEditLogOps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2186">HDFS-2186</a>.
+     Major bug reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>DN volume failures on startup are not counted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2180">HDFS-2180</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Refactor NameNode HTTP server into new class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2167">HDFS-2167</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move dnsToSwitchMapping and hostsReader from FSNamesystem to DatanodeManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2161">HDFS-2161</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (balancer , data-node , hdfs client , name-node , security)<br>
+     <b>Move utilities to DFSUtil</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2159">HDFS-2159</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Deprecate DistributedFileSystem.getClient()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2157">HDFS-2157</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (documentation , name-node)<br>
+     <b>Improve header comment in o.a.h.hdfs.server.namenode.NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2156">HDFS-2156</a>.
+     Major bug reported by Owen O'Malley and fixed by Eric Yang <br>
+     <b>rpm should only require the same major version as common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2154">HDFS-2154</a>.
+     Minor test reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>TestDFSShell should use test dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2153">HDFS-2153</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>DFSClientAdapter should be put under test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2149">HDFS-2149</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly (name-node)<br>
+     <b>Move EditLogOp serialization formats into FsEditLogOp implementations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2147">HDFS-2147</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move cluster network topology to block management</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2144">HDFS-2144</a>.
+     Major improvement reported by Ravi Prakash and fixed by Ravi Prakash (name-node)<br>
+     <b>If SNN shuts down during initialization it does not log the cause</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2143">HDFS-2143</a>.
+     Major improvement reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Federation: we should link the live nodes and dead nodes to the cluster web console</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2141">HDFS-2141</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (ha , name-node)<br>
+     <b>Remove NameNode roles Active and Standby (they become states)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2140">HDFS-2140</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move Host2NodesMap to block management</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2134">HDFS-2134</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move DecommissionManager to block management</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2132">HDFS-2132</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>Potential resource leak in EditLogFileOutputStream.close</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2131">HDFS-2131</a>.
+     Major test reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (test)<br>
+     <b>Tests for HADOOP-7361</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2118">HDFS-2118</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Couple dfs data dir improvements</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2116">HDFS-2116</a>.
+     Minor improvement reported by Eli Collins and fixed by Plamen Jeliazkov (test)<br>
+     <b>Cleanup TestStreamFile and TestByteRangeInputStream </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2114">HDFS-2114</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>re-commission of a decommissioned node does not delete excess replica</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2112">HDFS-2112</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>Move ReplicationMonitor to block management</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2111">HDFS-2111</a>.
+     Major test reported by Harsh J and fixed by Harsh J (data-node , test)<br>
+     <b>Add tests for ensuring that the DN will start with a few bad data directories (Part 1 of testing DiskChecker)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2110">HDFS-2110</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Some StreamFile and ByteRangeInputStream cleanup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2109">HDFS-2109</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi (hdfs client)<br>
+     <b>Store uMask as member variable to DFSClient.Conf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2108">HDFS-2108</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move datanode heartbeat handling to BlockManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2107">HDFS-2107</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Move block management code to a package</b><br>
+     <blockquote>Moved block management code to a new package org.apache.hadoop.hdfs.server.blockmanagement.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2100">HDFS-2100</a>.
+     Minor test reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>Improve TestStorageRestore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2096">HDFS-2096</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Mavenization of hadoop-hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2092">HDFS-2092</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi (hdfs client)<br>
+     <b>Create a light inner conf class in DFSClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2086">HDFS-2086</a>.
+     Major bug reported by Tanping Wang and fixed by Tanping Wang (name-node)<br>
+     <b>If the include hosts list contains host names, after restarting the namenode, datanode registration is denied</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2083">HDFS-2083</a>.
+     Major new feature reported by Tanping Wang and fixed by Tanping Wang <br>
+     <b>Adopt JMXJsonServlet into HDFS in order to query statistics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2082">HDFS-2082</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>SecondaryNameNode web interface doesn't show the right info</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2073">HDFS-2073</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Namenode is missing @Override annotations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2069">HDFS-2069</a>.
+     Trivial sub-task reported by Ravi Phulari and fixed by Harsh J (documentation)<br>
+     <b>Incorrect default trash interval value in the docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2067">HDFS-2067</a>.
+     Major bug reported by Todd Lipcon and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Bump DATA_TRANSFER_VERSION in trunk for protobufs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2066">HDFS-2066</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client , name-node)<br>
+     <b>Create a package and individual class files for DataTransferProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2065">HDFS-2065</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Uma Maheswara Rao G <br>
+     <b>Fix NPE in DFSClient.getFileChecksum</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2061">HDFS-2061</a>.
+     Minor bug reported by Matt Foley and fixed by Matt Foley (name-node)<br>
+     <b>two minor bugs in BlockManager block report processing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2058">HDFS-2058</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>DataTransfer Protocol using protobufs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2056">HDFS-2056</a>.
+     Minor improvement reported by Tanping Wang and fixed by Tanping Wang (documentation , tools)<br>
+     <b>Update fetchdt usage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2055">HDFS-2055</a>.
+     Major new feature reported by Travis Crawford and fixed by Travis Crawford (libhdfs)<br>
+     <b>Add hflush support to libhdfs</b><br>
+     <blockquote>Add hdfsHFlush to libhdfs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2054">HDFS-2054</a>.
+     Minor improvement reported by Kihwal Lee and fixed by Kihwal Lee (data-node)<br>
+     <b>BlockSender.sendChunk() prints ERROR for connection closures encountered during transferToFully()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2053">HDFS-2053</a>.
+     Minor bug reported by Michael Noll and fixed by Michael Noll (name-node)<br>
+     <b>Bug in INodeDirectory#computeContentSummary warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2046">HDFS-2046</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (build , test)<br>
+     <b>Force entropy to come from non-true random for tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2041">HDFS-2041</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Some mtimes and atimes are lost when edit logs are replayed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2040">HDFS-2040</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Only build libhdfs if a flag is passed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2034">HDFS-2034</a>.
+     Minor bug reported by John George and fixed by John George (hdfs client)<br>
+     <b>length in getBlockRange becomes negative when reading only from a block currently being written</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2030">HDFS-2030</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Fix the usability of namenode upgrade command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2029">HDFS-2029</a>.
+     Trivial improvement reported by Tsz Wo (Nicholas), SZE and fixed by John George (test)<br>
+     <b>Improve TestWriteRead</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2024">HDFS-2024</a>.
+     Trivial improvement reported by CW Chung and fixed by CW Chung (test)<br>
+     <b>Eclipse format HDFS Junit test hdfs/TestWriteRead.java </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2022">HDFS-2022</a>.
+     Major bug reported by Eli Collins and fixed by Eric Yang (build)<br>
+     <b>ant binary should build libhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2021">HDFS-2021</a>.
+     Major bug reported by CW Chung and fixed by John George (data-node)<br>
+     <b>TestWriteRead failed with inconsistent visible length of a file </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2020">HDFS-2020</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (data-node , test)<br>
+     <b>TestDFSUpgradeFromImage fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2019">HDFS-2019</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi (data-node)<br>
+     <b>Fix all the places where Java method File.list is used with FileUtil.list API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2014">HDFS-2014</a>.
+     Critical bug reported by Todd Lipcon and fixed by Eric Yang (scripts)<br>
+     <b>bin/hdfs no longer works from a source checkout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2011">HDFS-2011</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (name-node)<br>
+     <b>Removal and restoration of storage directories on checkpointing failure doesn't work properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2003">HDFS-2003</a>.
+     Major improvement reported by Ivan Kelly and fixed by Ivan Kelly <br>
+     <b>Separate FSEditLog reading logic from editLog memory state building logic</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2002">HDFS-2002</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Plamen Jeliazkov (name-node)<br>
+     <b>Incorrect computation of needed blocks in getTurnOffTip()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1999">HDFS-1999</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>Tests use deprecated configs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1998">HDFS-1998</a>.
+     Minor bug reported by Tanping Wang and fixed by Tanping Wang (scripts)<br>
+     <b>make refresh-namenodes.sh refresh all namenodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1996">HDFS-1996</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Eric Yang (build)<br>
+     <b>ivy: hdfs test jar should be independent to common test jar</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1995">HDFS-1995</a>.
+     Minor improvement reported by Tanping Wang and fixed by Tanping Wang <br>
+     <b>Minor modification to both dfsclusterhealth and dfshealth pages for Web UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1990">HDFS-1990</a>.
+     Minor bug reported by ramkrishna.s.vasudevan and fixed by Uma Maheswara Rao G (data-node)<br>
+     <b>Resource leaks in HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1986">HDFS-1986</a>.
+     Minor bug reported by Tanping Wang and fixed by Tanping Wang (tools)<br>
+     <b>Add an option for the user to return http or https ports regardless of whether security is on/off in DFSUtil.getInfoServer()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1983">HDFS-1983</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Fix path display for copy &amp; rm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1968">HDFS-1968</a>.
+     Minor test reported by CW Chung and fixed by CW Chung (test)<br>
+     <b>Enhance TestWriteRead to support File Append and Position Read </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1966">HDFS-1966</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Encapsulate individual DataTransferProtocol op header</b><br>
+     <blockquote>Added header classes for individual DataTransferProtocol op headers.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1964">HDFS-1964</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>Incorrect HTML unescaping in DatanodeJspHelper.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1963">HDFS-1963</a>.
+     Major new feature reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>HDFS rpm integration project</b><br>
+     <blockquote>Create HDFS RPM package</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1959">HDFS-1959</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Better error message for missing namenode directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1958">HDFS-1958</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Format confirmation prompt should be more lenient of its input</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1955">HDFS-1955</a>.
+     Major bug reported by Matt Foley and fixed by Matt Foley (name-node)<br>
+     <b>FSImage.doUpgrade() was made too fault-tolerant by HDFS-1826</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1953">HDFS-1953</a>.
+     Minor bug reported by Tanping Wang and fixed by Tanping Wang <br>
+     <b>Change name node mxbean name in cluster web console</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1952">HDFS-1952</a>.
+     Major bug reported by Matt Foley and fixed by Andrew Wang <br>
+     <b>FSEditLog.open() appears to succeed even if all EDITS directories fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1945">HDFS-1945</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Removed deprecated fields in DataTransferProtocol</b><br>
+     <blockquote>Removed the deprecated fields in DataTransferProtocol.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1943">HDFS-1943</a>.
+     Blocker bug reported by Wei Yongjun and fixed by Matt Foley (scripts)<br>
+     <b>fail to start datanode while start-dfs.sh is executed by root user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1939">HDFS-1939</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Eric Yang (build)<br>
+     <b>ivy: test conf should not extend common conf</b><br>
+     <blockquote>Removed duplicated jars in the test classpath.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1938">HDFS-1938</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Eric Yang (build)<br>
+     <b> Reference ivy-hdfs.classpath not found.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1937">HDFS-1937</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Umbrella JIRA for improving DataTransferProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1936">HDFS-1936</a>.
+     Blocker bug reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Updating the layout version from HDFS-1822 causes upgrade problems.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1934">HDFS-1934</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Fix NullPointerException when File.listFiles() API returns null</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1933">HDFS-1933</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Update tests for FsShell's "test"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1931">HDFS-1931</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Update tests for du/dus/df</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1928">HDFS-1928</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Fix path display for touchz</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1927">HDFS-1927</a>.
+     Major bug reported by John George and fixed by John George (name-node)<br>
+     <b>audit logs could ignore certain transactions and also could contain "ip=null"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1923">HDFS-1923</a>.
+     Major sub-task reported by Matt Foley and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>Intermittent recurring failure in TestFiDataTransferProtocol2.pipeline_Fi_29</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1922">HDFS-1922</a>.
+     Major sub-task reported by Matt Foley and fixed by Luke Lu (test)<br>
+     <b>Recurring failure in TestJMXGet.testNameNode since build 477 on May 11</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1921">HDFS-1921</a>.
+     Blocker bug reported by Aaron T. Myers and fixed by Matt Foley <br>
+     <b>Save namespace can cause NN to be unable to come up on restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1920">HDFS-1920</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (libhdfs)<br>
+     <b>libhdfs does not build for ARM processors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1917">HDFS-1917</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>Clean up duplication of dependent jar files</b><br>
+     <blockquote>Remove packaging of duplicated third party jar files</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1914">HDFS-1914</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Federation: namenode storage directory must be configurable specific to a namenode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1912">HDFS-1912</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Update tests for FsShell standardized error messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1911">HDFS-1911</a>.
+     Major test reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>HDFS tests for viewfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1908">HDFS-1908</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>DataTransferTestUtil$CountdownDoosAction.run(..) throws NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1907">HDFS-1907</a>.
+     Major bug reported by CW Chung and fixed by John George (hdfs client)<br>
+     <b>BlockMissingException upon concurrent read and write: reader was doing file position read while writer is doing write without hflush</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1906">HDFS-1906</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (hdfs client)<br>
+     <b>Remove logging exception stack trace when one of the datanode targets to read from is not reachable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1905">HDFS-1905</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi (name-node)<br>
+     <b>Improve the usability of namenode -format </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1903">HDFS-1903</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Fix path display for rm/rmr</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1902">HDFS-1902</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Fix path display for setrep</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1899">HDFS-1899</a>.
+     Major improvement reported by Todd Lipcon and fixed by Ted Yu <br>
+     <b>GenericTestUtils.formatNamenode is misplaced</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1898">HDFS-1898</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Tests failing on trunk due to use of NameNode.format</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1890">HDFS-1890</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>A few improvements on the LeaseRenewer.pendingCreates map</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1889">HDFS-1889</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>incorrect path in start/stop dfs script</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1888">HDFS-1888</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>MiniDFSCluster#corruptBlockOnDatanodes() access must be public for MapReduce contrib raid</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1884">HDFS-1884</a>.
+     Major sub-task reported by Matt Foley and fixed by Aaron T. Myers (test)<br>
+     <b>Improve TestDFSStorageStateRecovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1883">HDFS-1883</a>.
+     Major sub-task reported by Matt Foley and fixed by  (test)<br>
+     <b>Recurring failures in TestBackupNode since HDFS-1052</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1881">HDFS-1881</a>.
+     Major bug reported by Tanping Wang and fixed by Tanping Wang (data-node)<br>
+     <b>Federation: after taking snapshot the current directory of datanode is empty</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1877">HDFS-1877</a>.
+     Minor test reported by CW Chung and fixed by CW Chung (test)<br>
+     <b>Create a functional test for file read/write</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1876">HDFS-1876</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>One MiniDFSCluster ignores numDataNodes parameter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1875">HDFS-1875</a>.
+     Major bug reported by Eric Payne and fixed by Eric Payne (test)<br>
+     <b>MiniDFSCluster hard-codes dfs.datanode.address to localhost</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1873">HDFS-1873</a>.
+     Major new feature reported by Tanping Wang and fixed by Tanping Wang <br>
+     <b>Federation Cluster Management Web Console</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1871">HDFS-1871</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (test)<br>
+     <b>Tests using MiniDFSCluster fail to compile due to HDFS-1052 changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1870">HDFS-1870</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Refactor DFSClient.LeaseChecker</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1869">HDFS-1869</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (name-node)<br>
+     <b>mkdirs should use the supplied permission for all of the created directories</b><br>
+     <blockquote>A multi-level mkdir is now POSIX compliant.  Instead of creating intermediate directories with the permissions of the parent directory, intermediate directories are created with permission bits of rwxrwxrwx (0777) as modified by the current umask, plus write and search permission for the owner.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1865">HDFS-1865</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Share LeaseChecker thread among DFSClients</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1862">HDFS-1862</a>.
+     Major test reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>Improve test reliability of HDFS-1594</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1861">HDFS-1861</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (data-node)<br>
+     <b>Rename dfs.datanode.max.xcievers and bump its default value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1856">HDFS-1856</a>.
+     Major sub-task reported by Matt Foley and fixed by Matt Foley (test)<br>
+     <b>TestDatanodeBlockScanner waits forever, errs without giving information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1855">HDFS-1855</a>.
+     Major test reported by Matt Foley and fixed by Matt Foley (test)<br>
+     <b>TestDatanodeBlockScanner.testBlockCorruptionRecoveryPolicy() part 2 fails in two different ways</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1854">HDFS-1854</a>.
+     Major sub-task reported by Matt Foley and fixed by Matt Foley (test)<br>
+     <b>make failure message more useful in DFSTestUtil.waitReplication()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1846">HDFS-1846</a>.
+     Major improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (name-node)<br>
+     <b>Don't fill preallocated portion of edits log with 0x00</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1845">HDFS-1845</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>symlink comes up as directory after namenode restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1844">HDFS-1844</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Move -fs usage tests from hdfs into common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1843">HDFS-1843</a>.
+     Minor improvement reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Discover file not found early for file append </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1840">HDFS-1840</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs client)<br>
+     <b>Terminate LeaseChecker when all writing files are closed.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1835">HDFS-1835</a>.
+     Major bug reported by John Carrino and fixed by John Carrino (data-node)<br>
+     <b>DataNode.setNewStorageID pulls entropy from /dev/random</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1833">HDFS-1833</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Refactor BlockReceiver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1831">HDFS-1831</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>HDFS equivalent of HADOOP-7223 changes to handle FileContext createFlag combinations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1829">HDFS-1829</a>.
+     Major bug reported by Matt Foley and fixed by Matt Foley (name-node)<br>
+     <b>TestNodeCount waits forever, errs without giving information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1827">HDFS-1827</a>.
+     Major bug reported by Matt Foley and fixed by Matt Foley (name-node)<br>
+     <b>TestBlockReplacement waits forever, errs without giving information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1826">HDFS-1826</a>.
+     Major sub-task reported by Hairong Kuang and fixed by Matt Foley (name-node)<br>
+     <b>NameNode should save image to name directories in parallel during upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1823">HDFS-1823</a>.
+     Blocker bug reported by Tom White and fixed by Tom White (scripts)<br>
+     <b>start-dfs.sh script fails if HADOOP_HOME is not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1822">HDFS-1822</a>.
+     Blocker bug reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Editlog opcodes overlap between 20 security and later releases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1821">HDFS-1821</a>.
+     Major bug reported by John George and fixed by John George <br>
+     <b>FileContext.createSymlink with kerberos enabled sets wrong owner</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1818">HDFS-1818</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestHDFSCLI is failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1817">HDFS-1817</a>.
+     Trivial improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>Split TestFiDataTransferProtocol.java into two files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1814">HDFS-1814</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers (hdfs client , name-node)<br>
+     <b>HDFS portion of HADOOP-7214 - Hadoop /usr/bin/groups equivalent</b><br>
+     <blockquote>Introduces a new command, "hdfs groups", which displays what groups are associated with a user as seen by the NameNode.</blockquote></li>
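+     <blockquote>A minimal usage sketch of the new command (the user name "alice" is illustrative):<br>
+     $HADOOP_HOME/bin/hdfs groups alice</blockquote>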
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1812">HDFS-1812</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (test)<br>
+     <b>Address the cleanup issues in TestHDFSCLI.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1808">HDFS-1808</a>.
+     Major bug reported by Matt Foley and fixed by Matt Foley (data-node , name-node)<br>
+     <b>TestBalancer waits forever, errs without giving information</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1806">HDFS-1806</a>.
+     Major bug reported by Matt Foley and fixed by Matt Foley (data-node , name-node)<br>
+     <b>TestBlockReport.blockReport_08() and _09() are timing-dependent and likely to fail on fast servers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1797">HDFS-1797</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>New findbugs warning introduced by HDFS-1120</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1789">HDFS-1789</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client)<br>
+     <b>Refactor frequently used codes from DFSOutputStream, BlockReceiver and DataXceiver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1786">HDFS-1786</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (test)<br>
+     <b>Some cli test cases expect a "null" message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1785">HDFS-1785</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Cleanup BlockReceiver and DataXceiver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1782">HDFS-1782</a>.
+     Major bug reported by John George and fixed by John George (name-node)<br>
+     <b>FSNamesystem.startFileInternal(..) throws NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1781">HDFS-1781</a>.
+     Major bug reported by John George and fixed by John George (scripts)<br>
+     <b>jsvc executable delivered into wrong package...</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1776">HDFS-1776</a>.
+     Major bug reported by Dmytro Molkov and fixed by Bharath Mundlapudi <br>
+     <b>Bug in Concat code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1774">HDFS-1774</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (data-node)<br>
+     <b>Small optimization to FSDataset</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1773">HDFS-1773</a>.
+     Minor improvement reported by Tanping Wang and fixed by Tanping Wang (name-node)<br>
+     <b>Remove a datanode from cluster if include list is not empty and this datanode is removed from both include and exclude lists</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1770">HDFS-1770</a>.
+     Minor test reported by Eli Collins and fixed by Eli Collins <br>
+     <b>TestFiRename fails due to invalid block size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1767">HDFS-1767</a>.
+     Major sub-task reported by Matt Foley and fixed by Matt Foley (data-node)<br>
+     <b>Namenode should ignore non-initial block reports from datanodes when in safemode during startup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1763">HDFS-1763</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Replace hard-coded option strings with variables from DFSConfigKeys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1761">HDFS-1761</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Add a new DataTransferProtocol operation, Op.TRANSFER_BLOCK, instead of using RPC</b><br>
+     <blockquote>Add a new DataTransferProtocol operation, Op.TRANSFER_BLOCK, for transferring RBW/Finalized with acknowledgement and without using RPC.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1760">HDFS-1760</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (name-node)<br>
+     <b>problems with getFullPathName</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1757">HDFS-1757</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (fuse-dfs)<br>
+     <b>Don't compile fuse-dfs by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1751">HDFS-1751</a>.
+     Major new feature reported by Daryn Sharp and fixed by Daryn Sharp (data-node)<br>
+     <b>Intrinsic limits for HDFS files, directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1750">HDFS-1750</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>fs -ls hftp://file not working</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1748">HDFS-1748</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (balancer)<br>
+     <b>Balancer utilization classification is incomplete</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1741">HDFS-1741</a>.
+     Major improvement reported by Konstantin Boudnik and fixed by Konstantin Boudnik (build)<br>
+     <b>Provide a minimal pom file to allow integration of HDFS into Sonar analysis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1739">HDFS-1739</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (data-node)<br>
+     <b>When DataNode throws DiskOutOfSpaceException, it will be helpful to the user if we log the available volume size and configured block size.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1734">HDFS-1734</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (name-node)<br>
+     <b>'Chunk size to view' option is not working in Name Node UI.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1731">HDFS-1731</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Allow using a file to exclude certain tests from build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1728">HDFS-1728</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>SecondaryNameNode.checkpointSize is in bytes, not MB.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1727">HDFS-1727</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by sravankorumilli <br>
+     <b>fsck command can display command usage if user passes any illegal argument</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1723">HDFS-1723</a>.
+     Minor improvement reported by Allen Wittenauer and fixed by Jim Plush <br>
+     <b>quota error messages should use the same scale</b><br>
+     <blockquote>Updated the quota exceptions to use human-readable output.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1703">HDFS-1703</a>.
+     Minor sub-task reported by Tanping Wang and fixed by Tanping Wang (scripts)<br>
+     <b>HDFS federation: Improve start/stop scripts and add script to decommission datanodes</b><br>
+     <blockquote>The masters file is no longer used to indicate which hosts to start the 2NN on. The 2NN is now started on hosts when dfs.namenode.secondary.http-address is configured with a non-wildcard IP.</blockquote></li>
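+     <blockquote>An hdfs-site.xml sketch of a non-wildcard address that starts the 2NN on that host (the host name is illustrative; 50090 is the conventional 2NN HTTP port):<br>
+     &lt;property&gt;&lt;name&gt;dfs.namenode.secondary.http-address&lt;/name&gt;&lt;value&gt;snn.example.com:50090&lt;/value&gt;&lt;/property&gt;</blockquote>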
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1692">HDFS-1692</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi (data-node)<br>
+     <b>In secure mode, Datanode process doesn't exit when disks fail.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1691">HDFS-1691</a>.
+     Minor bug reported by Alexey Diomin and fixed by Alexey Diomin (tools)<br>
+     <b>double static declaration in Configuration.addDefaultResource("hdfs-default.xml");</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1675">HDFS-1675</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node)<br>
+     <b>Transfer RBW between datanodes</b><br>
+     <blockquote>Added a new stage TRANSFER_RBW to DataTransferProtocol</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1665">HDFS-1665</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (balancer)<br>
+     <b>Balancer sleeps inadequately</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1656">HDFS-1656</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>getDelegationToken in HftpFileSystem should renew TGT if needed.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1636">HDFS-1636</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Harsh J (name-node)<br>
+     <b>If dfs.name.dir points to an empty dir, namenode format shouldn't require confirmation</b><br>
+     <blockquote>If dfs.name.dir points to an empty dir, namenode -format no longer requires confirmation.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1630">HDFS-1630</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Checksum fsedits</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1629">HDFS-1629</a>.
+     Major sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Add a method to BlockPlacementPolicy for not removing the chosen nodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1628">HDFS-1628</a>.
+     Minor improvement reported by Ramya Sunil and fixed by John George (name-node)<br>
+     <b>AccessControlException should display the full path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1627">HDFS-1627</a>.
+     Major bug reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Fix NullPointerException in Secondary NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1626">HDFS-1626</a>.
+     Minor improvement reported by Arun C Murthy and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>Make BLOCK_INVALIDATE_LIMIT configurable</b><br>
+     <blockquote>Added a new configuration property dfs.block.invalidate.limit for FSNamesystem.blockInvalidateLimit.</blockquote></li>
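+     <blockquote>A minimal hdfs-site.xml sketch (the value 1000 is illustrative, not a documented default):<br>
+     &lt;property&gt;&lt;name&gt;dfs.block.invalidate.limit&lt;/name&gt;&lt;value&gt;1000&lt;/value&gt;&lt;/property&gt;</blockquote>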
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1625">HDFS-1625</a>.
+     Minor bug reported by Todd Lipcon and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>TestDataNodeMXBean fails if disk space usage changes during test run</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1620">HDFS-1620</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Harsh J <br>
+     <b>Rename HdfsConstants -&gt; HdfsServerConstants, FSConstants -&gt; HdfsConstants</b><br>
+     <blockquote>Rename HdfsConstants interface to HdfsServerConstants, FSConstants interface to HdfsConstants</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1612">HDFS-1612</a>.
+     Minor bug reported by Joe Crobak and fixed by Joe Crobak (documentation)<br>
+     <b>HDFS Design Documentation is outdated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1611">HDFS-1611</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (hdfs client , name-node)<br>
+     <b>Some logical issues need to be addressed.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1606">HDFS-1606</a>.
+     Major new feature reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (data-node , hdfs client , name-node)<br>
+     <b>Provide a stronger data guarantee in the write pipeline</b><br>
+     <blockquote>Added two configuration properties, dfs.client.block.write.replace-datanode-on-failure.enable and dfs.client.block.write.replace-datanode-on-failure.policy.  Added a new feature to replace datanode on failure in DataTransferProtocol.  Added getAdditionalDatanode(..) in ClientProtocol.</blockquote></li>
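+     <blockquote>A configuration sketch for the two new properties (values are illustrative; consult the property documentation for the shipped defaults and policy names):<br>
+     &lt;property&gt;&lt;name&gt;dfs.client.block.write.replace-datanode-on-failure.enable&lt;/name&gt;&lt;value&gt;true&lt;/value&gt;&lt;/property&gt;<br>
+     &lt;property&gt;&lt;name&gt;dfs.client.block.write.replace-datanode-on-failure.policy&lt;/name&gt;&lt;value&gt;DEFAULT&lt;/value&gt;&lt;/property&gt;</blockquote>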
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1602">HDFS-1602</a>.
+     Major bug reported by Konstantin Boudnik and fixed by Boris Shkolnik (name-node)<br>
+     <b>NameNode storage failed replica restoration is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1601">HDFS-1601</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Pipeline ACKs are sent as lots of tiny TCP packets</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1600">HDFS-1600</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Todd Lipcon (build , test)<br>
+     <b>editsStored.xml cause release audit warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1598">HDFS-1598</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (name-node)<br>
+     <b>ListPathsServlet excludes .*.crc files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1596">HDFS-1596</a>.
+     Major improvement reported by Patrick Angeles and fixed by Harsh J (documentation , name-node)<br>
+     <b>Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml</b><br>
+     <blockquote>Removed references to the older fs.checkpoint.* properties that resided in core-site.xml</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1594">HDFS-1594</a>.
+     Major bug reported by Devaraj K and fixed by Aaron T. Myers (name-node)<br>
+     <b>When the disk becomes full Namenode is getting shutdown and not able to recover</b><br>
+     <blockquote>Implemented a daemon thread that periodically monitors disk usage. If usage reaches the configured threshold, the name node is put into safe mode so that no modifications to the file system can occur; once usage falls back below the threshold, the name node is taken out of safe mode. Both the threshold value and the check interval are configurable.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1592">HDFS-1592</a>.
+     Major bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Datanode startup doesn't honor volumes.tolerated </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1588">HDFS-1588</a>.
+     Major improvement reported by Erik Steffl and fixed by Erik Steffl <br>
+     <b>Add dfs.hosts.exclude to DFSConfigKeys and use constant instead of hardcoded string</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1585">HDFS-1585</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>HDFS-1547 broke MR build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1583">HDFS-1583</a>.
+     Major improvement reported by Liyin Liang and fixed by Liyin Liang (name-node)<br>
+     <b>Improve backup-node sync performance by wrapping RPC parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1582">HDFS-1582</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (libhdfs)<br>
+     <b>Remove auto-generated native build files</b><br>
+     <blockquote>The native build, when run from trunk, now requires autotools, libtool and the openssl dev libraries.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1573">HDFS-1573</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon (hdfs client)<br>
+     <b>LeaseChecker thread name trace not that useful</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1568">HDFS-1568</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Joey Echeverria (data-node)<br>
+     <b>Improve DataXceiver error logging</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1560">HDFS-1560</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>dfs.data.dir permissions should default to 700</b><br>
+     <blockquote>The permissions on datanode data directories (configured by dfs.datanode.data.dir.perm) now default to 0700. Upon startup, the datanode will automatically change the permissions to match the configured value.</blockquote></li>
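+     <blockquote>An hdfs-site.xml sketch restating the new default explicitly (octal value as described above):<br>
+     &lt;property&gt;&lt;name&gt;dfs.datanode.data.dir.perm&lt;/name&gt;&lt;value&gt;700&lt;/value&gt;&lt;/property&gt;</blockquote>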
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1557">HDFS-1557</a>.
+     Major sub-task reported by Ivan Kelly and fixed by Ivan Kelly (name-node)<br>
+     <b>Separate Storage from FSImage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1551">HDFS-1551</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Giridharan Kesavan (build)<br>
+     <b>fix the pom template's version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1547">HDFS-1547</a>.
+     Major improvement reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>Improve decommission mechanism</b><br>
+     <blockquote>Summary of changes to the decommissioning process:
+# After nodes are decommissioned, they are not shut down. Decommissioned nodes are not used for writes; for reads, they are given as the last location to read from.
+# The numbers of live and dead decommissioned nodes are displayed in the namenode web UI.
+# The free capacity of decommissioned nodes is not counted towards the cluster's free capacity.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1541">HDFS-1541</a>.
+     Major sub-task reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Not marking datanodes dead when namenode in safemode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1540">HDFS-1540</a>.
+     Major bug reported by dhruba borthakur and fixed by dhruba borthakur (data-node)<br>
+     <b>Make Datanode handle errors to namenode.register call more elegantly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1539">HDFS-1539</a>.
+     Major improvement reported by dhruba borthakur and fixed by dhruba borthakur (data-node , hdfs client , name-node)<br>
+     <b>prevent data loss when a cluster suffers a power loss</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1536">HDFS-1536</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang <br>
+     <b>Improve HDFS WebUI</b><br>
+     <blockquote>On web UI, missing block number now becomes accurate and under-replicated blocks do not include missing blocks.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1534">HDFS-1534</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Fix some incorrect logs in FSDirectory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1533">HDFS-1533</a>.
+     Major bug reported by Patrick Kling and fixed by Patrick Kling (hdfs client)<br>
+     <b>A more elegant FileSystem#listCorruptFileBlocks API (HDFS portion)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1526">HDFS-1526</a>.
+     Major bug reported by Hairong Kuang and fixed by Hairong Kuang (hdfs client)<br>
+     <b>Dfs client name for a map/reduce task should have some randomness</b><br>
+     <blockquote>A client name now has the format DFSClient_applicationid_randomint_threadid, where applicationid is mapred.task.id when set, or "NONMAPREDUCE" otherwise.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1524">HDFS-1524</a>.
+     Blocker bug reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Image loader should make sure to read every byte in image file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1523">HDFS-1523</a>.
+     Major bug reported by Konstantin Boudnik and fixed by Konstantin Boudnik (test)<br>
+     <b>TestLargeBlock is failing on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1518">HDFS-1518</a>.
+     Minor improvement reported by Jingguo Yao and fixed by Jingguo Yao (name-node)<br>
+     <b>Wrong description in FSNamesystem's javadoc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1516">HDFS-1516</a>.
+     Major bug reported by Konstantin Boudnik and fixed by Konstantin Boudnik (build)<br>
+     <b>mvn-install is broken after 0.22 branch creation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1513">HDFS-1513</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins <br>
+     <b>Fix a number of warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1511">HDFS-1511</a>.
+     Blocker bug reported by Nigel Daley and fixed by Jakob Homan <br>
+     <b>98 Release Audit warnings on trunk and branch-0.22</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1510">HDFS-1510</a>.
+     Minor improvement reported by Nigel Daley and fixed by Nigel Daley <br>
+     <b>Add test-patch.properties required by test-patch.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1509">HDFS-1509</a>.
+     Major improvement reported by dhruba borthakur and fixed by dhruba borthakur (name-node)<br>
+     <b>Resync discarded directories in fs.name.dir during saveNamespace command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1506">HDFS-1506</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Refactor fsimage loading code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1505">HDFS-1505</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Aaron T. Myers <br>
+     <b>saveNamespace appears to succeed even if all directories fail to save</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1503">HDFS-1503</a>.
+     Minor bug reported by Eli Collins and fixed by Todd Lipcon (test)<br>
+     <b>TestSaveNamespace fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1502">HDFS-1502</a>.
+     Minor bug reported by Eli Collins and fixed by Hairong Kuang <br>
+     <b>TestBlockRecovery triggers NPE in assert</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1486">HDFS-1486</a>.
+     Major improvement reported by Konstantin Boudnik and fixed by Konstantin Boudnik (test)<br>
+     <b>Generalize CLITest structure and interfaces to facilitate upstream adoption (e.g. for web testing)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1481">HDFS-1481</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>NameNode should validate fsimage before rolling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1480">HDFS-1480</a>.
+     Major bug reported by T Meyarivan and fixed by Todd Lipcon (name-node)<br>
+     <b>All replicas of a block can end up on the same rack when some datanodes are decommissioning.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1476">HDFS-1476</a>.
+     Major improvement reported by Patrick Kling and fixed by Patrick Kling (name-node)<br>
+     <b>listCorruptFileBlocks should be functional while the name node is still in safe mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1473">HDFS-1473</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (name-node)<br>
+     <b>Refactor storage management into classes separate from fsimage file reading/writing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1467">HDFS-1467</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (data-node)<br>
+     <b>Append pipeline never succeeds with more than one replica</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1463">HDFS-1463</a>.
+     Major bug reported by dhruba borthakur and fixed by dhruba borthakur (name-node)<br>
+     <b>accessTime updates should not occur in safeMode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1458">HDFS-1458</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Improve checkpoint performance by avoiding unnecessary image downloads</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1448">HDFS-1448</a>.
+     Major new feature reported by Erik Steffl and fixed by Erik Steffl (tools)<br>
+     <b>Create multi-format parser for edits logs file, support binary and XML formats initially</b><br>
+     <blockquote>The offline edits viewer feature adds an oev tool to the hdfs script. Oev makes it possible to convert edits logs to and from the native binary and XML formats. It uses the same framework as the offline image viewer.
+
+Example usage:
+
+$HADOOP_HOME/bin/hdfs oev -i edits -o output.xml</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1445">HDFS-1445</a>.
+     Major sub-task reported by Matt Foley and fixed by Matt Foley (data-node)<br>
+     <b>Batch the calls in DataStorage to FileUtil.createHardLink(), so we call it once per directory instead of once per file</b><br>
+     <blockquote>Batch hardlinking during "upgrade" snapshots, cutting the time from approximately 8 minutes per volume to approximately 8 seconds.  Validated on both Linux and Windows.  Depends on prior integration with the patch for HADOOP-7133.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1442">HDFS-1442</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Api to get delegation token in Hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1381">HDFS-1381</a>.
+     Major bug reported by Jakob Homan and fixed by Jim Plush (test)<br>
+     <b>HDFS javadocs hard-code references to dfs.namenode.name.dir and dfs.datanode.data.dir parameters</b><br>
+     <blockquote>Updated the JavaDocs to appropriately represent the new Configuration Keys that are used in the code. The docs did not match the code.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1378">HDFS-1378</a>.
+     Major improvement reported by Todd Lipcon and fixed by Colin Patrick McCabe (name-node)<br>
+     <b>Edit log replay should track and report file offsets in case of errors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1377">HDFS-1377</a>.
+     Blocker bug reported by Eli Collins and fixed by Eli Collins (name-node)<br>
+     <b>Quota bug for partial blocks allows quotas to be violated </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1371">HDFS-1371</a>.
+     Major bug reported by Koji Noguchi and fixed by Tanping Wang (hdfs client , name-node)<br>
+     <b>One bad node can incorrectly flag many files as corrupt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1360">HDFS-1360</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>TestBlockRecovery should bind ephemeral ports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1335">HDFS-1335</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (hdfs client , name-node)<br>
+     <b>HDFS side of HADOOP-6904: first step towards inter-version communications between dfs client and NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1332">HDFS-1332</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Ted Yu (name-node)<br>
+     <b>When unable to place replicas, BlockPlacementPolicy should log reasons nodes were excluded</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1330">HDFS-1330</a>.
+     Major new feature reported by Hairong Kuang and fixed by John George (data-node)<br>
+     <b>Make RPCs to DataNodes timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1321">HDFS-1321</a>.
+     Minor bug reported by gary murry and fixed by Jim Plush (name-node)<br>
+     <b>If service port and main port are the same, there is no clear log message explaining the issue.</b><br>
+     <blockquote>Added a check to make sure the RPC and HTTP ports on the NameNode are not set to the same value; otherwise an IOException is thrown with an appropriate message.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1295">HDFS-1295</a>.
+     Major sub-task reported by dhruba borthakur and fixed by Matt Foley (name-node)<br>
+     <b>Improve namenode restart times by short-circuiting the first block reports from datanodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1257">HDFS-1257</a>.
+     Major bug reported by Ramkumar Vadali and fixed by Eric Payne (name-node)<br>
+     <b>Race condition on FSNamesystem#recentInvalidateSets introduced by HADOOP-5124</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1206">HDFS-1206</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Konstantin Boudnik (test)<br>
+     <b>TestFiHFlush fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1189">HDFS-1189</a>.
+     Major bug reported by Kang Xiao and fixed by John George (name-node)<br>
+     <b>Quota counts missed between clear quota and set quota</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1149">HDFS-1149</a>.
+     Major bug reported by Todd Lipcon and fixed by Aaron T. Myers (name-node)<br>
+     <b>Lease reassignment is not persisted to edit log</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1120">HDFS-1120</a>.
+     Major improvement reported by Jeff Hammerbacher and fixed by Harsh J (data-node)<br>
+     <b>Make DataNode's block-to-device placement policy pluggable</b><br>
+     <blockquote>Make the DataNode's block-volume choosing policy pluggable.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1117">HDFS-1117</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu <br>
+     <b>HDFS portion of HADOOP-6728 (overhaul metrics framework)</b><br>
+     <blockquote>Metrics names are standardized to use CapitalizedCamelCase. Some examples:
+# Metrics names using "_" are changed to the new naming scheme, e.g. bytes_written becomes BytesWritten.
+# All metrics names start with a capital letter, e.g. threadsBlocked becomes ThreadsBlocked.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1073">HDFS-1073</a>.
+     Major improvement reported by Sanjay Radia and fixed by Todd Lipcon <br>
+     <b>Simpler model for Namenode's fs Image and edit Logs </b><br>
+     <blockquote>The NameNode's storage layout for its name directories has been reorganized to be more robust. Each edit now has a unique transaction ID, and each file is associated with a transaction ID (for checkpoints) or a range of transaction IDs (for edit logs).</blockquote></li>
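+     <blockquote>An illustrative listing of a name directory under the new layout (transaction IDs are made up):<br>
+     fsimage_0000000000000000042<br>
+     edits_0000000000000000001-0000000000000000042<br>
+     edits_inprogress_0000000000000000043</blockquote>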
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1070">HDFS-1070</a>.
+     Major sub-task reported by Hairong Kuang and fixed by Hairong Kuang (name-node)<br>
+     <b>Speedup NameNode image loading and saving by storing local file names</b><br>
+     <blockquote>This changes the fsimage format to be
+root directory-1 directory-2 ... directory-n.
+Each directory stores all its children in the following format:
+Directory_full_path_name num_of_children child-1 ... child-n.
+Each inode stores only the last component of its path name in the fsimage.
+This change requires an upgrade at deployment.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1052">HDFS-1052</a>.
+     Major new feature reported by Suresh Srinivas and fixed by Suresh Srinivas (name-node)<br>
+     <b>HDFS scalability with multiple namenodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-1001">HDFS-1001</a>.
+     Minor bug reported by bc Wong and fixed by bc Wong (data-node)<br>
+     <b>DataXceiver and BlockReader disagree on when to send/recv CHECKSUM_OK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-863">HDFS-863</a>.
+     Major bug reported by Todd Lipcon and fixed by Ken Goodhope (test)<br>
+     <b>Potential deadlock in TestOverReplicatedBlocks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-780">HDFS-780</a>.
+     Major test reported by Eli Collins and fixed by Eli Collins (fuse-dfs)<br>
+     <b>Revive TestFuseDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-560">HDFS-560</a>.
+     Minor improvement reported by Steve Loughran and fixed by Steve Loughran (build)<br>
+     <b>Proposed enhancements/tuning to hadoop-hdfs/build.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-420">HDFS-420</a>.
+     Major improvement reported by Dima Brodsky and fixed by Brian Bockelman (fuse-dfs)<br>
+     <b>Fuse-dfs should cache fs handles</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-73">HDFS-73</a>.
+     Blocker bug reported by Raghu Angadi and fixed by Uma Maheswara Rao G (hdfs client)<br>
+     <b>DFSOutputStream does not close all the sockets</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8619">HADOOP-8619</a>.
+     Major improvement reported by Radim Kolar and fixed by Chris Douglas (io)<br>
+     <b>WritableComparator must implement no-arg constructor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7798">HADOOP-7798</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Doug Cutting (build)<br>
+     <b>Release artifacts need to be signed for Nexus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7797">HADOOP-7797</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley (build)<br>
+     <b>Fix the repository name to support pushing to the staging area of Nexus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7792">HADOOP-7792</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Common component for HDFS-2416: Add verifyToken method to AbstractDelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7789">HADOOP-7789</a>.
+     Major improvement reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>Minor edits to top-level site</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7785">HADOOP-7785</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (io , util)<br>
+     <b>Add equals, hashcode, toString to DataChecksum</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7782">HADOOP-7782</a>.
+     Critical bug reported by Arun C Murthy and fixed by Tom White (build)<br>
+     <b>Aggregate project javadocs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7778">HADOOP-7778</a>.
+     Major bug reported by Tom White and fixed by Tom White <br>
+     <b>FindBugs warning in Token.getKind()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7772">HADOOP-7772</a>.
+     Trivial improvement reported by Steve Loughran and fixed by Steve Loughran <br>
+     <b>javadoc the topology classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7771">HADOOP-7771</a>.
+     Blocker bug reported by John George and fixed by John George <br>
+     <b>NPE when running hdfs dfs -copyToLocal, -get etc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7770">HADOOP-7770</a>.
+     Blocker bug reported by Ravi Prakash and fixed by Ravi Prakash (viewfs)<br>
+     <b>ViewFS getFileChecksum throws FileNotFoundException for files in /tmp and /user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7768">HADOOP-7768</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Tom White (build)<br>
+     <b>PreCommit-HADOOP-Build is failing on hadoop-auth-examples</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7766">HADOOP-7766</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>The auth to local mappings are not being respected, with webhdfs and security enabled.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7764">HADOOP-7764</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+     <b>Allow both ACL list and global path spec filters to HttpServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7763">HADOOP-7763</a>.
+     Major improvement reported by Tom White and fixed by Tom White (documentation)<br>
+     <b>Add top-level navigation to APT docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7762">HADOOP-7762</a>.
+     Major task reported by Eli Collins and fixed by Eli Collins (scripts)<br>
+     <b>Common side of MR-2736 (MR1 removal)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7755">HADOOP-7755</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles (build)<br>
+     <b>Detect MapReduce PreCommit Trunk builds silently failing when running test-patch.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7753">HADOOP-7753</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (io , native , performance)<br>
+     <b>Support fadvise and sync_data_range in NativeIO, add ReadaheadPool class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7749">HADOOP-7749</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (util)<br>
+     <b>Add NetUtils call which provides more help in exception messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7745">HADOOP-7745</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>I switched variable names in HADOOP-7509</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7744">HADOOP-7744</a>.
+     Major bug reported by Jonathan Eagles and fixed by Jonathan Eagles (test)<br>
+     <b>Incorrect exit code for hadoop-core-test tests when exception thrown</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7743">HADOOP-7743</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Add Maven profile to create a full source tarball</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7740">HADOOP-7740</a>.
+     Minor bug reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>security audit logger is not on by default, fix the log4j properties to enable the logger</b><br>
+     <blockquote>Fixed security audit logger configuration. (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7737">HADOOP-7737</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>normalize hadoop-mapreduce &amp; hadoop-dist dist/tar build with common/hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7728">HADOOP-7728</a>.
+     Major bug reported by Ramya Sunil and fixed by Ramya Sunil (conf)<br>
+     <b>hadoop-setup-conf.sh should be modified to enable task memory manager</b><br>
+     <blockquote>Enable task memory management to be configurable via hadoop config setup script.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7724">HADOOP-7724</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Arpit Gupta <br>
+     <b>hadoop-setup-conf.sh should put proxy user info into the core-site.xml </b><br>
+     <blockquote>Fixed hadoop-setup-conf.sh to put proxy user in core-site.xml.  (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7721">HADOOP-7721</a>.
+     Major bug reported by Arpit Gupta and fixed by Jitendra Nath Pandey <br>
+     <b>dfs.web.authentication.kerberos.principal expects the full hostname and does not replace _HOST with the hostname</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7720">HADOOP-7720</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>improve the hadoop-setup-conf.sh to read in the hbase user and setup the configs</b><br>
+     <blockquote>Added parameter for HBase user to setup config script. (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7715">HADOOP-7715</a>.
+     Major bug reported by Arpit Gupta and fixed by Eric Yang (conf)<br>
+     <b>see log4j Error when running mr jobs and certain dfs calls</b><br>
+     <blockquote>Removed unnecessary security logger configuration. (Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7711">HADOOP-7711</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>hadoop-env.sh generated from templates has duplicate info</b><br>
+     <blockquote>Fixed recursive sourcing of HADOOP_OPTS environment variables (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7710">HADOOP-7710</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>create a script to set up applications in order to create root directories for applications such as hbase, hcat, hive, etc.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7709">HADOOP-7709</a>.
+     Major improvement reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+     <b>Running a set of methods in a Single Test Class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7708">HADOOP-7708</a>.
+     Critical bug reported by Arpit Gupta and fixed by Eric Yang (conf)<br>
+     <b>config generator does not update the properties file if one exists already</b><br>
+     <blockquote>Fixed hadoop-setup-conf.sh to handle config file consistently.  (Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7707">HADOOP-7707</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>improve config generator to allow users to specify proxy user, turn append on or off, turn webhdfs on or off</b><br>
+     <blockquote>Added toggle for dfs.support.append, webhdfs and hadoop proxy user to setup config script. (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7705">HADOOP-7705</a>.
+     Minor new feature reported by Steve Loughran and fixed by Steve Loughran (util)<br>
+     <b>Add a log4j back end that can push out JSON data, one per line</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7691">HADOOP-7691</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Eric Yang <br>
+     <b>hadoop deb pkg should take a diff group id</b><br>
+     <blockquote>Fixed conflict uid for install packages. (Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7684">HADOOP-7684</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (scripts)<br>
+     <b>jobhistory server and secondarynamenode should have init.d script</b><br>
+     <blockquote>Added init.d script for jobhistory server and secondary namenode. (Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7681">HADOOP-7681</a>.
+     Minor bug reported by Arpit Gupta and fixed by Arpit Gupta (conf)<br>
+     <b>log4j.properties is missing properties for security audit and hdfs audit should be changed to info</b><br>
+     <blockquote>HADOOP-7681. Fixed security and hdfs audit log4j properties. (Arpit Gupta via Eric Yang)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7671">HADOOP-7671</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Add license headers to hadoop-common/src/main/packages/templates/conf/</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7668">HADOOP-7668</a>.
+     Minor improvement reported by Suresh Srinivas and fixed by Steve Loughran (util)<br>
+     <b>Add a NetUtils method that can tell if an InetAddress belongs to local host</b><br>
+     <blockquote></blockquote></li>
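+<p>A minimal sketch of the new check, assuming the method landed as NetUtils.isLocalAddress(InetAddress):</p>
+<pre>
+import java.net.InetAddress;
+import org.apache.hadoop.net.NetUtils;
+
+public class LocalAddressCheck {
+  public static void main(String[] args) throws Exception {
+    // Loopback always belongs to the local host; a remote name typically does not.
+    InetAddress loopback = InetAddress.getByName("127.0.0.1");
+    System.out.println(NetUtils.isLocalAddress(loopback)); // expected: true
+  }
+}
+</pre>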
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7664">HADOOP-7664</a>.
+     Minor improvement reported by Ravi Prakash and fixed by Ravi Prakash (conf)<br>
+     <b>o.a.h.conf.Configuration complains of overriding final parameter even if the value with which it's attempting to override is the same.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7663">HADOOP-7663</a>.
+     Major bug reported by Mayank Bansal and fixed by Mayank Bansal (test)<br>
+     <b>TestHDFSTrash failing on 22</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7662">HADOOP-7662</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>logs servlet should use pathspec of /*</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7658">HADOOP-7658</a>.
+     Major bug reported by Giridharan Kesavan and fixed by Eric Yang <br>
+     <b>Fix the hadoop config template</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7655">HADOOP-7655</a>.
+     Major improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>provide a small validation script that smoke tests the installed cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7642">HADOOP-7642</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Tom White (build)<br>
+     <b>create hadoop-dist module where TAR stitching would happen</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7639">HADOOP-7639</a>.
+     Major bug reported by Thomas Graves and fixed by Thomas Graves <br>
+     <b>yarn ui not properly filtered in HttpServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7637">HADOOP-7637</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (build)<br>
+     <b>Fair scheduler configuration file is not bundled in RPM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7633">HADOOP-7633</a>.
+     Major bug reported by Arpit Gupta and fixed by Eric Yang (conf)<br>
+     <b>log4j.properties should be added to the hadoop conf on deploy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7631">HADOOP-7631</a>.
+     Major bug reported by Ramya Sunil and fixed by Eric Yang (conf)<br>
+     <b>In mapred-site.xml, stream.tmpdir is mapped to ${mapred.temp.dir} which is undeclared.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7630">HADOOP-7630</a>.
+     Major bug reported by Arpit Gupta and fixed by Eric Yang (conf)<br>
+     <b>hadoop-metrics2.properties should have a property *.period set to a default value for metrics</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7629">HADOOP-7629</a>.
+     Major bug reported by Patrick Hunt and fixed by Todd Lipcon <br>
+     <b>regression with MAPREDUCE-2289 - setPermission passed immutable FsPermission (rpc failure)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7627">HADOOP-7627</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (metrics , test)<br>
+     <b>Improve MetricsAsserts to give more understandable output on failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7626">HADOOP-7626</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (scripts)<br>
+     <b>Allow overwrite of HADOOP_CLASSPATH and HADOOP_OPTS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7624">HADOOP-7624</a>.
+     Major improvement reported by Vinod Kumar Vavilapalli and fixed by Alejandro Abdelnur (build)<br>
+     <b>Set things up for a top level hadoop-tools module</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7612">HADOOP-7612</a>.
+     Major improvement reported by Tom White and fixed by Tom White (build)<br>
+     <b>Change test-patch to run tests for all nested modules</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7610">HADOOP-7610</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (scripts)<br>
+     <b>/etc/profile.d does not exist on Debian</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7608">HADOOP-7608</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (io)<br>
+     <b>SnappyCodec check for Hadoop native lib is wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7606">HADOOP-7606</a>.
+     Major bug reported by Aaron T. Myers and fixed by Alejandro Abdelnur (test)<br>
+     <b>Upgrade Jackson to version 1.7.1 to match the version required by Jersey</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7604">HADOOP-7604</a>.
+     Critical bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Hadoop Auth examples pom in 0.23 points to 0.24 versions.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7603">HADOOP-7603</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang <br>
+     <b>Set default hdfs, mapred uid, and hadoop group gid for RPM packages</b><br>
+     <blockquote>Set hdfs uid, mapred uid, and hadoop gid to fixed numbers (201, 202, and 123, respectively).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7599">HADOOP-7599</a>.
+     Major bug reported by Eric Yang and fixed by Eric Yang (scripts)<br>
+     <b>Improve hadoop setup conf script to setup secure Hadoop cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7598">HADOOP-7598</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (build)<br>
+     <b>smart-apply-patch.sh does not handle patching from a sub directory correctly.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7595">HADOOP-7595</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Upgrade dependency to Avro 1.5.3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7594">HADOOP-7594</a>.
+     Major new feature reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE <br>
+     <b>Support HTTP REST in HttpServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7593">HADOOP-7593</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Uma Maheswara Rao G (test)<br>
+     <b>AssertionError in TestHttpServer.testMaxThreads()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7589">HADOOP-7589</a>.
+     Major bug reported by Robert Joseph Evans and fixed by Robert Joseph Evans (build)<br>
+     <b>Prefer mvn test -DskipTests over mvn compile in test-patch.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7580">HADOOP-7580</a>.
+     Major bug reported by Siddharth Seth and fixed by Siddharth Seth <br>
+     <b>Add a version of getLocalPathForWrite to LocalDirAllocator which doesn't create dirs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7579">HADOOP-7579</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Rename package names from alfredo to auth</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7578">HADOOP-7578</a>.
+     Major bug reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Fix test-patch to be able to run on MR patches.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7576">HADOOP-7576</a>.
+     Major bug reported by Tom White and fixed by Tsz Wo (Nicholas), SZE (security)<br>
+     <b>Fix findbugs warnings in Hadoop Auth (Alfredo)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7575">HADOOP-7575</a>.
+     Minor bug reported by Jonathan Eagles and fixed by Jonathan Eagles (fs)<br>
+     <b>Support fully qualified paths as part of LocalDirAllocator</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7568">HADOOP-7568</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Plamen Jeliazkov (io)<br>
+     <b>SequenceFile should not print into stdout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7566">HADOOP-7566</a>.
+     Major bug reported by Mahadev konar and fixed by Alejandro Abdelnur <br>
+     <b>MR tests are failing: webapps/hdfs not found in CLASSPATH</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7564">HADOOP-7564</a>.
+     Major sub-task reported by Tom White and fixed by Tom White <br>
+     <b>Remove test-patch SVN externals</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7561">HADOOP-7561</a>.
+     Major sub-task reported by Tom White and fixed by Tom White <br>
+     <b>Make test-patch only run tests for changed modules</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7560">HADOOP-7560</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>Make hadoop-common a POM module with sub-modules (common &amp; alfredo)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7555">HADOOP-7555</a>.
+     Trivial improvement reported by Aaron T. Myers and fixed by Aaron T. Myers (build)<br>
+     <b>Add eclipse-generated files to .gitignore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7552">HADOOP-7552</a>.
+     Minor improvement reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>FileUtil#fullyDelete doesn't throw IOE but lists it in the throws clause</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7547">HADOOP-7547</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (io)<br>
+     <b>Fix the warning in writable classes. [WritableComparable is a raw type. References to generic type WritableComparable&lt;T&gt; should be parameterized]</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7545">HADOOP-7545</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (build , test)<br>
+     <b>common -tests jar should not include properties and configs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7536">HADOOP-7536</a>.
+     Major bug reported by Kihwal Lee and fixed by Alejandro Abdelnur (build)<br>
+     <b>Correct the dependency version regressions introduced in HADOOP-6671</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7533">HADOOP-7533</a>.
+     Major sub-task reported by Tom White and fixed by Tom White <br>
+     <b>Allow test-patch to be run from any subproject directory </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7531">HADOOP-7531</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (util)<br>
+     <b>Add servlet util methods for handling paths in requests </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7529">HADOOP-7529</a>.
+     Critical bug reported by Todd Lipcon and fixed by Luke Lu (metrics)<br>
+     <b>Possible deadlock in metrics2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7528">HADOOP-7528</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Maven build fails in Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7526">HADOOP-7526</a>.
+     Minor test reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>Add TestPath tests for URI conversion and reserved characters  </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7525">HADOOP-7525</a>.
+     Major sub-task reported by Tom White and fixed by Tom White (scripts)<br>
+     <b>Make arguments to test-patch optional</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7523">HADOOP-7523</a>.
+     Blocker bug reported by John Lee and fixed by John Lee (test)<br>
+     <b>Test org.apache.hadoop.fs.TestFilterFileSystem fails due to java.lang.NoSuchMethodException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7520">HADOOP-7520</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>hadoop-main fails to deploy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7515">HADOOP-7515</a>.
+     Major sub-task reported by Tom White and fixed by Tom White (build)<br>
+     <b>test-patch reports the wrong number of javadoc warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7512">HADOOP-7512</a>.
+     Trivial task reported by Harsh J and fixed by Harsh J (documentation)<br>
+     <b>Fix example mistake in WritableComparable javadocs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7509">HADOOP-7509</a>.
+     Trivial improvement reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Improve message when Authentication is required</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7508">HADOOP-7508</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>compiled nativelib is in wrong directory and it is not picked up by surefire setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7507">HADOOP-7507</a>.
+     Major bug reported by Jeff Bean and fixed by Alejandro Abdelnur (metrics)<br>
+     <b>jvm metrics all use the same namespace</b><br>
+     <blockquote>JVM metrics published to Ganglia now include the process name as part of the gmetric name.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7502">HADOOP-7502</a>.
+     Major sub-task reported by Luke Lu and fixed by Luke Lu <br>
+     <b>Use canonical (IDE friendly) generated-sources directory for generated sources</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7501">HADOOP-7501</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Tom White (build)<br>
+     <b>publish Hadoop Common artifacts (post HADOOP-6671) to Apache SNAPSHOTs repo</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7499">HADOOP-7499</a>.
+     Major bug reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt (util)<br>
+     <b>Add method for doing a sanity check on hostnames in NetUtils</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7498">HADOOP-7498</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>Remove legacy TAR layout creation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7496">HADOOP-7496</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>break Maven TAR &amp; bintar profiles into just LAYOUT &amp; TAR proper</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7493">HADOOP-7493</a>.
+     Major new feature reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (io)<br>
+     <b>[HDFS-362] Provide ShortWritable class in hadoop.</b><br>
+     <blockquote></blockquote></li>
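+<p>A minimal round-trip sketch for the new class, assuming the usual Writable conventions (a no-arg constructor plus set()/get()):</p>
+<pre>
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.ShortWritable;
+
+public class ShortWritableRoundTrip {
+  public static void main(String[] args) throws Exception {
+    ShortWritable out = new ShortWritable();
+    out.set((short) 42);
+    DataOutputBuffer buf = new DataOutputBuffer();
+    out.write(buf);                              // serialize the short
+
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(buf.getData(), buf.getLength());
+    ShortWritable back = new ShortWritable();
+    back.readFields(in);                         // deserialize it again
+    System.out.println(back.get());              // prints 42
+  }
+}
+</pre>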
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7491">HADOOP-7491</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (scripts)<br>
+     <b>hadoop command should respect HADOOP_OPTS when given a class name </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7474">HADOOP-7474</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Refactor ClientCache out of WritableRpcEngine.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7472">HADOOP-7472</a>.
+     Minor improvement reported by Kihwal Lee and fixed by Kihwal Lee (ipc)<br>
+     <b>RPC client should deal with the IP address changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7471">HADOOP-7471</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (build)<br>
+     <b>the saveVersion.sh script sometimes fails to extract SVN URL</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7469">HADOOP-7469</a>.
+     Minor sub-task reported by Steve Loughran and fixed by Steve Loughran (util)<br>
+     <b>add a standard handler for socket connection problems which improves diagnostics</b><br>
+     <blockquote></blockquote></li>
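+<p>A sketch of the kind of diagnostics handler this adds, assuming it surfaces as the NetUtils.wrapException helper found in Hadoop's NetUtils (the exact name here is an assumption):</p>
+<pre>
+import java.io.IOException;
+import java.net.ConnectException;
+import org.apache.hadoop.net.NetUtils;
+
+public class WrapExceptionDemo {
+  public static void main(String[] args) {
+    IOException raw = new ConnectException("Connection refused");
+    // Re-wrap with both endpoints named, so the operator can see
+    // which host:port pair actually failed to connect.
+    IOException wrapped = NetUtils.wrapException(
+        "namenode.example.com", 8020,   // destination (illustrative host)
+        "client.example.com", 0,        // local side (illustrative host)
+        raw);
+    System.out.println(wrapped.getMessage());
+  }
+}
+</pre>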
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7465">HADOOP-7465</a>.
+     Trivial sub-task reported by XieXianshan and fixed by XieXianshan (fs , ipc)<br>
+     <b>Several tiny improvements to the LOG format</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7463">HADOOP-7463</a>.
+     Minor improvement reported by Mahadev konar and fixed by Mahadev konar <br>
+     <b>Adding a configuration parameter to SecurityInfo interface.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7460">HADOOP-7460</a>.
+     Major improvement reported by dhruba borthakur and fixed by Usman Masood (fs)<br>
+     <b>Support for pluggable Trash policies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7457">HADOOP-7457</a>.
+     Blocker improvement reported by Jakob Homan and fixed by Jakob Homan (documentation)<br>
+     <b>Remove out-of-date Chinese language documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7451">HADOOP-7451</a>.
+     Major improvement reported by Matt Foley and fixed by Matt Foley <br>
+     <b>merge for MR-279: Generalize StringUtils#join</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7449">HADOOP-7449</a>.
+     Major improvement reported by Matt Foley and fixed by Matt Foley <br>
+     <b>merge for MR-279: add Data(In,Out)putByteBuffer to work with ByteBuffer similar to Data(In,Out)putBuffer for byte[]</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7448">HADOOP-7448</a>.
+     Major improvement reported by Matt Foley and fixed by Matt Foley <br>
+     <b>merge for MR-279: HttpServer /stacks servlet should use plain text content type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7446">HADOOP-7446</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (native , performance)<br>
+     <b>Implement CRC32C native code using SSE4.2 instructions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7445">HADOOP-7445</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (native , util)<br>
+     <b>Implement bulk checksum verification using efficient native code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7444">HADOOP-7444</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Add Checksum API to verify and calculate checksums "in bulk"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7443">HADOOP-7443</a>.
+     Major new feature reported by Todd Lipcon and fixed by Todd Lipcon (io , util)<br>
+     <b>Add CRC32C as another DataChecksum implementation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7442">HADOOP-7442</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (conf , documentation)<br>
+     <b>Docs in core-default.xml still reference deprecated config "topology.script.file.name"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7440">HADOOP-7440</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>HttpServer.getParameterValues throws NPE for missing parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7438">HADOOP-7438</a>.
+     Major improvement reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>Using the hadoop-daemon.sh script to start nodes leads to a deprecated warning</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7437">HADOOP-7437</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (io)<br>
+     <b>IOUtils.copybytes will suppress the stream closure exceptions. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7434">HADOOP-7434</a>.
+     Minor improvement reported by &#20005;&#37329;&#21452; and fixed by &#20005;&#37329;&#21452; <br>
+     <b>Display error when using "daemonlog -setlevel" with illegal level</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7430">HADOOP-7430</a>.
+     Minor improvement reported by Ravi Prakash and fixed by Ravi Prakash (fs)<br>
+     <b>Improve error message when moving to trash fails due to quota issue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7428">HADOOP-7428</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>IPC connection is orphaned with null 'out' member</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7419">HADOOP-7419</a>.
+     Major bug reported by Todd Lipcon and fixed by Bing Zheng <br>
+     <b>new hadoop-config.sh doesn't manage classpath for HADOOP_CONF_DIR correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7402">HADOOP-7402</a>.
+     Trivial bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>TestConfiguration doesn't clean up after itself</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7392">HADOOP-7392</a>.
+     Major improvement reported by Tanping Wang and fixed by Tanping Wang <br>
+     <b>Implement capability of querying individual property of a mbean using JMXProxyServlet </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7389">HADOOP-7389</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>Use of TestingGroups by tests causes subsequent tests to fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7385">HADOOP-7385</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Remove StringUtils.stringifyException(ie) in logger functions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7384">HADOOP-7384</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Allow test-patch to be more flexible about patch format</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7383">HADOOP-7383</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Todd Lipcon (build)<br>
+     <b>HDFS needs to export protobuf library dependency in pom</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7380">HADOOP-7380</a>.
+     Major sub-task reported by Aaron T. Myers and fixed by Aaron T. Myers (ha , ipc)<br>
+     <b>Add client failover functionality to o.a.h.io.(ipc|retry)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7379">HADOOP-7379</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (io , ipc)<br>
+     <b>Add ability to include Protobufs in ObjectWritable</b><br>
+     <blockquote>Protocol buffer-generated types may now be used as arguments or return values for Hadoop RPC.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7377">HADOOP-7377</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Fix command name handling affecting DFSAdmin</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7375">HADOOP-7375</a>.
+     Major improvement reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>Add resolvePath method to FileContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7374">HADOOP-7374</a>.
+     Major improvement reported by Eli Collins and fixed by Eli Collins (scripts)<br>
+     <b>Don't add tools.jar to the classpath when running Hadoop</b><br>
+     <blockquote>The scripts that run Hadoop no longer automatically add tools.jar from the JDK to the classpath (if it is present). If your job depends on tools.jar in the JDK you will need to add this dependency in your job.</blockquote></li>
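+<p>One way a job could fail fast after this change is to probe for a tools.jar class at startup; the probe class below (com.sun.tools.javac.Main) is just a conventional tools.jar entry point, not something this change prescribes:</p>
+<pre>
+public class ToolsJarCheck {
+  public static void main(String[] args) {
+    try {
+      // com.sun.tools.javac.Main ships in the JDK's tools.jar.
+      Class.forName("com.sun.tools.javac.Main");
+      System.out.println("tools.jar is on the classpath");
+    } catch (ClassNotFoundException e) {
+      System.err.println("tools.jar missing; add it to your job's classpath");
+    }
+  }
+}
+</pre>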
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7361">HADOOP-7361</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>Provide overwrite option (-overwrite/-f) in put and copyFromLocal command line options</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7360">HADOOP-7360</a>.
+     Major improvement reported by Daryn Sharp and fixed by Kihwal Lee (fs)<br>
+     <b>FsShell does not preserve relative paths with globs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7357">HADOOP-7357</a>.
+     Trivial bug reported by Philip Zeyliger and fixed by Philip Zeyliger (test)<br>
+     <b>hadoop.io.compress.TestCodec#main() should exit with non-zero exit code if test failed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7356">HADOOP-7356</a>.
+     Blocker bug reported by Eric Yang and fixed by Eric Yang <br>
+     <b>RPM packages broke bin/hadoop script for hadoop 0.20.205</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7353">HADOOP-7353</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Cleanup FsShell and prevent masking of RTE stacktraces</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7342">HADOOP-7342</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Add an utility API in FileUtil for JDK File.list</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7341">HADOOP-7341</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Fix option parsing in CommandFormat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7337">HADOOP-7337</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (util)<br>
+     <b>Annotate PureJavaCrc32 as a public API</b><br>
+     <blockquote></blockquote></li>
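+<p>Since PureJavaCrc32 implements java.util.zip.Checksum, a minimal usage sketch (assuming that interface is unchanged) looks like any other Checksum:</p>
+<pre>
+import java.util.zip.Checksum;
+import org.apache.hadoop.util.PureJavaCrc32;
+
+public class CrcDemo {
+  public static void main(String[] args) {
+    byte[] data = "hello".getBytes();
+    Checksum crc = new PureJavaCrc32();   // pure-Java CRC32, no native code
+    crc.update(data, 0, data.length);
+    System.out.printf("crc32 = %x%n", crc.getValue());
+  }
+}
+</pre>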
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7336">HADOOP-7336</a>.
+     Minor bug reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>TestFileContextResolveAfs will fail with default test.build.data property.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7333">HADOOP-7333</a>.
+     Minor improvement reported by Eric Caspole and fixed by Eric Caspole (performance , util)<br>
+     <b>Performance improvement in PureJavaCrc32</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7331">HADOOP-7331</a>.
+     Trivial improvement reported by Tanping Wang and fixed by Tanping Wang (scripts)<br>
+     <b>Make hadoop-daemon.sh return 1 if daemon processes did not get started</b><br>
+     <blockquote>hadoop-daemon.sh now returns a non-zero exit code if it detects that the daemon was not still running after 3 seconds.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7329">HADOOP-7329</a>.
+     Minor improvement reported by XieXianshan and fixed by XieXianshan (fs)<br>
+     <b>incomplete help message is displayed for df -h option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7328">HADOOP-7328</a>.
+     Major improvement reported by Harsh J and fixed by Harsh J (io)<br>
+     <b>When a serializer class is missing, return null, not throw an NPE.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7327">HADOOP-7327</a>.
+     Minor bug reported by Matt Foley and fixed by Matt Foley (fs)<br>
+     <b>FileSystem.listStatus() throws NullPointerException instead of IOException upon access permission failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7324">HADOOP-7324</a>.
+     Blocker bug reported by Luke Lu and fixed by Priyo Mustafi (metrics)<br>
+     <b>Ganglia plugins for metrics v2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7322">HADOOP-7322</a>.
+     Minor bug reported by Bharath Mundlapudi and fixed by Bharath Mundlapudi <br>
+     <b>Adding a util method in FileUtil for JDK File.listFiles</b><br>
+     <blockquote>Use of this new utility method avoids null result from File.listFiles(), and consequent NPEs.</blockquote></li>
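+<p>A minimal sketch of the difference this utility makes, assuming FileUtil.listFiles(File) keeps its File[]-returning, IOException-throwing form:</p>
+<pre>
+import java.io.File;
+import java.io.IOException;
+import org.apache.hadoop.fs.FileUtil;
+
+public class ListFilesDemo {
+  public static void main(String[] args) throws IOException {
+    File dir = new File(args[0]);
+    // File.listFiles() returns null on an I/O error or a non-directory,
+    // which callers tend to forget; FileUtil.listFiles throws instead.
+    File[] children = FileUtil.listFiles(dir);
+    for (File f : children) {
+      System.out.println(f.getName());
+    }
+  }
+}
+</pre>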
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7320">HADOOP-7320</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Refactor FsShell's copy &amp; move commands</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7316">HADOOP-7316</a>.
+     Major improvement reported by Jonathan Hsieh and fixed by Eli Collins (documentation)<br>
+     <b>Add public javadocs to FSDataInputStream and FSDataOutputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7314">HADOOP-7314</a>.
+     Major improvement reported by Jeffrey Naisbitt and fixed by Jeffrey Naisbitt <br>
+     <b>Add support for throwing UnknownHostException when a host doesn't resolve</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7306">HADOOP-7306</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu (metrics)<br>
+     <b>Start metrics system even if config files are missing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7305">HADOOP-7305</a>.
+     Minor improvement reported by Niels Basjes and fixed by Niels Basjes (build)<br>
+     <b>Eclipse project files are incomplete</b><br>
+     <blockquote>Added missing library during creation of the eclipse project files.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7301">HADOOP-7301</a>.
+     Major improvement reported by Jonathan Hsieh and fixed by Jonathan Hsieh <br>
+     <b>FSDataInputStream should expose a getWrappedStream method</b><br>
+     <blockquote></blockquote></li>
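+<p>A minimal sketch of the accessor, assuming the usual FileSystem bootstrap; the path argument is illustrative:</p>
+<pre>
+import java.io.InputStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class WrappedStreamDemo {
+  public static void main(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(new Configuration());
+    FSDataInputStream in = fs.open(new Path(args[0]));
+    // Peek at the concrete stream FSDataInputStream decorates,
+    // e.g. to check its class in instrumentation or tests.
+    InputStream raw = in.getWrappedStream();
+    System.out.println(raw.getClass().getName());
+    in.close();
+  }
+}
+</pre>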
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7298">HADOOP-7298</a>.
+     Major test reported by Todd Lipcon and fixed by Todd Lipcon (test)<br>
+     <b>Add test utility for writing multi-threaded tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7292">HADOOP-7292</a>.
+     Minor bug reported by Luke Lu and fixed by Luke Lu (metrics)<br>
+     <b>Metrics 2 TestSinkQueue is racy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7289">HADOOP-7289</a>.
+     Major improvement reported by Tsz Wo (Nicholas), SZE and fixed by Eric Yang (build)<br>
+     <b>ivy: test conf should not extend common conf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7287">HADOOP-7287</a>.
+     Blocker bug reported by Todd Lipcon and fixed by Aaron T. Myers (conf)<br>
+     <b>Configuration deprecation mechanism doesn't work properly for GenericOptionsParser/Tools</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7286">HADOOP-7286</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's du/dus/df</b><br>
+     <blockquote>The "Found X items" header on the output of the "du" command has been removed to more closely match unix. The displayed paths now correspond to the command line arguments instead of always being a fully qualified URI. For example, the output will have relative paths if the command line arguments are relative paths.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7285">HADOOP-7285</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7284">HADOOP-7284</a>.
+     Major bug reported by Sanjay Radia and fixed by Sanjay Radia (viewfs)<br>
+     <b>Trash and shell's rm do not work for viewfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7282">HADOOP-7282</a>.
+     Major bug reported by John George and fixed by John George (ipc)<br>
+     <b>getRemoteIp could return null in cases where the call is ongoing but the ip went away.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7276">HADOOP-7276</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (native)<br>
+     <b>Hadoop native builds fail on ARM due to -m32</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7275">HADOOP-7275</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's stat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7271">HADOOP-7271</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Standardize error messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7268">HADOOP-7268</a>.
+     Major bug reported by Devaraj Das and fixed by Jitendra Nath Pandey (fs , security)<br>
+     <b>FileContext.getLocalFSFileContext() behavior needs to be fixed w.r.t tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7267">HADOOP-7267</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's rm/rmr/expunge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7265">HADOOP-7265</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Keep track of relative paths</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7264">HADOOP-7264</a>.
+     Major improvement reported by Luke Lu and fixed by Luke Lu (io)<br>
+     <b>Bump avro version to at least 1.4.1</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7261">HADOOP-7261</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (test)<br>
+     <b>Disable IPV6 for junit tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7259">HADOOP-7259</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley (build)<br>
+     <b>contrib modules should include build.properties from parent.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7257">HADOOP-7257</a>.
+     Major new feature reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>A client side mount table to give per-application/per-job file system view</b><br>
+     <blockquote>viewfs - client-side mount table.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7251">HADOOP-7251</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's getmerge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7250">HADOOP-7250</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's setrep</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7249">HADOOP-7249</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's chmod/chown/chgrp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7241">HADOOP-7241</a>.
+     Minor improvement reported by Wei Yongjun and fixed by Wei Yongjun (fs , test)<br>
+     <b>fix typo of command 'hadoop fs -help tail'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7238">HADOOP-7238</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's cat &amp; text</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7237">HADOOP-7237</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's touchz</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7236">HADOOP-7236</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's mkdir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7235">HADOOP-7235</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Refactor FsShell's tail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7233">HADOOP-7233</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Refactor FsShell's ls</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7231">HADOOP-7231</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (util)<br>
+     <b>Fix synopsis for -count</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7230">HADOOP-7230</a>.
+     Major test reported by Daryn Sharp and fixed by Daryn Sharp (test)<br>
+     <b>Move -fs usage tests from hdfs into common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7227">HADOOP-7227</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey (ipc)<br>
+     <b>Remove protocol version check at proxy creation in Hadoop RPC.</b><br>
+     <blockquote>1. The protocol version check is removed from proxy creation; instead, the version check is performed at the server in every rpc call.
+2. This change is backward incompatible because the format of the rpc messages is changed to include the client version, client method hash and rpc version.
+3. An rpc version is introduced, which should change whenever the format of the rpc messages changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7223">HADOOP-7223</a>.
+     Major bug reported by Suresh Srinivas and fixed by Suresh Srinivas (fs)<br>
+     <b>FileContext createFlag combinations during create are not clearly defined</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7216">HADOOP-7216</a>.
+     Major bug reported by Aaron T. Myers and fixed by Daryn Sharp (test)<br>
+     <b>HADOOP-7202 broke TestDFSShell in HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7215">HADOOP-7215</a>.
+     Blocker bug reported by Suresh Srinivas and fixed by Suresh Srinivas (security)<br>
+     <b>RPC clients must connect over a network interface corresponding to the host name in the client's kerberos principal key</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7214">HADOOP-7214</a>.
+     Major new feature reported by Aaron T. Myers and fixed by Aaron T. Myers <br>
+     <b>Hadoop /usr/bin/groups equivalent</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7210">HADOOP-7210</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>Chown command is not working from FSShell.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7209">HADOOP-7209</a>.
+     Major improvement reported by Olga Natkovich and fixed by Daryn Sharp <br>
+     <b>Extensions to FsShell</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7208">HADOOP-7208</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G <br>
+     <b>equals() and hashCode() implementations need to change in StandardSocketFactory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7206">HADOOP-7206</a>.
+     Major new feature reported by Eli Collins and fixed by Alejandro Abdelnur <br>
+     <b>Integrate Snappy compression</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7205">HADOOP-7205</a>.
+     Trivial improvement reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>automatically determine JAVA_HOME on OS X</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7202">HADOOP-7202</a>.
+     Major improvement reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Improve Command base class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7194">HADOOP-7194</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (io)<br>
+     <b>Potential Resource leak in IOUtils.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7193">HADOOP-7193</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>Help message is wrong for touchz command.</b><br>
+     <blockquote>Updated the help for the touchz command.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7187">HADOOP-7187</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (metrics)<br>
+     <b>Socket Leak in org.apache.hadoop.metrics.ganglia.GangliaContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7180">HADOOP-7180</a>.
+     Minor improvement reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Improve CommandFormat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7178">HADOOP-7178</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>FileSystem should have an option to control .crc file creation for the local filesystem.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7177">HADOOP-7177</a>.
+     Trivial improvement reported by Allen Wittenauer and fixed by Allen Wittenauer (native)<br>
+     <b>CodecPool should report which compressor it is using</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7176">HADOOP-7176</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp <br>
+     <b>Redesign FsShell</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7175">HADOOP-7175</a>.
+     Major bug reported by Daryn Sharp and fixed by Daryn Sharp (fs)<br>
+     <b>Add isEnabled() to Trash</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7174">HADOOP-7174</a>.
+     Minor bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>null is displayed in the console if the src path is invalid while doing a copyToLocal operation from the command line</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7172">HADOOP-7172</a>.
+     Critical bug reported by Todd Lipcon and fixed by Todd Lipcon (io , security)<br>
+     <b>SecureIO should not check owner on non-secure clusters that have no native support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7171">HADOOP-7171</a>.
+     Major bug reported by Owen O'Malley and fixed by Jitendra Nath Pandey (security)<br>
+     <b>Support UGI in FileContext API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7167">HADOOP-7167</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Allow using a file to exclude certain tests from build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7162">HADOOP-7162</a>.
+     Minor bug reported by Alexey Diomin and fixed by Alexey Diomin (fs)<br>
+     <b>FsShell: call srcFs.listStatus(src) twice</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7159">HADOOP-7159</a>.
+     Trivial improvement reported by Scott Chen and fixed by Scott Chen (ipc)<br>
+     <b>RPC server should log the client hostname when a read exception happens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7153">HADOOP-7153</a>.
+     Minor improvement reported by Nicholas Telford and fixed by Nicholas Telford (io)<br>
+     <b>MapWritable violates contract of Map interface for equals() and hashCode()</b><br>
+     <blockquote>MapWritable now implements equals() and hashCode() based on the map contents rather than object identity in order to correctly implement the Map interface.</blockquote></li>
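+<p>A minimal sketch of the content-based equality described above, assuming only the public MapWritable and Text APIs:</p>
+<pre>
+import org.apache.hadoop.io.MapWritable;
+import org.apache.hadoop.io.Text;
+
+public class MapWritableEquality {
+  public static void main(String[] args) {
+    MapWritable a = new MapWritable();
+    MapWritable b = new MapWritable();
+    a.put(new Text("k"), new Text("v"));
+    b.put(new Text("k"), new Text("v"));
+    // Equality is now content-based, as the Map contract requires, so two
+    // distinct instances holding the same entries compare equal.
+    System.out.println(a.equals(b));                  // true
+    System.out.println(a.hashCode() == b.hashCode()); // true
+  }
+}
+</pre>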
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7151">HADOOP-7151</a>.
+     Minor bug reported by Dmitriy V. Ryaboy and fixed by Dmitriy V. Ryaboy <br>
+     <b>Document need for stable hashCode() in WritableComparable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7144">HADOOP-7144</a>.
+     Major new feature reported by Luke Lu and fixed by Robert Joseph Evans <br>
+     <b>Expose JMX with something like JMXProxyServlet </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7136">HADOOP-7136</a>.
+     Major task reported by Nigel Daley and fixed by Nigel Daley <br>
+     <b>Remove failmon contrib</b><br>
+     <blockquote>Failmon removed from contrib codebase.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7133">HADOOP-7133</a>.
+     Major improvement reported by Matt Foley and fixed by Matt Foley (util)<br>
+     <b>CLONE to COMMON - HDFS-1445 Batch the calls in DataStorage to FileUtil.createHardLink(), so we call it once per directory instead of once per file</b><br>
+     <blockquote>This is the COMMON portion of a fix requiring coordinated change of COMMON and HDFS.  Please see HDFS-1445 for HDFS portion and release note.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7131">HADOOP-7131</a>.
+     Minor improvement reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (io)<br>
+     <b>The set() and toString() methods of the org.apache.hadoop.io.Text class do not include the root exception in the wrapping RuntimeException.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7120">HADOOP-7120</a>.
+     Major bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (test)<br>
+     <b>200 new Findbugs warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7119">HADOOP-7119</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>add Kerberos HTTP SPNEGO authentication support to Hadoop JT/NN/DN/TT web-consoles</b><br>
+     <blockquote>Adding support for Kerberos HTTP SPNEGO authentication to the Hadoop web-consoles</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7117">HADOOP-7117</a>.
+     Major improvement reported by Patrick Angeles and fixed by Harsh J (conf)<br>
+     <b>Move secondary namenode checkpoint configs from core-default.xml to hdfs-default.xml</b><br>
+     <blockquote>Removed references to the older fs.checkpoint.* properties that resided in core-site.xml</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7114">HADOOP-7114</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (fs)<br>
+     <b>FsShell should dump all exceptions at DEBUG level</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7112">HADOOP-7112</a>.
+     Major improvement reported by Tom White and fixed by Tom White (conf , filecache)<br>
+     <b>Issue a warning when GenericOptionsParser libjars are not on local filesystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7111">HADOOP-7111</a>.
+     Critical bug reported by Todd Lipcon and fixed by Aaron T. Myers (io)<br>
+     <b>Several TFile tests failing when native libraries are present</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7098">HADOOP-7098</a>.
+     Major bug reported by Bernd Fondermann and fixed by Bernd Fondermann (conf)<br>
+     <b>tasktracker property not set in conf/hadoop-env.sh</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7096">HADOOP-7096</a>.
+     Major improvement reported by Ahmed Radwan and fixed by Ahmed Radwan <br>
+     <b>Allow setting of end-of-record delimiter for TextInputFormat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7090">HADOOP-7090</a>.
+     Major bug reported by Gokul and fixed by Uma Maheswara Rao G (fs/s3 , io)<br>
+     <b>Possible resource leaks in hadoop core code</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7089">HADOOP-7089</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (scripts)<br>
+     <b>Fix link resolution logic in hadoop-config.sh</b><br>
+     <blockquote>Updates hadoop-config.sh to always resolve symlinks when determining HADOOP_HOME. Bash built-ins or POSIX:2001 compliant cmds are now required.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7078">HADOOP-7078</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Harsh J <br>
+     <b>Add better javadocs for RawComparator interface</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7071">HADOOP-7071</a>.
+     Minor bug reported by Nigel Daley and fixed by Nigel Daley (build)<br>
+     <b>test-patch.sh has bad ps arg</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7061">HADOOP-7061</a>.
+     Minor improvement reported by Jingguo Yao and fixed by Jingguo Yao (io)<br>
+     <b>Imprecise javadoc for CompressionCodec</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7060">HADOOP-7060</a>.
+     Major improvement reported by Hairong Kuang and fixed by Patrick Kling (fs)<br>
+     <b>A more elegant FileSystem#listCorruptFileBlocks API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7059">HADOOP-7059</a>.
+     Major improvement reported by Noah Watkins and fixed by Noah Watkins (native)<br>
+     <b>Remove "unused" warning in native code</b><br>
+     <blockquote>Adds __attribute__ ((unused))</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7058">HADOOP-7058</a>.
+     Trivial improvement reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Expose number of bytes in FSOutputSummer buffer to implementations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7057">HADOOP-7057</a>.
+     Minor bug reported by Konstantin Boudnik and fixed by Konstantin Boudnik (util)<br>
+     <b>IOUtils.readFully and IOUtils.skipFully have typo in exception creation's message</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7055">HADOOP-7055</a>.
+     Major bug reported by Jingguo Yao and fixed by Jingguo Yao (metrics)<br>
+     <b>Update of commons logging libraries causes EventCounter to count logging events incorrectly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7053">HADOOP-7053</a>.
+     Minor bug reported by Jingguo Yao and fixed by Jingguo Yao (conf)<br>
+     <b>wrong FSNamesystem Audit logging setting in conf/log4j.properties</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7052">HADOOP-7052</a>.
+     Major bug reported by Jingguo Yao and fixed by Jingguo Yao (conf)<br>
+     <b>misspelling of threshold in conf/log4j.properties</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7049">HADOOP-7049</a>.
+     Trivial improvement reported by Patrick Kling and fixed by Patrick Kling (conf)<br>
+     <b>TestReconfiguration should be junit v4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7048">HADOOP-7048</a>.
+     Minor improvement reported by Jingguo Yao and fixed by Jingguo Yao (io)<br>
+     <b>Wrong description of Block-Compressed SequenceFile Format in SequenceFile's javadoc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7046">HADOOP-7046</a>.
+     Blocker bug reported by Nigel Daley and fixed by Po Cheung (security)<br>
+     <b>1 Findbugs warning on trunk and branch-0.22</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7045">HADOOP-7045</a>.
+     Minor bug reported by Eli Collins and fixed by Eli Collins (fs)<br>
+     <b>TestDU fails on systems with local file systems with extended attributes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7042">HADOOP-7042</a>.
+     Minor improvement reported by Nigel Daley and fixed by Nigel Daley (test)<br>
+     <b>Update test-patch.sh to include failed test names and move test-patch.properties</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7023">HADOOP-7023</a>.
+     Major improvement reported by Patrick Kling and fixed by Patrick Kling <br>
+     <b>Add listCorruptFileBlocks to FileSystem</b><br>
+     <blockquote>Add a new API listCorruptFileBlocks to FileContext that returns a list of files that have corrupt blocks.</blockquote></li>
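
A short usage sketch against the FileSystem variant of the call (the RemoteIterator&lt;Path&gt; signature is assumed from the related HADOOP-7060 work; only filesystems that actually implement it, such as HDFS, support the call — the FileSystem base class throws UnsupportedOperationException):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListCorruptDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Iterate over files under / that have at least one corrupt block.
        RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(new Path("/"));
        while (corrupt.hasNext()) {
          System.out.println("corrupt: " + corrupt.next());
        }
      }
    }
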
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7015">HADOOP-7015</a>.
+     Minor bug reported by Sanjay Radia and fixed by Sanjay Radia <br>
+     <b>RawLocalFileSystem#listStatus does not deal with a directory whose entries are changing (e.g. in a multi-thread or multi-process environment)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7014">HADOOP-7014</a>.
+     Major improvement reported by Konstantin Boudnik and fixed by Konstantin Boudnik (test)<br>
+     <b>Generalize CLITest structure and interfaces to facilitate upstream adoption (e.g. for web testing)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7001">HADOOP-7001</a>.
+     Major task reported by Patrick Kling and fixed by Patrick Kling (conf)<br>
+     <b>Allow configuration changes without restarting configured nodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6994">HADOOP-6994</a>.
+     Major improvement reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Api to get delegation token in AbstractFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6949">HADOOP-6949</a>.
+     Major improvement reported by Navis and fixed by Matt Foley (io)<br>
+     <b>Reduces RPC packet size for primitive arrays, especially long[], which is used at block reporting</b><br>
+     <blockquote>Increments the RPC protocol version in org.apache.hadoop.ipc.Server from 4 to 5.
+Introduces ArrayPrimitiveWritable for a much more efficient wire format to transmit arrays of primitives over RPC. ObjectWritable uses the new writable for array of primitives for RPC and continues to use existing format for on-disk data.</blockquote></li>
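
A sketch of the new writable in isolation, assuming its Object-accepting constructor and the usual Writable.write(DataOutput) contract; serializing a long[] this way writes the component type once plus the raw values, rather than a per-element object wrapper:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.io.ArrayPrimitiveWritable;

    public class BlockReportWireDemo {
      public static void main(String[] args) throws Exception {
        long[] blockIds = {1L, 2L, 3L};
        // Wrap the primitive array; the compact wire format is what makes
        // large block reports cheaper over RPC.
        ArrayPrimitiveWritable w = new ArrayPrimitiveWritable(blockIds);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        w.write(new DataOutputStream(bytes));
        System.out.println("serialized size: " + bytes.size() + " bytes");
      }
    }
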
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6939">HADOOP-6939</a>.
+     Minor bug reported by Todd Lipcon and fixed by Todd Lipcon <br>
+     <b>Inconsistent lock ordering in AbstractDelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6929">HADOOP-6929</a>.
+     Major improvement reported by Sharad Agarwal and fixed by Sharad Agarwal (ipc , security)<br>
+     <b>RPC should have a way to pass Security information other than protocol annotations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6921">HADOOP-6921</a>.
+     Major sub-task reported by Luke Lu and fixed by Luke Lu <br>
+     <b>metrics2: metrics plugins</b><br>
+     <blockquote>Metrics names are standardized to CapitalizedCamelCase. See release note of HADOOP-6918 and HADOOP-6920.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6920">HADOOP-6920</a>.
+     Major sub-task reported by Luke Lu and fixed by Luke Lu <br>
+     <b>Metrics2: metrics instrumentation</b><br>
+     <blockquote>Metrics names are standardized to use CapitalizedCamelCase. Some examples:
+# Metrics names using "_" are changed to the new naming scheme. E.g. bytes_written changes to BytesWritten.
+# All metrics names start with capitals. E.g. threadsBlocked changes to ThreadsBlocked.
+</blockquote></li>
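
The renaming rule is mechanical. An illustrative converter (not Hadoop-internal code) showing the mapping the note above describes:

    public class MetricNameDemo {
      // Illustrative only: maps old metric names to CapitalizedCamelCase.
      static String toCamelCase(String old) {
        StringBuilder sb = new StringBuilder();
        for (String part : old.split("_")) {
          if (part.isEmpty()) continue;
          sb.append(Character.toUpperCase(part.charAt(0)))
            .append(part.substring(1));
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(toCamelCase("bytes_written"));   // BytesWritten
        System.out.println(toCamelCase("threadsBlocked"));  // ThreadsBlocked
      }
    }
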
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6919">HADOOP-6919</a>.
+     Major sub-task reported by Luke Lu and fixed by Luke Lu (metrics)<br>
+     <b>Metrics2: metrics framework</b><br>
+     <blockquote>New metrics2 framework for Hadoop.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6912">HADOOP-6912</a>.
+     Major bug reported by Kan Zhang and fixed by Kan Zhang (security)<br>
+     <b>Guard against NPE when calling UGI.isLoginKeytabBased()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6904">HADOOP-6904</a>.
+     Major new feature reported by Hairong Kuang and fixed by Hairong Kuang (ipc)<br>
+     <b>A baby step towards inter-version RPC communications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6889">HADOOP-6889</a>.
+     Major new feature reported by Hairong Kuang and fixed by John George (ipc)<br>
+     <b>Make RPC have an option to time out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6887">HADOOP-6887</a>.
+     Major improvement reported by Bharath Mundlapudi and fixed by Luke Lu (metrics)<br>
+     <b>Need a separate metrics per garbage collector</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6864">HADOOP-6864</a>.
+     Major improvement reported by Erik Steffl and fixed by Boris Shkolnik (security)<br>
+     <b>Provide a JNI-based implementation of ShellBasedUnixGroupsNetgroupMapping (implementation of GroupMappingServiceProvider)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6764">HADOOP-6764</a>.
+     Major improvement reported by Dmytro Molkov and fixed by Dmytro Molkov (ipc)<br>
+     <b>Add number of reader threads and queue length as configuration parameters in RPC.getServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6754">HADOOP-6754</a>.
+     Major bug reported by Aaron Kimball and fixed by Aaron Kimball (io)<br>
+     <b>DefaultCodec.createOutputStream() leaks memory</b><br>
+     <blockquote></blockquote></li>
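
The leak pattern behind issues like this is allocating a fresh native compressor on every createOutputStream() call; the conventional remedy is to reuse compressors through CodecPool. A sketch assuming the standard org.apache.hadoop.io.compress API (output path hypothetical):

    import java.io.FileOutputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CodecPool;
    import org.apache.hadoop.io.compress.Compressor;
    import org.apache.hadoop.io.compress.DefaultCodec;

    public class PooledCompressDemo {
      public static void main(String[] args) throws Exception {
        DefaultCodec codec = new DefaultCodec();
        codec.setConf(new Configuration());
        Compressor compressor = CodecPool.getCompressor(codec);
        try {
          OutputStream out = codec.createOutputStream(
              new FileOutputStream("/tmp/demo.deflate"), compressor);
          out.write("hello".getBytes("UTF-8"));
          out.close();
        } finally {
          CodecPool.returnCompressor(compressor);  // reuse instead of leaking
        }
      }
    }
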
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6683">HADOOP-6683</a>.
+     Minor sub-task reported by Kang Xiao and fixed by Kang Xiao (io)<br>
+     <b>the first optimization: ZlibCompressor does not fully utilize the buffer</b><br>
+     <blockquote>Improve the buffer utilization of ZlibCompressor to avoid invoking a JNI per write request.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6671">HADOOP-6671</a>.
+     Major sub-task reported by Giridharan Kesavan and fixed by Alejandro Abdelnur (build)<br>
+     <b>To use maven for hadoop common builds</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6622">HADOOP-6622</a>.
+     Major bug reported by Jitendra Nath Pandey and fixed by Eli Collins (security)<br>
+     <b>Token should not print the password in toString.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6578">HADOOP-6578</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Michele Catasta (conf)<br>
+     <b>Configuration should trim whitespace around a lot of value types</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6508">HADOOP-6508</a>.
+     Major bug reported by Amareshwari Sriramadasu and fixed by Luke Lu (metrics)<br>
+     <b>Incorrect values for metrics with CompositeContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6436">HADOOP-6436</a>.
+     Major improvement reported by Eli Collins and fixed by Roman Shaposhnik <br>
+     <b>Remove auto-generated native build files </b><br>
+     <blockquote>The native build, when run from trunk, now requires autotools, libtool and the openssl dev libraries.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6432">HADOOP-6432</a>.
+     Major new feature reported by Jitendra Nath Pandey and fixed by Jitendra Nath Pandey <br>
+     <b>Statistics support in FileContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6385">HADOOP-6385</a>.
+     Minor new feature reported by Scott Phillips and fixed by Daryn Sharp (fs)<br>
+     <b>dfs does not support -rmdir (was HDFS-639)</b><br>
+     <blockquote>The "rm" family of FsShell commands now supports -rmdir and -f options.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6376">HADOOP-6376</a>.
+     Minor improvement reported by Karthik K and fixed by Karthik K (conf)<br>
+     <b>slaves file to have a header specifying the format of conf/slaves file </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6255">HADOOP-6255</a>.
+     Major new feature reported by Owen O'Malley and fixed by Eric Yang <br>
+     <b>Create an rpm integration project</b><br>
+     <blockquote>Added RPM/DEB packages to build system.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-6158">HADOOP-6158</a>.
+     Minor task reported by Owen O'Malley and fixed by Eli Collins (util)<br>
+     <b>Move CyclicIteration to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-5647">HADOOP-5647</a>.
+     Major bug reported by Ravi Gummadi and fixed by Ravi Gummadi (test)<br>
+     <b>TestJobHistory fails if /tmp/_logs is not writable. The testcase should not depend on /tmp</b><br>
+     <blockquote>Removed dependency of testcase on /tmp and made it to use test.build.data directory instead.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-2081">HADOOP-2081</a>.
+     Major bug reported by Owen O'Malley and fixed by Harsh J (conf)<br>
+     <b>Configuration getInt, getLong, and getFloat replace invalid numbers with the default value</b><br>
+     <blockquote>Invalid configuration values now result in a number format exception rather than the default value being used.</blockquote></li>
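
This behavioural change can break setups that silently relied on the fallback, so it is worth spelling out. A sketch of the new failure mode (property name hypothetical):

    import org.apache.hadoop.conf.Configuration;

    public class ConfigParseDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("my.size", "ten");  // not a number
        try {
          // Previously returned the default (4096); after HADOOP-2081 an
          // invalid value surfaces as a NumberFormatException instead.
          conf.getInt("my.size", 4096);
        } catch (NumberFormatException e) {
          System.out.println("invalid value rejected: " + e.getMessage());
        }
      }
    }
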
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-1886">HADOOP-1886</a>.
+     Trivial improvement reported by Konstantin Shvachko and fixed by Frank Conrad (fs)<br>
+     <b>Undocumented parameters in FileSystem</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 05e6297..70db687 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -61,6 +61,9 @@
     new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
 
   private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmmss");
+  /** Format of checkpoint directories used prior to Hadoop 0.23. */
+  private static final DateFormat OLD_CHECKPOINT =
+      new SimpleDateFormat("yyMMddHHmm");
   private static final int MSECS_PER_MINUTE = 60*1000;
 
   private Path current;
@@ -69,8 +72,9 @@
 
   public TrashPolicyDefault() { }
 
-  private TrashPolicyDefault(Path home, Configuration conf) throws IOException {
-    initialize(conf, home.getFileSystem(conf), home);
+  private TrashPolicyDefault(FileSystem fs, Path home, Configuration conf)
+      throws IOException {
+    initialize(conf, fs, home);
   }
 
   @Override
@@ -202,9 +206,7 @@
 
       long time;
       try {
-        synchronized (CHECKPOINT) {
-          time = CHECKPOINT.parse(name).getTime();
-        }
+        time = getTimeFromCheckpoint(name);
       } catch (ParseException e) {
         LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
         continue;
@@ -278,7 +280,8 @@
               if (!home.isDirectory())
                 continue;
               try {
-                TrashPolicyDefault trash = new TrashPolicyDefault(home.getPath(), conf);
+                TrashPolicyDefault trash = new TrashPolicyDefault(
+                    fs, home.getPath(), conf);
                 trash.deleteCheckpoint();
                 trash.createCheckpoint();
               } catch (IOException e) {
@@ -304,4 +307,22 @@
       return (time / interval) * interval;
     }
   }
+
+  private long getTimeFromCheckpoint(String name) throws ParseException {
+    long time;
+
+    try {
+      synchronized (CHECKPOINT) {
+        time = CHECKPOINT.parse(name).getTime();
+      }
+    } catch (ParseException pe) {
+      // Check for old-style checkpoint directories left over
+      // after an upgrade from Hadoop 1.x
+      synchronized (OLD_CHECKPOINT) {
+        time = OLD_CHECKPOINT.parse(name).getTime();
+      }
+    }
+
+    return time;
+  }
 }
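
The two checkpoint formats differ only in the trailing seconds field, so a post-0.23 name fails to parse under the old pattern and vice versa. The fallback the hunk above introduces can be exercised standalone with java.text alone (the real code additionally synchronizes on each format, since DateFormat is not thread-safe):

    import java.text.DateFormat;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public class CheckpointParseDemo {
      private static final DateFormat CHECKPOINT =
          new SimpleDateFormat("yyMMddHHmmss");   // 0.23+ directories
      private static final DateFormat OLD_CHECKPOINT =
          new SimpleDateFormat("yyMMddHHmm");     // pre-0.23 leftovers

      static long getTimeFromCheckpoint(String name) throws ParseException {
        try {
          return CHECKPOINT.parse(name).getTime();
        } catch (ParseException pe) {
          // Old-style directory left over after an upgrade from Hadoop 1.x.
          return OLD_CHECKPOINT.parse(name).getTime();
        }
      }

      public static void main(String[] args) throws ParseException {
        System.out.println(getTimeFromCheckpoint("121008123000")); // new format
        System.out.println(getTimeFromCheckpoint("1210081230"));   // old format
      }
    }
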
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/IFSImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/IFSImpl.java
deleted file mode 100644
index 199264ab..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/IFSImpl.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * We need to provide the ability to the code in fs/kfs without really
- * having a KFS deployment.  In particular, the glue code that wraps
- * around calls to KfsAccess object.  This is accomplished by defining a
- * filesystem implementation interface:  
- *   -- for testing purposes, a dummy implementation of this interface
- * will suffice; as long as the dummy implementation is close enough
- * to doing what KFS does, we are good.
- *   -- for deployment purposes with KFS, this interface is
- * implemented by the KfsImpl object.
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.*;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Progressable;
-
-interface IFSImpl {
-    public boolean exists(String path) throws IOException;
-    public boolean isDirectory(String path) throws IOException;
-    public boolean isFile(String path) throws IOException;
-    public String[] readdir(String path) throws IOException;
-    public FileStatus[] readdirplus(Path path) throws IOException;
-
-    public int mkdirs(String path) throws IOException;
-    public int rename(String source, String dest) throws IOException;
-
-    public int rmdir(String path) throws IOException; 
-    public int remove(String path) throws IOException;
-    public long filesize(String path) throws IOException;
-    public short getReplication(String path) throws IOException;
-    public short setReplication(String path, short replication) throws IOException;
-    public String[][] getDataLocation(String path, long start, long len) throws IOException;
-
-    public long getModificationTime(String path) throws IOException;
-    public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException;
-    public FSDataInputStream open(String path, int bufferSize) throws IOException;
-    public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException;
-    
-};
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSConfigKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSConfigKeys.java
deleted file mode 100644
index 107872f..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSConfigKeys.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-
-/** 
- * This class contains constants for configuration keys used
- * in the kfs file system. 
- *
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class KFSConfigKeys extends CommonConfigurationKeys {
-  public static final String  KFS_BLOCK_SIZE_KEY = "kfs.blocksize";
-  public static final long    KFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
-  public static final String  KFS_REPLICATION_KEY = "kfs.replication";
-  public static final short   KFS_REPLICATION_DEFAULT = 1;
-  public static final String  KFS_STREAM_BUFFER_SIZE_KEY = 
-                                                    "kfs.stream-buffer-size";
-  public static final int     KFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
-  public static final String  KFS_BYTES_PER_CHECKSUM_KEY = 
-                                                    "kfs.bytes-per-checksum";
-  public static final int     KFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
-  public static final String  KFS_CLIENT_WRITE_PACKET_SIZE_KEY =
-                                                    "kfs.client-write-packet-size";
-  public static final int     KFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-}
-  
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
deleted file mode 100644
index 0d77a78..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSImpl.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * Provide the implementation of KFS which turn into calls to KfsAccess.
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.*;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-
-import org.kosmix.kosmosfs.access.KfsAccess;
-import org.kosmix.kosmosfs.access.KfsFileAttr;
-import org.apache.hadoop.util.Progressable;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class KFSImpl implements IFSImpl {
-    private KfsAccess kfsAccess = null;
-    private FileSystem.Statistics statistics;
-
-    @Deprecated
-    public KFSImpl(String metaServerHost, int metaServerPort
-                   ) throws IOException {
-      this(metaServerHost, metaServerPort, null);
-    }
-
-    public KFSImpl(String metaServerHost, int metaServerPort, 
-                   FileSystem.Statistics stats) throws IOException {
-        kfsAccess = new KfsAccess(metaServerHost, metaServerPort);
-        statistics = stats;
-    }
-
-    @Override
-    public boolean exists(String path) throws IOException {
-        return kfsAccess.kfs_exists(path);
-    }
-
-    @Override
-    public boolean isDirectory(String path) throws IOException {
-        return kfsAccess.kfs_isDirectory(path);
-    }
-
-    @Override
-    public boolean isFile(String path) throws IOException {
-        return kfsAccess.kfs_isFile(path);
-    }
-
-    @Override
-    public String[] readdir(String path) throws IOException {
-        return kfsAccess.kfs_readdir(path);
-    }
-
-    @Override
-    public FileStatus[] readdirplus(Path path) throws IOException {
-        String srep = path.toUri().getPath();
-        KfsFileAttr[] fattr = kfsAccess.kfs_readdirplus(srep);
-        if (fattr == null)
-            return null;
-        int numEntries = 0;
-        for (int i = 0; i < fattr.length; i++) {
-            if ((fattr[i].filename.compareTo(".") == 0) || (fattr[i].filename.compareTo("..") == 0))
-                continue;
-            numEntries++;
-        }
-        FileStatus[] fstatus = new FileStatus[numEntries];
-        int j = 0;
-        for (int i = 0; i < fattr.length; i++) {
-            if ((fattr[i].filename.compareTo(".") == 0) || (fattr[i].filename.compareTo("..") == 0))
-                continue;
-            Path fn = new Path(path, fattr[i].filename);
-
-            if (fattr[i].isDirectory)
-                fstatus[j] = new FileStatus(0, true, 1, 0, fattr[i].modificationTime, fn);
-            else
-                fstatus[j] = new FileStatus(fattr[i].filesize, fattr[i].isDirectory,
-                                            fattr[i].replication,
-                                            (long)
-                                            (1 << 26),
-                                            fattr[i].modificationTime,
-                                            fn);
-
-            j++;
-        }
-        return fstatus;
-    }
-
-
-    @Override
-    public int mkdirs(String path) throws IOException {
-        return kfsAccess.kfs_mkdirs(path);
-    }
-
-    @Override
-    public int rename(String source, String dest) throws IOException {
-        return kfsAccess.kfs_rename(source, dest);
-    }
-
-    @Override
-    public int rmdir(String path) throws IOException {
-        return kfsAccess.kfs_rmdir(path);
-    }
-
-    @Override
-    public int remove(String path) throws IOException {
-        return kfsAccess.kfs_remove(path);
-    }
-
-    @Override
-    public long filesize(String path) throws IOException {
-        return kfsAccess.kfs_filesize(path);
-    }
-
-    @Override
-    public short getReplication(String path) throws IOException {
-        return kfsAccess.kfs_getReplication(path);
-    }
-
-    @Override
-    public short setReplication(String path, short replication) throws IOException {
-        return kfsAccess.kfs_setReplication(path, replication);
-    }
-
-    @Override
-    public String[][] getDataLocation(String path, long start, long len) throws IOException {
-        return kfsAccess.kfs_getDataLocation(path, start, len);
-    }
-
-    @Override
-    public long getModificationTime(String path) throws IOException {
-        return kfsAccess.kfs_getModificationTime(path);
-    }
-
-    @Override
-    public FSDataInputStream open(String path, int bufferSize) throws IOException {
-        return new FSDataInputStream(new KFSInputStream(kfsAccess, path, 
-                                                        statistics));
-    }
-
-    @Override
-    public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
-        return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, replication, false, progress), 
-                                      statistics);
-    }
-
-    @Override
-    public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
-        // when opening for append, # of replicas is ignored
-        return new FSDataOutputStream(new KFSOutputStream(kfsAccess, path, (short) 1, true, progress), 
-                                      statistics);
-    }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
deleted file mode 100644
index 492230f..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * Implements the Hadoop FSInputStream interfaces to allow applications to read
- * files in Kosmos File System (KFS).
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.*;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSInputStream;
-
-import org.kosmix.kosmosfs.access.KfsAccess;
-import org.kosmix.kosmosfs.access.KfsInputChannel;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class KFSInputStream extends FSInputStream {
-
-    private KfsInputChannel kfsChannel;
-    private FileSystem.Statistics statistics;
-    private long fsize;
-
-    @Deprecated
-    public KFSInputStream(KfsAccess kfsAccess, String path) {
-      this(kfsAccess, path, null);
-    }
-
-    public KFSInputStream(KfsAccess kfsAccess, String path,
-                            FileSystem.Statistics stats) {
-        this.statistics = stats;
-        this.kfsChannel = kfsAccess.kfs_open(path);
-        if (this.kfsChannel != null)
-            this.fsize = kfsAccess.kfs_filesize(path);
-        else
-            this.fsize = 0;
-    }
-
-    @Override
-    public long getPos() throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        return kfsChannel.tell();
-    }
-
-    @Override
-    public synchronized int available() throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        return (int) (this.fsize - getPos());
-    }
-
-    @Override
-    public synchronized void seek(long targetPos) throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        kfsChannel.seek(targetPos);
-    }
-
-    @Override
-    public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-        return false;
-    }
-
-    @Override
-    public synchronized int read() throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        byte b[] = new byte[1];
-        int res = read(b, 0, 1);
-        if (res == 1) {
-          if (statistics != null) {
-            statistics.incrementBytesRead(1);
-          }
-          return b[0] & 0xff;
-        }
-        return -1;
-    }
-
-    @Override
-    public synchronized int read(byte b[], int off, int len) throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-	int res;
-
-	res = kfsChannel.read(ByteBuffer.wrap(b, off, len));
-	// Use -1 to signify EOF
-	if (res == 0)
-	    return -1;
-	if (statistics != null) {
-	  statistics.incrementBytesRead(res);
-	}
-	return res;
-    }
-
-    @Override
-    public synchronized void close() throws IOException {
-        if (kfsChannel == null) {
-            return;
-        }
-
-        kfsChannel.close();
-        kfsChannel = null;
-    }
-
-    @Override
-    public boolean markSupported() {
-        return false;
-    }
-
-    @Override
-    public void mark(int readLimit) {
-        // Do nothing
-    }
-
-    @Override
-    public void reset() throws IOException {
-        throw new IOException("Mark not supported");
-    }
-
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
deleted file mode 100644
index a50f750..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * Implements the Hadoop FSOutputStream interfaces to allow applications to write to
- * files in Kosmos File System (KFS).
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.*;
-import java.nio.ByteBuffer;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Progressable;
-
-import org.kosmix.kosmosfs.access.KfsAccess;
-import org.kosmix.kosmosfs.access.KfsOutputChannel;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-class KFSOutputStream extends OutputStream {
-
-    private String path;
-    private KfsOutputChannel kfsChannel;
-    private Progressable progressReporter;
-
-    public KFSOutputStream(KfsAccess kfsAccess, String path, short replication,
-                           boolean append, Progressable prog) {
-        this.path = path;
-
-        if ((append) && (kfsAccess.kfs_isFile(path)))
-                this.kfsChannel = kfsAccess.kfs_append(path);
-        else
-                this.kfsChannel = kfsAccess.kfs_create(path, replication);
-        this.progressReporter = prog;
-    }
-
-    public long getPos() throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        return kfsChannel.tell();
-    }
-
-    @Override
-    public void write(int v) throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        byte[] b = new byte[1];
-
-        b[0] = (byte) v;
-        write(b, 0, 1);
-    }
-
-    @Override
-    public void write(byte b[], int off, int len) throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-
-        // touch the progress before going into KFS since the call can block
-        progressReporter.progress();
-        kfsChannel.write(ByteBuffer.wrap(b, off, len));
-    }
-
-    @Override
-    public void flush() throws IOException {
-        if (kfsChannel == null) {
-            throw new IOException("File closed");
-        }
-        // touch the progress before going into KFS since the call can block
-        progressReporter.progress();
-        kfsChannel.sync();
-    }
-
-    @Override
-    public synchronized void close() throws IOException {
-        if (kfsChannel == null) {
-            return;
-        }
-        flush();
-        kfsChannel.close();
-        kfsChannel = null;
-    }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java
deleted file mode 100644
index 591eeaf..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * Implements the Hadoop FS interfaces to allow applications to store
- *files in Kosmos File System (KFS).
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.Progressable;
-
-/**
- * A FileSystem backed by KFS.
- *
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class KosmosFileSystem extends FileSystem {
-
-    private FileSystem localFs;
-    private IFSImpl kfsImpl = null;
-    private URI uri;
-    private Path workingDir = new Path("/");
-
-    public KosmosFileSystem() {
-
-    }
-
-    KosmosFileSystem(IFSImpl fsimpl) {
-        this.kfsImpl = fsimpl;
-    }
-
-    /**
-     * Return the protocol scheme for the FileSystem.
-     * <p/>
-     *
-     * @return <code>kfs</code>
-     */
-    @Override
-    public String getScheme() {
-      return "kfs";
-    }
-
-    @Override
-    public URI getUri() {
-	return uri;
-    }
-
-    @Override
-    public void initialize(URI uri, Configuration conf) throws IOException {
-      super.initialize(uri, conf);
-      try {
-        if (kfsImpl == null) {
-          if (uri.getHost() == null) {
-            kfsImpl = new KFSImpl(conf.get("fs.kfs.metaServerHost", ""),
-                                  conf.getInt("fs.kfs.metaServerPort", -1),
-                                  statistics);
-          } else {
-            kfsImpl = new KFSImpl(uri.getHost(), uri.getPort(), statistics);
-          }
-        }
-
-        this.localFs = FileSystem.getLocal(conf);
-        this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
-        this.workingDir = new Path("/user", System.getProperty("user.name")
-                                   ).makeQualified(this);
-        setConf(conf);
-
-      } catch (Exception e) {
-        e.printStackTrace();
-        System.out.println("Unable to initialize KFS");
-        System.exit(-1);
-      }
-    }
-
-    @Override
-    public Path getWorkingDirectory() {
-	return workingDir;
-    }
-
-    @Override
-    public void setWorkingDirectory(Path dir) {
-	workingDir = makeAbsolute(dir);
-    }
-
-    private Path makeAbsolute(Path path) {
-	if (path.isAbsolute()) {
-	    return path;
-	}
-	return new Path(workingDir, path);
-    }
-
-    @Override
-    public boolean mkdirs(Path path, FsPermission permission
-        ) throws IOException {
-	Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-
-	int res;
-
-	// System.out.println("Calling mkdirs on: " + srep);
-
-	res = kfsImpl.mkdirs(srep);
-	
-	return res == 0;
-    }
-
-    @Override
-    public boolean isDirectory(Path path) throws IOException {
-	Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-
-	// System.out.println("Calling isdir on: " + srep);
-
-        return kfsImpl.isDirectory(srep);
-    }
-
-    @Override
-    public boolean isFile(Path path) throws IOException {
-	Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-        return kfsImpl.isFile(srep);
-    }
-
-    @Override
-    public FileStatus[] listStatus(Path path) throws IOException {
-        Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-
-        if(!kfsImpl.exists(srep))
-          throw new FileNotFoundException("File " + path + " does not exist.");
-
-        if (kfsImpl.isFile(srep))
-                return new FileStatus[] { getFileStatus(path) } ;
-
-        return kfsImpl.readdirplus(absolute);
-    }
-
-    @Override
-    public FileStatus getFileStatus(Path path) throws IOException {
-	Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-        if (!kfsImpl.exists(srep)) {
-          throw new FileNotFoundException("File " + path + " does not exist.");
-        }
-        if (kfsImpl.isDirectory(srep)) {
-            // System.out.println("Status of path: " + path + " is dir");
-            return new FileStatus(0, true, 1, 0, kfsImpl.getModificationTime(srep), 
-                                  path.makeQualified(this));
-        } else {
-            // System.out.println("Status of path: " + path + " is file");
-            return new FileStatus(kfsImpl.filesize(srep), false, 
-                                  kfsImpl.getReplication(srep),
-                                  getDefaultBlockSize(),
-                                  kfsImpl.getModificationTime(srep),
-                                  path.makeQualified(this));
-        }
-    }
-    
-    @Override
-    public FSDataOutputStream append(Path f, int bufferSize,
-        Progressable progress) throws IOException {
-        Path parent = f.getParent();
-        if (parent != null && !mkdirs(parent)) {
-            throw new IOException("Mkdirs failed to create " + parent);
-        }
-
-        Path absolute = makeAbsolute(f);
-        String srep = absolute.toUri().getPath();
-
-        return kfsImpl.append(srep, bufferSize, progress);
-    }
-
-    @Override
-    public FSDataOutputStream create(Path file, FsPermission permission,
-                                     boolean overwrite, int bufferSize,
-				     short replication, long blockSize, Progressable progress)
-	throws IOException {
-
-        if (exists(file)) {
-            if (overwrite) {
-                delete(file, true);
-            } else {
-                throw new IOException("File already exists: " + file);
-            }
-        }
-
-	Path parent = file.getParent();
-	if (parent != null && !mkdirs(parent)) {
-	    throw new IOException("Mkdirs failed to create " + parent);
-	}
-
-        Path absolute = makeAbsolute(file);
-        String srep = absolute.toUri().getPath();
-
-        return kfsImpl.create(srep, replication, bufferSize, progress);
-    }
-
-    @Override
-    public FSDataInputStream open(Path path, int bufferSize) throws IOException {
-        if (!exists(path))
-            throw new IOException("File does not exist: " + path);
-
-        Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-
-        return kfsImpl.open(srep, bufferSize);
-    }
-
-    @Override
-    public boolean rename(Path src, Path dst) throws IOException {
-	Path absoluteS = makeAbsolute(src);
-        String srepS = absoluteS.toUri().getPath();
-	Path absoluteD = makeAbsolute(dst);
-        String srepD = absoluteD.toUri().getPath();
-
-        // System.out.println("Calling rename on: " + srepS + " -> " + srepD);
-
-        return kfsImpl.rename(srepS, srepD) == 0;
-    }
-
-    // recursively delete the directory and its contents
-    @Override
-    public boolean delete(Path path, boolean recursive) throws IOException {
-      Path absolute = makeAbsolute(path);
-      String srep = absolute.toUri().getPath();
-      if (kfsImpl.isFile(srep))
-        return kfsImpl.remove(srep) == 0;
-
-      FileStatus[] dirEntries = listStatus(absolute);
-      if (!recursive && (dirEntries.length != 0)) {
-        throw new IOException("Directory " + path.toString() + 
-        " is not empty.");
-      }
-
-      for (int i = 0; i < dirEntries.length; i++) {
-        delete(new Path(absolute, dirEntries[i].getPath()), recursive);
-      }
-      return kfsImpl.rmdir(srep) == 0;
-    }
-    
-    @Override
-    public short getDefaultReplication() {
-	return 3;
-    }
-
-    @Override
-    public boolean setReplication(Path path, short replication)
-	throws IOException {
-
-	Path absolute = makeAbsolute(path);
-        String srep = absolute.toUri().getPath();
-
-        int res = kfsImpl.setReplication(srep, replication);
-        return res >= 0;
-    }
-
-    // 64MB is the KFS block size
-
-    @Override
-    public long getDefaultBlockSize() {
-	return 1 << 26;
-    }
-
-    @Deprecated            
-    public void lock(Path path, boolean shared) throws IOException {
-
-    }
-
-    @Deprecated            
-    public void release(Path path) throws IOException {
-
-    }
-
-    /**
-     * Return null if the file doesn't exist; otherwise, get the
-     * locations of the various chunks of the file file from KFS.
-     */
-    @Override
-    public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
-        long len) throws IOException {
-
-      if (file == null) {
-        return null;
-      }
-      String srep = makeAbsolute(file.getPath()).toUri().getPath();
-      String[][] hints = kfsImpl.getDataLocation(srep, start, len);
-      if (hints == null) {
-        return null;
-      }
-      BlockLocation[] result = new BlockLocation[hints.length];
-      long blockSize = getDefaultBlockSize();
-      long length = len;
-      long blockStart = start;
-      for(int i=0; i < result.length; ++i) {
-        result[i] = new BlockLocation(null, hints[i], blockStart, 
-                                      length < blockSize ? length : blockSize);
-        blockStart += blockSize;
-        length -= blockSize;
-      }
-      return result;
-    }
-
-    @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException {
-	FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
-    }
-
-    @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException {
-	FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
-    }
-
-    @Override
-    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-	throws IOException {
-	return tmpLocalFile;
-    }
-
-    @Override
-    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-	throws IOException {
-	moveFromLocalFile(tmpLocalFile, fsOutputFile);
-    }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/package.html b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/package.html
deleted file mode 100644
index 365b60b..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/kfs/package.html
+++ /dev/null
@@ -1,98 +0,0 @@
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<head></head>
-<body>
-<h1>A client for the Kosmos filesystem (KFS)</h1>
-
-<h3>Introduction</h3>
-
-This pages describes how to use Kosmos Filesystem 
-(<a href="http://kosmosfs.sourceforge.net"> KFS </a>) as a backing
-store with Hadoop.   This page assumes that you have downloaded the
-KFS software and installed necessary binaries as outlined in the KFS
-documentation.
-
-<h3>Steps</h3>
-
-        <ul>
-          <li>In the Hadoop conf directory edit core-site.xml,
-          add the following:
-            <pre>
-&lt;property&gt;
-  &lt;name&gt;fs.kfs.impl&lt;/name&gt;
-  &lt;value&gt;org.apache.hadoop.fs.kfs.KosmosFileSystem&lt;/value&gt;
-  &lt;description&gt;The FileSystem for kfs: uris.&lt;/description&gt;
-&lt;/property&gt;
-            </pre>
-
-          <li>In the Hadoop conf directory edit core-site.xml,
-          adding the following (with appropriate values for
-          &lt;server&gt; and &lt;port&gt;):
-            <pre>
-&lt;property&gt;
-  &lt;name&gt;fs.default.name&lt;/name&gt;
-  &lt;value&gt;kfs://&lt;server:port&gt;&lt;/value&gt; 
-&lt;/property&gt;
-
-&lt;property&gt;
-  &lt;name&gt;fs.kfs.metaServerHost&lt;/name&gt;
-  &lt;value&gt;&lt;server&gt;&lt;/value&gt;
-  &lt;description&gt;The location of the KFS meta server.&lt;/description&gt;
-&lt;/property&gt;
-
-&lt;property&gt;
-  &lt;name&gt;fs.kfs.metaServerPort&lt;/name&gt;
-  &lt;value&gt;&lt;port&gt;&lt;/value&gt;
-  &lt;description&gt;The location of the meta server's port.&lt;/description&gt;
-&lt;/property&gt;
-
-</pre>
-          </li>
-
-          <li>Copy KFS's <i> kfs-0.1.jar </i> to Hadoop's lib directory.  This step
-          enables Hadoop's to load the KFS specific modules.  Note
-          that, kfs-0.1.jar was built when you compiled KFS source
-          code.  This jar file contains code that calls KFS's client
-          library code via JNI; the native code is in KFS's <i>
-          libkfsClient.so </i> library.
-          </li>
-
-          <li> When the Hadoop map/reduce trackers start up, those
-processes (on local as well as remote nodes) will now need to load
-KFS's <i> libkfsClient.so </i> library.  To simplify this process, it is advisable to
-store libkfsClient.so in an NFS accessible directory (similar to where
-Hadoop binaries/scripts are stored); then, modify Hadoop's
-conf/hadoop-env.sh adding the following line and providing suitable
-value for &lt;path&gt;:
-<pre>
-export LD_LIBRARY_PATH=&lt;path&gt;
-</pre>
-
-
-          <li>Start only the map/reduce trackers
-          <br />
-          example: execute Hadoop's bin/start-mapred.sh</li>
-        </ul>
-<br/>
-
-If the map/reduce job trackers start up, all file-I/O is done to KFS.
-
-</body>
-</html>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 95d0a2d..b3acf50 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -89,7 +89,11 @@
   public ChRootedFileSystem(final URI uri, Configuration conf)
       throws IOException {
     super(FileSystem.get(uri, conf));
-    chRootPathPart = new Path(uri.getPath());
+    String pathString = uri.getPath();
+    if (pathString.isEmpty()) {
+      pathString = "/";
+    }
+    chRootPathPart = new Path(pathString);
     chRootPathPartString = chRootPathPart.toUri().getPath();
     myUri = uri;
     workingDir = getHomeDirectory();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 130ff97..a8a77be 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -205,9 +205,13 @@
       protected
       AbstractFileSystem getTargetFileSystem(final URI uri)
         throws URISyntaxException, UnsupportedFileSystemException {
+          String pathString = uri.getPath();
+          if (pathString.isEmpty()) {
+            pathString = "/";
+          }
           return new ChRootedFs(
               AbstractFileSystem.createFileSystem(uri, config),
-              new Path(uri.getPath()));
+              new Path(pathString));
       }
 
       @Override
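
Both viewfs hunks above guard against URIs that carry an authority but no path. A minimal sketch of the failure mode, using only the JDK (the class name and printout are illustrative; Hadoop's Path constructor, which rejects empty strings, is the piece being worked around):

```java
import java.net.URI;

public class EmptyPathDemo {
  public static void main(String[] args) {
    URI uri = URI.create("mockfs://foo"); // authority only, no path component
    String pathString = uri.getPath();    // yields "" for such URIs, not "/"
    if (pathString.isEmpty()) {
      pathString = "/";                   // new Path("") would throw, so fall back to the root
    }
    System.out.println("chroot path part = " + pathString); // "/"
  }
}
```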
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 8a95064..8dde9f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -24,7 +24,6 @@
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.compress.snappy.LoadSnappy;
 import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
 import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -34,11 +33,6 @@
  * This class creates snappy compressors/decompressors.
  */
 public class SnappyCodec implements Configurable, CompressionCodec {
-
-  static {
-    LoadSnappy.isLoaded();
-  }
-
   Configuration conf;
 
   /**
@@ -63,11 +57,26 @@
 
   /**
    * Are the native snappy libraries loaded & initialized?
-   *
-   * @return true if loaded & initialized, otherwise false
    */
+  public static void checkNativeCodeLoaded() {
+      if (!NativeCodeLoader.buildSupportsSnappy()) {
+        throw new RuntimeException("native snappy library not available: " +
+            "this version of libhadoop was built without " +
+            "snappy support.");
+      }
+      if (!SnappyCompressor.isNativeCodeLoaded()) {
+        throw new RuntimeException("native snappy library not available: " +
+            "SnappyCompressor has not been loaded.");
+      }
+      if (!SnappyDecompressor.isNativeCodeLoaded()) {
+        throw new RuntimeException("native snappy library not available: " +
+            "SnappyDecompressor has not been loaded.");
+      }
+  }
+  
   public static boolean isNativeCodeLoaded() {
-    return LoadSnappy.isLoaded() && NativeCodeLoader.isNativeCodeLoaded();
+    return SnappyCompressor.isNativeCodeLoaded() && 
+        SnappyDecompressor.isNativeCodeLoaded();
   }
 
   /**
@@ -97,9 +106,7 @@
   public CompressionOutputStream createOutputStream(OutputStream out,
                                                     Compressor compressor)
       throws IOException {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
+    checkNativeCodeLoaded();
     int bufferSize = conf.getInt(
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
@@ -117,10 +124,7 @@
    */
   @Override
   public Class<? extends Compressor> getCompressorType() {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
-
+    checkNativeCodeLoaded();
     return SnappyCompressor.class;
   }
 
@@ -131,9 +135,7 @@
    */
   @Override
   public Compressor createCompressor() {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
+    checkNativeCodeLoaded();
     int bufferSize = conf.getInt(
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
@@ -167,10 +169,7 @@
   public CompressionInputStream createInputStream(InputStream in,
                                                   Decompressor decompressor)
       throws IOException {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
-
+    checkNativeCodeLoaded();
     return new BlockDecompressorStream(in, decompressor, conf.getInt(
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));
@@ -183,10 +182,7 @@
    */
   @Override
   public Class<? extends Decompressor> getDecompressorType() {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
-
+    checkNativeCodeLoaded();
     return SnappyDecompressor.class;
   }
 
@@ -197,9 +193,7 @@
    */
   @Override
   public Decompressor createDecompressor() {
-    if (!isNativeCodeLoaded()) {
-      throw new RuntimeException("native snappy library not available");
-    }
+    checkNativeCodeLoaded();
     int bufferSize = conf.getInt(
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
         CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
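
A sketch of how a caller might use the refactored codec, assuming a Hadoop classpath (the SnappyGuard class and its messages are hypothetical; isNativeCodeLoaded() and checkNativeCodeLoaded() are the methods introduced above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class SnappyGuard {
  public static void main(String[] args) {
    // Non-throwing probe: true only if both compressor and decompressor loaded.
    if (!SnappyCodec.isNativeCodeLoaded()) {
      System.err.println("libhadoop lacks snappy support; skipping codec");
      return;
    }
    SnappyCodec codec =
        ReflectionUtils.newInstance(SnappyCodec.class, new Configuration());
    // The factory methods now call checkNativeCodeLoaded() internally and
    // throw a RuntimeException naming the missing piece (build flag,
    // compressor, or decompressor) rather than a generic message.
    System.out.println("snappy ready; extension: " + codec.getDefaultExtension());
  }
}
```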
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/LoadSnappy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/LoadSnappy.java
deleted file mode 100644
index 05dc984..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/LoadSnappy.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.compress.snappy;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.util.NativeCodeLoader;
-
-/**
- * Determines if Snappy native library is available and loads it if available.
- */
-public class LoadSnappy {
-  private static final Log LOG = LogFactory.getLog(LoadSnappy.class.getName());
-
-  private static boolean AVAILABLE = false;
-  private static boolean LOADED = false;
-
-  static {
-    try {
-      System.loadLibrary("snappy");
-      LOG.warn("Snappy native library is available");
-      AVAILABLE = true;
-    } catch (UnsatisfiedLinkError ex) {
-      //NOP
-    }
-    boolean hadoopNativeAvailable = NativeCodeLoader.isNativeCodeLoaded();
-    LOADED = AVAILABLE && hadoopNativeAvailable;
-    if (LOADED) {
-      LOG.info("Snappy native library loaded");
-    } else {
-      LOG.warn("Snappy native library not loaded");
-    }
-  }
-
-  /**
-   * Returns if Snappy native library is loaded.
-   *
-   * @return <code>true</code> if Snappy native library is loaded,
-   * <code>false</code> if not.
-   */
-  public static boolean isAvailable() {
-    return AVAILABLE;
-  }
-
-  /**
-   * Returns if Snappy native library is loaded.
-   *
-   * @return <code>true</code> if Snappy native library is loaded,
-   * <code>false</code> if not.
-   */
-  public static boolean isLoaded() {
-    return LOADED;
-  }
-
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
index ba778e0..c37b97e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
@@ -26,6 +26,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
  * A {@link Compressor} based on the snappy compression algorithm.
@@ -51,22 +52,24 @@
   private long bytesRead = 0L;
   private long bytesWritten = 0L;
 
-
+  private static boolean nativeSnappyLoaded = false;
+  
   static {
-    if (LoadSnappy.isLoaded()) {
-      // Initialize the native library
+    if (NativeCodeLoader.isNativeCodeLoaded() &&
+        NativeCodeLoader.buildSupportsSnappy()) {
       try {
         initIDs();
+        nativeSnappyLoaded = true;
       } catch (Throwable t) {
-        // Ignore failure to load/initialize snappy
-        LOG.warn(t.toString());
+        LOG.error("failed to load SnappyCompressor", t);
       }
-    } else {
-      LOG.error("Cannot load " + SnappyCompressor.class.getName() +
-          " without snappy library!");
     }
   }
-
+  
+  public static boolean isNativeCodeLoaded() {
+    return nativeSnappyLoaded;
+  }
+  
   /**
    * Creates a new compressor.
    *
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
index 4620092..b29014e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
 
 /**
  * A {@link Decompressor} based on the snappy compression algorithm.
@@ -47,21 +48,24 @@
   private int userBufOff = 0, userBufLen = 0;
   private boolean finished;
 
+  private static boolean nativeSnappyLoaded = false;
+
   static {
-    if (LoadSnappy.isLoaded()) {
-      // Initialize the native library
+    if (NativeCodeLoader.isNativeCodeLoaded() &&
+        NativeCodeLoader.buildSupportsSnappy()) {
       try {
         initIDs();
+        nativeSnappyLoaded = true;
       } catch (Throwable t) {
-        // Ignore failure to load/initialize snappy
-        LOG.warn(t.toString());
+        LOG.error("failed to load SnappyDecompressor", t);
       }
-    } else {
-      LOG.error("Cannot load " + SnappyDecompressor.class.getName() +
-          " without snappy library!");
     }
   }
-
+  
+  public static boolean isNativeCodeLoaded() {
+    return nativeSnappyLoaded;
+  }
+  
   /**
    * Creates a new compressor.
    *
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 66c81c2..bb2c205 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -87,7 +87,6 @@
 import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
 import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -1374,20 +1373,38 @@
           dataLengthBuffer.clear();
           if (authMethod == null) {
             throw new IOException("Unable to read authentication method");
-          }
-          if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
-            AccessControlException ae = new AccessControlException("Authorization ("
-              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
-              + ") is enabled but authentication ("
-              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
-              + ") is configured as simple. Please configure another method "
-              + "like kerberos or digest.");
-            setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
-                null, ae.getClass().getName(), ae.getMessage());
-            responder.doRespond(authFailedCall);
-            throw ae;
-          }
-          if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
+          }          
+          final boolean clientUsingSasl;
+          switch (authMethod) {
+            case SIMPLE: { // no sasl for simple
+              if (isSecurityEnabled) {
+                AccessControlException ae = new AccessControlException("Authorization ("
+                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+                    + ") is enabled but authentication ("
+                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+                    + ") is configured as simple. Please configure another method "
+                    + "like kerberos or digest.");
+                setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
+                    null, ae.getClass().getName(), ae.getMessage());
+                responder.doRespond(authFailedCall);
+                throw ae;
+              }
+              clientUsingSasl = false;
+              useSasl = false; 
+              break;
+            }
+            case DIGEST: {
+              clientUsingSasl = true;
+              useSasl = (secretManager != null);
+              break;
+            }
+            default: {
+              clientUsingSasl = true;
+              useSasl = isSecurityEnabled; 
+              break;
+            }
+          }          
+          if (clientUsingSasl && !useSasl) {
             doSaslReply(SaslStatus.SUCCESS, new IntWritable(
                 SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
             authMethod = AuthMethod.SIMPLE;
@@ -1396,9 +1413,6 @@
             // to simple auth from now on.
             skipInitialSaslHandshake = true;
           }
-          if (authMethod != AuthMethod.SIMPLE) {
-            useSasl = true;
-          }
           
           connectionHeaderBuf = null;
           connectionHeaderRead = true;
@@ -1532,8 +1546,6 @@
             UserGroupInformation realUser = user;
             user = UserGroupInformation.createProxyUser(protocolUser
                 .getUserName(), realUser);
-            // Now the user is a proxy user, set Authentication method Proxy.
-            user.setAuthenticationMethod(AuthenticationMethod.PROXY);
           }
         }
       }
@@ -1883,7 +1895,7 @@
     // Create the responder here
     responder = new Responder();
     
-    if (isSecurityEnabled) {
+    if (secretManager != null) {
       SaslRpcServer.init(conf);
     }
     
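
The Server.java hunks replace two special-cased checks with one per-method decision. An illustrative standalone sketch of the resulting truth table (the enum and class are hypothetical stand-ins; the booleans correspond to isSecurityEnabled and secretManager != null in the patch):

```java
public class SaslDecision {
  enum AuthMethod { SIMPLE, DIGEST, KERBEROS }

  // SIMPLE never negotiates SASL; DIGEST needs only a secret manager, so
  // token auth now works on an otherwise insecure server; everything else
  // follows the cluster-wide security flag.
  static boolean useSasl(AuthMethod method, boolean securityEnabled,
                         boolean hasSecretManager) {
    switch (method) {
      case SIMPLE: return false;
      case DIGEST: return hasSecretManager;
      default:     return securityEnabled;
    }
  }

  public static void main(String[] args) {
    System.out.println(useSasl(AuthMethod.DIGEST, false, true));    // true
    System.out.println(useSasl(AuthMethod.KERBEROS, false, false)); // false -> SWITCH_TO_SIMPLE_AUTH reply
  }
}
```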
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index e899298..af57985 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Watches a stream of long values, maintaining online estimates of specific
@@ -60,8 +61,9 @@
   @VisibleForTesting
   protected Map<Quantile, Long> previousSnapshot = null;
 
-  private final ScheduledExecutorService scheduler = Executors
-      .newScheduledThreadPool(1);
+  private static final ScheduledExecutorService scheduler = Executors
+      .newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
+          .setNameFormat("MutableQuantiles-%d").build());
 
   /**
    * Instantiates a new {@link MutableQuantiles} for a metric that rolls itself
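
A sketch of the executor pattern adopted above: one static scheduler whose threads are daemons, so stray MutableQuantiles instances can no longer keep the JVM alive. Assumes Guava on the classpath; the demo class and its output are illustrative:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DaemonSchedulerDemo {
  private static final ScheduledExecutorService SCHEDULER =
      Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
          .setDaemon(true)                      // do not block JVM shutdown
          .setNameFormat("MutableQuantiles-%d") // greppable thread names
          .build());

  public static void main(String[] args) throws InterruptedException {
    SCHEDULER.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        System.out.println("rollover on " + Thread.currentThread().getName());
      }
    }, 0, 1, TimeUnit.SECONDS);
    Thread.sleep(2500);
    // main returns; daemon pool threads die with the JVM, no shutdown() needed.
  }
}
```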
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
index c0b14cc..97685b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
@@ -210,9 +210,12 @@
     int rankMin = 0;
     int desired = (int) (quantile * count);
 
+    ListIterator<SampleItem> it = samples.listIterator();
+    SampleItem prev = null;
+    SampleItem cur = it.next();
     for (int i = 1; i < samples.size(); i++) {
-      SampleItem prev = samples.get(i - 1);
-      SampleItem cur = samples.get(i);
+      prev = cur;
+      cur = it.next();
 
       rankMin += prev.g;
 
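
Assuming the sample buffer is a linked list, which the switch to ListIterator suggests, the hunk above turns a quadratic pass into a linear one: get(i) on a linked list walks from the head on every call, while the iterator advances one node per element. A small illustrative demo:

```java
import java.util.LinkedList;
import java.util.ListIterator;

public class IteratorWalkDemo {
  public static void main(String[] args) {
    LinkedList<Integer> samples = new LinkedList<Integer>();
    for (int i = 0; i < 6; i++) {
      samples.add(i * 10);
    }
    ListIterator<Integer> it = samples.listIterator();
    Integer prev;
    Integer cur = it.next();
    for (int i = 1; i < samples.size(); i++) {
      prev = cur;
      cur = it.next(); // one hop, instead of get(i - 1) and get(i) each O(i)
      System.out.println(prev + " -> " + cur);
    }
  }
}
```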
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index e3dd9f3..66ffe20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -452,7 +452,7 @@
       return action.run();
     }
   }
-
+  
   /**
    * Perform the given action as the daemon's login user. If an
    * InterruptedException is thrown, it is converted to an IOException.
@@ -499,7 +499,7 @@
    * @throws IOException If unable to authenticate via SPNEGO
    */
   public static URLConnection openSecureHttpConnection(URL url) throws IOException {
-    if(!UserGroupInformation.isSecurityEnabled()) {
+    if (!HttpConfig.isSecure() && !UserGroupInformation.isSecurityEnabled()) {
       return url.openConnection();
     }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index 4c17f9f..9560caf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
@@ -160,7 +160,7 @@
       } finally {
         is.close();
       }
-      LOG.info(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
+      LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
     } else {
       keystore.load(null, null);
     }
@@ -201,7 +201,7 @@
                                                  truststorePassword,
                                                  truststoreReloadInterval);
     trustManager.init();
-    LOG.info(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+    LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
 
     trustManagers = new TrustManager[]{trustManager};
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
index dc0c88e..4fe81da 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
@@ -75,6 +75,11 @@
   }
 
   /**
+   * Returns true only if this build was compiled with support for snappy.
+   */
+  public static native boolean buildSupportsSnappy();
+
+  /**
    * Return if native hadoop libraries, if present, can be used for this job.
    * @param conf configuration
    * 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index ba32269..1296767 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -34,6 +34,7 @@
 import java.util.Locale;
 import java.util.StringTokenizer;
 
+import com.google.common.net.InetAddresses;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
@@ -77,6 +78,9 @@
    * @return the hostname to the first dot
    */
   public static String simpleHostname(String fullHostname) {
+    if (InetAddresses.isInetAddress(fullHostname)) {
+      return fullHostname;
+    }
     int offset = fullHostname.indexOf('.');
     if (offset != -1) {
       return fullHostname.substring(0, offset);
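
Without the guard above, simpleHostname("10.10.5.68") would truncate at the first dot and return "10". Guava's InetAddresses.isInetAddress performs a purely syntactic check (no DNS lookup), so the guard is cheap. A self-contained sketch (the demo class re-implements the patched logic for illustration):

```java
import com.google.common.net.InetAddresses;

public class SimpleHostnameDemo {
  static String simpleHostname(String fullHostname) {
    if (InetAddresses.isInetAddress(fullHostname)) {
      return fullHostname; // IP addresses pass through untruncated
    }
    int offset = fullHostname.indexOf('.');
    return offset != -1 ? fullHostname.substring(0, offset) : fullHostname;
  }

  public static void main(String[] args) {
    System.out.println(simpleHostname("hadoop01.domain.com")); // hadoop01
    System.out.println(simpleHostname("10.10.5.68"));          // 10.10.5.68
  }
}
```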
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
similarity index 66%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
copy to hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
index 5511894..4edb151 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,14 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.yarn.server.resourcemanager.api;
 
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+#include "config.h"
 
-@ProtocolInfo(
-    protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.RMAdminProtocolPB",
-    protocolVersion = 1)
-public interface RMAdminProtocolPB extends RMAdminProtocolService.BlockingInterface {
+#include <jni.h>
 
+JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsSnappy
+  (JNIEnv *env, jclass clazz)
+{
+#ifdef HADOOP_SNAPPY_LIBRARY
+  return JNI_TRUE;
+#else
+  return JNI_FALSE;
+#endif
 }
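
A sketch of how the JNI stub above surfaces in Java, assuming a Hadoop classpath. buildSupportsSnappy() is the native method declared in NativeCodeLoader earlier in this patch; the C side returns JNI_TRUE only when libhadoop was compiled with HADOOP_SNAPPY_LIBRARY defined. Note the guard: calling a native method before libhadoop.so is loaded would throw UnsatisfiedLinkError.

```java
import org.apache.hadoop.util.NativeCodeLoader;

public class SnappyBuildProbe {
  public static void main(String[] args) {
    if (!NativeCodeLoader.isNativeCodeLoaded()) {
      System.out.println("libhadoop.so not loaded; cannot probe for snappy");
    } else if (NativeCodeLoader.buildSupportsSnappy()) {
      System.out.println("this libhadoop build includes snappy support");
    } else {
      System.out.println("libhadoop loaded, but built without snappy");
    }
  }
}
```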
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
index 74f1607..64632e4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -17,6 +17,5 @@
 org.apache.hadoop.fs.viewfs.ViewFileSystem
 org.apache.hadoop.fs.s3.S3FileSystem
 org.apache.hadoop.fs.s3native.NativeS3FileSystem
-org.apache.hadoop.fs.kfs.KosmosFileSystem
 org.apache.hadoop.fs.ftp.FTPFileSystem
 org.apache.hadoop.fs.HarFileSystem
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 552ef99..affcc52 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -774,42 +774,6 @@
   <description>Replication factor</description>
 </property>
 
-<!-- Kosmos File System -->
-
-<property>
-  <name>kfs.stream-buffer-size</name>
-  <value>4096</value>
-  <description>The size of buffer to stream files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-</property>
-
-<property>
-  <name>kfs.bytes-per-checksum</name>
-  <value>512</value>
-  <description>The number of bytes per checksum.  Must not be larger than
-  kfs.stream-buffer-size</description>
-</property>
-
-<property>
-  <name>kfs.client-write-packet-size</name>
-  <value>65536</value>
-  <description>Packet size for clients to write</description>
-</property>
-
-<property>
-  <name>kfs.blocksize</name>
-  <value>67108864</value>
-  <description>Block size</description>
-</property>
-
-<property>
-  <name>kfs.replication</name>
-  <value>3</value>
-  <description>Replication factor</description>
-</property>
-
 <!-- FTP file system -->
 <property>
   <name>ftp.stream-buffer-size</name>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index fa79d59..bc5e4bd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -26,6 +26,8 @@
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.URI;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -434,6 +436,36 @@
           output.indexOf("Failed to determine server trash configuration") != -1);
     }
 
+    // Verify old checkpoint format is recognized
+    {
+      // emulate two old trash checkpoint directories, one that is old enough
+      // to be deleted on the next expunge and one that isn't.
+      long trashInterval = conf.getLong(FS_TRASH_INTERVAL_KEY,
+          FS_TRASH_INTERVAL_DEFAULT);
+      long now = Time.now();
+      DateFormat oldCheckpointFormat = new SimpleDateFormat("yyMMddHHmm");
+      Path dirToDelete = new Path(trashRoot.getParent(),
+          oldCheckpointFormat.format(now - (trashInterval * 60 * 1000) - 1));
+      Path dirToKeep = new Path(trashRoot.getParent(),
+          oldCheckpointFormat.format(now));
+      mkdir(trashRootFs, dirToDelete);
+      mkdir(trashRootFs, dirToKeep);
+
+      // Clear out trash
+      int rc = -1;
+      try {
+        rc = shell.run(new String [] { "-expunge" } );
+      } catch (Exception e) {
+        System.err.println("Exception raised from fs expunge " +
+            e.getLocalizedMessage());
+      }
+      assertEquals(0, rc);
+      assertFalse("old checkpoint format not recognized",
+          trashRootFs.exists(dirToDelete));
+      assertTrue("old checkpoint format directory should not be removed",
+          trashRootFs.exists(dirToKeep));
+    }
+
   }
 
   public static void trashNonDefaultFS(Configuration conf) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java
deleted file mode 100644
index baf25de..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/KFSEmulationImpl.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * We need to provide the ability to the code in fs/kfs without really
- * having a KFS deployment.  For this purpose, use the LocalFileSystem
- * as a way to "emulate" KFS.
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Progressable;
-
-public class KFSEmulationImpl implements IFSImpl {
-    FileSystem localFS;
-    
-    public KFSEmulationImpl(Configuration conf) throws IOException {
-        localFS = FileSystem.getLocal(conf);
-    }
-
-    @Override
-    public boolean exists(String path) throws IOException {
-        return localFS.exists(new Path(path));
-    }
-    @Override
-    public boolean isDirectory(String path) throws IOException {
-        return localFS.isDirectory(new Path(path));
-    }
-    @Override
-    public boolean isFile(String path) throws IOException {
-        return localFS.isFile(new Path(path));
-    }
-
-    @Override
-    public String[] readdir(String path) throws IOException {
-        FileStatus[] p = localFS.listStatus(new Path(path));
-        try {
-          p = localFS.listStatus(new Path(path));
-        } catch ( FileNotFoundException fnfe ) {
-          return null;
-        }
-        String[] entries = null;
-
-        entries = new String[p.length];
-        for (int i = 0; i < p.length; i++)
-            entries[i] = p[i].getPath().toString();
-        return entries;
-    }
-
-    @Override
-    public FileStatus[] readdirplus(Path path) throws IOException {
-        return localFS.listStatus(path);
-    }
-
-    @Override
-    public int mkdirs(String path) throws IOException {
-        if (localFS.mkdirs(new Path(path)))
-            return 0;
-
-        return -1;
-    }
-
-    @Override
-    public int rename(String source, String dest) throws IOException {
-        if (localFS.rename(new Path(source), new Path(dest)))
-            return 0;
-        return -1;
-    }
-
-    @Override
-    public int rmdir(String path) throws IOException {
-        if (isDirectory(path)) {
-            // the directory better be empty
-            String[] dirEntries = readdir(path);
-            if ((dirEntries.length <= 2) && (localFS.delete(new Path(path), true)))
-                return 0;
-        }
-        return -1;
-    }
-
-    @Override
-    public int remove(String path) throws IOException {
-        if (isFile(path) && (localFS.delete(new Path(path), true)))
-            return 0;
-        return -1;
-    }
-
-    @Override
-    public long filesize(String path) throws IOException {
-        return localFS.getFileStatus(new Path(path)).getLen();
-    }
-    @Override
-    public short getReplication(String path) throws IOException {
-        return 1;
-    }
-    @Override
-    public short setReplication(String path, short replication) throws IOException {
-        return 1;
-    }
-    @Override
-    public String[][] getDataLocation(String path, long start, long len) throws IOException {
-        BlockLocation[] blkLocations = 
-          localFS.getFileBlockLocations(localFS.getFileStatus(new Path(path)),
-              start, len);
-          if ((blkLocations == null) || (blkLocations.length == 0)) {
-            return new String[0][];     
-          }
-          int blkCount = blkLocations.length;
-          String[][]hints = new String[blkCount][];
-          for (int i=0; i < blkCount ; i++) {
-            String[] hosts = blkLocations[i].getHosts();
-            hints[i] = new String[hosts.length];
-            hints[i] = hosts;
-          }
-          return hints;
-    }
-
-    @Override
-    public long getModificationTime(String path) throws IOException {
-        FileStatus s = localFS.getFileStatus(new Path(path));
-        if (s == null)
-            return 0;
-
-        return s.getModificationTime();
-    }
-
-    @Override
-    public FSDataOutputStream append(String path, int bufferSize, Progressable progress) throws IOException {
-        // besides path/overwrite, the other args don't matter for
-        // testing purposes.
-        return localFS.append(new Path(path));
-    }
-
-    @Override
-    public FSDataOutputStream create(String path, short replication, int bufferSize, Progressable progress) throws IOException {
-        // besides path/overwrite, the other args don't matter for
-        // testing purposes.
-        return localFS.create(new Path(path));
-    }
-
-    @Override
-    public FSDataInputStream open(String path, int bufferSize) throws IOException {
-        return localFS.open(new Path(path));
-    }
-
-    
-};
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java
deleted file mode 100644
index c1c676e..0000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/kfs/TestKosmosFileSystem.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- *
- * Licensed under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- *
- * 
- * Unit tests for testing the KosmosFileSystem API implementation.
- */
-
-package org.apache.hadoop.fs.kfs;
-
-import java.io.IOException;
-import java.net.URI;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-
-public class TestKosmosFileSystem extends TestCase {
-
-    KosmosFileSystem kosmosFileSystem;
-    KFSEmulationImpl kfsEmul;
-    Path baseDir;
-    
-    @Override
-    protected void setUp() throws IOException {
-        Configuration conf = new Configuration();
-    
-        kfsEmul = new KFSEmulationImpl(conf);
-        kosmosFileSystem = new KosmosFileSystem(kfsEmul);
-        // a dummy URI; we are not connecting to any setup here
-        kosmosFileSystem.initialize(URI.create("kfs:///"), conf);
-        baseDir = new Path(System.getProperty("test.build.data", "/tmp" ) +
-                                              "/kfs-test");
-    }
-
-    @Override
-    protected void tearDown() throws Exception {
-
-    }
-
-    // @Test
-    // Check all the directory API's in KFS
-    public void testDirs() throws Exception {
-        Path subDir1 = new Path("dir.1");
-
-        // make the dir
-        kosmosFileSystem.mkdirs(baseDir);
-        assertTrue(kosmosFileSystem.isDirectory(baseDir));
-        kosmosFileSystem.setWorkingDirectory(baseDir);
-
-        kosmosFileSystem.mkdirs(subDir1);
-        assertTrue(kosmosFileSystem.isDirectory(subDir1));
-
-        assertFalse(kosmosFileSystem.exists(new Path("test1")));
-        assertFalse(kosmosFileSystem.isDirectory(new Path("test/dir.2")));
-
-        FileStatus[] p = kosmosFileSystem.listStatus(baseDir);
-        assertEquals(p.length, 1);
-
-        kosmosFileSystem.delete(baseDir, true);
-        assertFalse(kosmosFileSystem.exists(baseDir));
-    }
-
-    // @Test
-    // Check the file API's
-    public void testFiles() throws Exception {
-        Path subDir1 = new Path("dir.1");
-        Path file1 = new Path("dir.1/foo.1");
-        Path file2 = new Path("dir.1/foo.2");
-
-        kosmosFileSystem.mkdirs(baseDir);
-        assertTrue(kosmosFileSystem.isDirectory(baseDir));
-        kosmosFileSystem.setWorkingDirectory(baseDir);
-
-        kosmosFileSystem.mkdirs(subDir1);
-
-        FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
-        FSDataOutputStream s2 = kosmosFileSystem.create(file2, true, 4096, (short) 1, (long) 4096, null);
-
-        s1.close();
-        s2.close();
-
-        FileStatus[] p = kosmosFileSystem.listStatus(subDir1);
-        assertEquals(p.length, 2);
-
-        kosmosFileSystem.delete(file1, true);
-        p = kosmosFileSystem.listStatus(subDir1);
-        assertEquals(p.length, 1);
-
-        kosmosFileSystem.delete(file2, true);
-        p = kosmosFileSystem.listStatus(subDir1);
-        assertEquals(p.length, 0);
-
-        kosmosFileSystem.delete(baseDir, true);
-        assertFalse(kosmosFileSystem.exists(baseDir));
-    }
-
-    // @Test
-    // Check file/read write
-    public void testFileIO() throws Exception {
-        Path subDir1 = new Path("dir.1");
-        Path file1 = new Path("dir.1/foo.1");
-
-        kosmosFileSystem.mkdirs(baseDir);
-        assertTrue(kosmosFileSystem.isDirectory(baseDir));
-        kosmosFileSystem.setWorkingDirectory(baseDir);
-
-        kosmosFileSystem.mkdirs(subDir1);
-
-        FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);
-
-        int bufsz = 4096;
-        byte[] data = new byte[bufsz];
-
-        for (int i = 0; i < data.length; i++)
-            data[i] = (byte) (i % 16);
-
-        // write 4 bytes and read them back; read API should return a byte per call
-        s1.write(32);
-        s1.write(32);
-        s1.write(32);
-        s1.write(32);
-        // write some data
-        s1.write(data, 0, data.length);
-        // flush out the changes
-        s1.close();
-
-        // Read the stuff back and verify it is correct
-        FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
-        int v;
-        long nread = 0;
-
-        v = s2.read();
-        assertEquals(v, 32);
-        v = s2.read();
-        assertEquals(v, 32);
-        v = s2.read();
-        assertEquals(v, 32);
-        v = s2.read();
-        assertEquals(v, 32);
-
-        assertEquals(s2.available(), data.length);
-
-        byte[] buf = new byte[bufsz];
-        s2.read(buf, 0, buf.length);
-        nread = s2.getPos();
-
-        for (int i = 0; i < data.length; i++)
-            assertEquals(data[i], buf[i]);
-
-        assertEquals(s2.available(), 0);
-
-        s2.close();
-
-        // append some data to the file
-        try {
-            s1 = kosmosFileSystem.append(file1);
-            for (int i = 0; i < data.length; i++)
-                data[i] = (byte) (i % 17);
-            // write the data
-            s1.write(data, 0, data.length);
-            // flush out the changes
-            s1.close();
-
-            // read it back and validate
-            s2 = kosmosFileSystem.open(file1, 4096);
-            s2.seek(nread);
-            s2.read(buf, 0, buf.length);
-            for (int i = 0; i < data.length; i++)
-                assertEquals(data[i], buf[i]);
-
-            s2.close();
-        } catch (Exception e) {
-            System.out.println("append isn't supported by the underlying fs");
-        }
-
-        kosmosFileSystem.delete(file1, true);
-        assertFalse(kosmosFileSystem.exists(file1));        
-        kosmosFileSystem.delete(subDir1, true);
-        assertFalse(kosmosFileSystem.exists(subDir1));        
-        kosmosFileSystem.delete(baseDir, true);
-        assertFalse(kosmosFileSystem.exists(baseDir));        
-    }
-    
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
index e990b924..35e2cb7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
@@ -342,6 +342,15 @@
     chrootFs.close();
     verify(mockFs).delete(eq(rawPath), eq(true));
   }
+  
+  @Test
+  public void testURIEmptyPath() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+
+    URI chrootUri = URI.create("mockfs://foo");
+    new ChRootedFileSystem(chrootUri, conf);
+  }
 
   static class MockFileSystem extends FilterFileSystem {
     MockFileSystem() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsURIs.java
similarity index 62%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
copy to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsURIs.java
index 5511894..6bc014a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsURIs.java
@@ -15,14 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.yarn.server.resourcemanager.api;
+package org.apache.hadoop.fs.viewfs;
 
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+import java.net.URI;
 
-@ProtocolInfo(
-    protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.RMAdminProtocolPB",
-    protocolVersion = 1)
-public interface RMAdminProtocolPB extends RMAdminProtocolService.BlockingInterface {
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FsConstants;
+import org.junit.Test;
 
+public class TestViewFsURIs {
+  @Test
+  public void testURIEmptyPath() throws Exception {
+    Configuration conf = new Configuration();
+    ConfigUtil.addLink(conf, "/user", new URI("file://foo"));
+
+    FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 5f5fc26..30a414b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -54,7 +54,6 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.compress.snappy.LoadSnappy;
 import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
 import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
 import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
@@ -103,14 +102,9 @@
   
   @Test
   public void testSnappyCodec() throws IOException {
-    if (LoadSnappy.isAvailable()) {
-      if (LoadSnappy.isLoaded()) {
-        codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
-        codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
-      }
-      else {
-        Assert.fail("Snappy native available but Hadoop native not");
-      }
+    if (SnappyCodec.isNativeCodeLoaded()) {
+      codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
+      codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
     }
   }
   
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index fed7051..52078795 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.log4j.Level;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /** Unit tests for using Sasl over RPC. */
@@ -76,7 +77,8 @@
   static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
   
   private static Configuration conf;
-  static {
+  @BeforeClass
+  public static void setup() {
     conf = new Configuration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
@@ -449,11 +451,25 @@
   }
   
   @Test
-  public void testDigestAuthMethod() throws Exception {
+  public void testDigestAuthMethodSecureServer() throws Exception {
+    checkDigestAuthMethod(true);
+  }
+
+  @Test
+  public void testDigestAuthMethodInsecureServer() throws Exception {
+    checkDigestAuthMethod(false);
+  }
+
+  private void checkDigestAuthMethod(boolean secureServer) throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
         .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();      
+    if (secureServer) {
+      server.enableSecurity();
+    } else {
+      server.disableSecurity();
+    }
     server.start();
 
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 4820f1b..dd0c955 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -115,7 +115,10 @@
       
       Thread.sleep(checkEveryMillis);
     } while (Time.now() - st < waitForMillis);
-    throw new TimeoutException("Timed out waiting for condition");
+    
+    throw new TimeoutException("Timed out waiting for condition. " +
+        "Thread diagnostics:\n" +
+        TimedOutTestsListener.buildThreadDiagnosticString());
   }
   
   public static class LogCapturer {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
index a10edbb..220ab1da 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
@@ -58,19 +58,28 @@
         && failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
       output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
       output.println();
-      DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
-      output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
-      output.println();
-      output.println(buildThreadDump());
-      
-      String deadlocksInfo = buildDeadlockInfo();
-      if (deadlocksInfo != null) {
-        output.println("====> DEADLOCKS DETECTED <====");
-        output.println();
-        output.println(deadlocksInfo);
-      }
+      output.print(buildThreadDiagnosticString());
     }
   }
+  
+  public static String buildThreadDiagnosticString() {
+    StringWriter sw = new StringWriter();
+    PrintWriter output = new PrintWriter(sw);
+    
+    DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
+    output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
+    output.println();
+    output.println(buildThreadDump());
+    
+    String deadlocksInfo = buildDeadlockInfo();
+    if (deadlocksInfo != null) {
+      output.println("====> DEADLOCKS DETECTED <====");
+      output.println();
+      output.println(deadlocksInfo);
+    }
+
+    return sw.toString();
+  }
 
   static String buildThreadDump() {
     StringBuilder dump = new StringBuilder();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
new file mode 100644
index 0000000..e50ae61
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+public class TestNativeCodeLoader {
+  static final Log LOG = LogFactory.getLog(TestNativeCodeLoader.class);
+
+  private static boolean requireTestJni() {
+    String rtj = System.getProperty("require.test.libhadoop");
+    if (rtj == null) return false;
+    if (rtj.compareToIgnoreCase("false") == 0) return false;
+    return true;
+  }
+
+  @Test
+  public void testNativeCodeLoaded() {
+    if (requireTestJni() == false) {
+      LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
+      return;
+    }
+    if (!NativeCodeLoader.isNativeCodeLoaded()) {
+      fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
+          "libhadoop.so was not loaded.");
+    }
+    LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 0ab8649..8d2fa15 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -282,6 +282,19 @@
     }
   }
 
+  @Test
+  public void testSimpleHostName() {
+    assertEquals("Should return hostname when FQDN is specified",
+            "hadoop01",
+            StringUtils.simpleHostname("hadoop01.domain.com"));
+    assertEquals("Should return hostname when only hostname is specified",
+            "hadoop01",
+            StringUtils.simpleHostname("hadoop01"));
+    assertEquals("Should not truncate when IP address is passed",
+            "10.10.5.68",
+            StringUtils.simpleHostname("10.10.5.68"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
     final String TO_SPLIT = "foo,bar,baz,blah,blah";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 51752b6..b2a2805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -261,7 +261,7 @@
     /**
      * Parameter name.
      */
-    public static final String NAME = "len";
+    public static final String NAME = "length";
 
     /**
      * Constructor.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 910eeef..6057a48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -24,6 +24,7 @@
 import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.Writer;
@@ -146,6 +147,7 @@
     conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
              HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
     conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
+    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
     File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
     os = new FileOutputStream(httpfsSite);
     conf.writeXml(os);
@@ -233,6 +235,31 @@
   @TestDir
   @TestJetty
   @TestHdfs
+  public void testOpenOffsetLength() throws Exception {
+    createHttpFSServer(false);
+
+    byte[] array = new byte[]{0, 1, 2, 3};
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path("/tmp"));
+    OutputStream os = fs.create(new Path("/tmp/foo"));
+    os.write(array);
+    os.close();
+
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(),
+                      MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    InputStream is = conn.getInputStream();
+    Assert.assertEquals(1, is.read());
+    Assert.assertEquals(2, is.read());
+    Assert.assertEquals(-1, is.read());
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
   public void testPutNoOperation() throws Exception {
     createHttpFSServer(false);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e7a4ab8..012ece7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -137,6 +137,16 @@
     HDFS-3880. Use Builder to build RPC server in HDFS.
     (Brandon Li via suresh)
 
+    HDFS-2127. Add a test that ensure AccessControlExceptions contain
+    a full path. (Stephen Chu via eli)
+
+    HDFS-3995. Use DFSTestUtil.createFile() for file creation and 
+    writing in test cases. (Jing Zhao via suresh)
+
+    HDFS-3735. NameNode WebUI should allow sorting live datanode list by fields
+    Block Pool Used, Block Pool Used(%) and Failed Volumes.
+    (Brahma Reddy Battula via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -235,6 +245,31 @@
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
+    HDFS-3373. Change DFSClient input stream socket cache to global static and add
+    a thread to cleanup expired cache entries. (John George via szetszwo)
+
+    HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
+    dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
+
+    HDFS-3996. Add debug log removed in HDFS-3873 back. (eli)
+
+    HDFS-3916. libwebhdfs (C client) code cleanups.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3813. Log error message if security and WebHDFS are enabled but
+    principal/keytab are not configured. (Stephen Chu via atm)
+
+    HDFS-3483. Better error message when hdfs fsck is run against a ViewFS
+    config. (Stephen Fritz via atm)
+
+    HDFS-3682. MiniDFSCluster#init should provide more info when it fails.
+    (todd via eli)
+
+    HDFS-4008. TestBalancerWithEncryptedTransfer needs a timeout. (eli)
+
+    HDFS-4007. Rehabilitate bit-rotted unit tests under
+    hadoop-hdfs-project/hadoop-hdfs/src/test/unit/ (Colin Patrick McCabe via todd)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -259,6 +294,31 @@
 
     HDFS-3964. Make NN log of fs.defaultFS debug rather than info. (eli)
 
+    HDFS-3992. Method org.apache.hadoop.hdfs.TestHftpFileSystem.tearDown()
+    sometimes throws NPEs. (Ivan A. Veselovsky via atm)
+
+    HDFS-3753. Tests don't run with native libraries.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-4000. TestParallelLocalRead fails with "input ByteBuffers
+    must be direct buffers". (Colin Patrick McCabe via eli)
+
+    HDFS-3999. HttpFS OPEN operation expects len parameter, it should be length. (tucu)
+
+    HDFS-4006. TestCheckpoint#testSecondaryHasVeryOutOfDateImage
+    occasionally fails due to unexpected exit. (todd via eli)
+
+    HDFS-4003. test-patch should build the common native libs before
+    running hdfs tests. (Colin Patrick McCabe via eli)
+
+    HDFS-4018. testMiniDFSClusterWithMultipleNN is missing some
+    cluster cleanup. (eli)
+
+    HDFS-4020. TestRBWBlockInvalidation may time out. (eli)
+
+    HDFS-4021. Misleading error message when resources are low on the NameNode.
+    (Christopher Conner via atm)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -792,6 +852,8 @@
     HDFS-3938. remove current limitations from HttpFS docs. (tucu)
 
     HDFS-3944. Httpfs resolveAuthority() is not resolving host correctly. (tucu)
+
+    HDFS-3972. Trash emptier fails in secure HA cluster. (todd via eli)
  
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
@@ -1628,6 +1690,27 @@
     
     HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
 
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7  (Trevor
+    Robinson via tgraves)
+
+    HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
+    Robinson via tgraves)
+
+    HDFS-3224. Bug in check for DN re-registration with different storage ID
+    (jlowe)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1640,7 +1723,10 @@
 
   BUG FIXES
 
-Release 0.23.3 - UNRELEASED
+    HDFS-3831. Failure to renew tokens due to test-sources left in classpath
+    (jlowe via bobby)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 17caa1b..d0b3c06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -85,8 +85,8 @@
 
 add_dual_library(hdfs
     main/native/libhdfs/exception.c
-    main/native/libhdfs/hdfs.c
     main/native/libhdfs/jni_helper.c
+    main/native/libhdfs/hdfs.c
 )
 target_link_dual_libraries(hdfs
     ${JAVA_JVM_LIBRARY}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
index ca47363..4529cdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/CMakeLists.txt
@@ -16,28 +16,21 @@
 # limitations under the License.
 #
 
-find_package(CURL)
-if (CURL_FOUND)
-    include_directories(${CURL_INCLUDE_DIRS})
-else (CURL_FOUND)
-    MESSAGE(STATUS "Failed to find CURL library.")
-endif (CURL_FOUND)
+find_package(CURL REQUIRED)
 
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
 "${CMAKE_SOURCE_DIR}/contrib/libwebhdfs/resources/")
-MESSAGE("CMAKE_MODULE_PATH IS: " ${CMAKE_MODULE_PATH})
 
-find_package(Jansson)
+find_package(Jansson REQUIRED)
 include_directories(${JANSSON_INCLUDE_DIR})
 
 add_dual_library(webhdfs
-    src/exception.c
     src/hdfs_web.c
-    src/hdfs_jni.c
-    src/jni_helper.c
     src/hdfs_http_client.c
     src/hdfs_http_query.c
     src/hdfs_json_parser.c
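+    # Share the exception and JNI helper sources with libhdfs instead of
+    # keeping duplicate copies under libwebhdfs.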
+    ../../main/native/libhdfs/exception.c
+    ../../main/native/libhdfs/jni_helper.c
 )
 target_link_dual_libraries(webhdfs
     ${JAVA_JVM_LIBRARY}
@@ -55,10 +48,6 @@
 )
 target_link_libraries(test_libwebhdfs_ops
     webhdfs
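+    # CURL, the JVM, Jansson, and pthread are expected to be picked up
+    # transitively through the webhdfs library target.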
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_read
@@ -66,10 +55,6 @@
 )
 target_link_libraries(test_libwebhdfs_read
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_write
@@ -77,10 +62,6 @@
 )
 target_link_libraries(test_libwebhdfs_write
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
 
 add_executable(test_libwebhdfs_threaded
@@ -88,8 +69,4 @@
 )
 target_link_libraries(test_libwebhdfs_threaded
     webhdfs
-    ${CURL_LIBRARY}
-    ${JAVA_JVM_LIBRARY}
-    ${JANSSON_LIBRARY}
-    pthread
 )
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.c
deleted file mode 100644
index 5c49eee..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exception.h"
-#include "webhdfs.h"
-#include "jni_helper.h"
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define EXCEPTION_INFO_LEN (sizeof(gExceptionInfo)/sizeof(gExceptionInfo[0]))
-
-struct ExceptionInfo {
-    const char * const name;
-    int noPrintFlag;
-    int excErrno;
-};
-
-static const struct ExceptionInfo gExceptionInfo[] = {
-    {
-        .name = "java/io/FileNotFoundException",
-        .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
-        .excErrno = ENOENT,
-    },
-    {
-        .name = "org/apache/hadoop/security/AccessControlException",
-        .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
-        .excErrno = EACCES,
-    },
-    {
-        .name = "org/apache/hadoop/fs/UnresolvedLinkException",
-        .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
-        .excErrno = ENOLINK,
-    },
-    {
-        .name = "org/apache/hadoop/fs/ParentNotDirectoryException",
-        .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
-        .excErrno = ENOTDIR,
-    },
-    {
-        .name = "java/lang/IllegalArgumentException",
-        .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
-        .excErrno = EINVAL,
-    },
-    {
-        .name = "java/lang/OutOfMemoryError",
-        .noPrintFlag = 0,
-        .excErrno = ENOMEM,
-    },
-    
-};
-
-int printExceptionWebV(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, va_list ap)
-{
-    int i, noPrint, excErrno;
-    if (!exc) {
-        fprintf(stderr, "printExceptionWebV: the hdfs_exception_msg is NULL\n");
-        return EINTERNAL;
-    }
-    
-    for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
-        if (strstr(gExceptionInfo[i].name, exc->exception)) {
-            break;
-        }
-    }
-    if (i < EXCEPTION_INFO_LEN) {
-        noPrint = (gExceptionInfo[i].noPrintFlag & noPrintFlags);
-        excErrno = gExceptionInfo[i].excErrno;
-    } else {
-        noPrint = 0;
-        excErrno = EINTERNAL;
-    }
-    
-    if (!noPrint) {
-        vfprintf(stderr, fmt, ap);
-        fprintf(stderr, " error:\n");
-        fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n", exc->exception, exc->javaClassName, exc->message);
-    }
-    
-    free(exc);
-    return excErrno;
-}
-
-int printExceptionWeb(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, ...)
-{
-    va_list ap;
-    int ret;
-    
-    va_start(ap, fmt);
-    ret = printExceptionWebV(exc, noPrintFlags, fmt, ap);
-    va_end(ap);
-    return ret;
-}
-
-int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
-        const char *fmt, va_list ap)
-{
-    int i, noPrint, excErrno;
-    char *className = NULL;
-    jstring jStr = NULL;
-    jvalue jVal;
-    jthrowable jthr;
-
-    jthr = classNameOfObject(exc, env, &className);
-    if (jthr) {
-        fprintf(stderr, "PrintExceptionAndFree: error determining class name "
-            "of exception.\n");
-        className = strdup("(unknown)");
-        destroyLocalReference(env, jthr);
-    }
-    for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
-        if (!strcmp(gExceptionInfo[i].name, className)) {
-            break;
-        }
-    }
-    if (i < EXCEPTION_INFO_LEN) {
-        noPrint = (gExceptionInfo[i].noPrintFlag & noPrintFlags);
-        excErrno = gExceptionInfo[i].excErrno;
-    } else {
-        noPrint = 0;
-        excErrno = EINTERNAL;
-    }
-    if (!noPrint) {
-        vfprintf(stderr, fmt, ap);
-        fprintf(stderr, " error:\n");
-
-        // We don't want to  use ExceptionDescribe here, because that requires a
-        // pending exception.  Instead, use ExceptionUtils.
-        jthr = invokeMethod(env, &jVal, STATIC, NULL, 
-            "org/apache/commons/lang/exception/ExceptionUtils",
-            "getStackTrace", "(Ljava/lang/Throwable;)Ljava/lang/String;", exc);
-        if (jthr) {
-            fprintf(stderr, "(unable to get stack trace for %s exception: "
-                    "ExceptionUtils::getStackTrace error.)\n", className);
-            destroyLocalReference(env, jthr);
-        } else {
-            jStr = jVal.l;
-            const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
-            if (!stackTrace) {
-                fprintf(stderr, "(unable to get stack trace for %s exception: "
-                        "GetStringUTFChars error.)\n", className);
-            } else {
-                fprintf(stderr, "%s", stackTrace);
-                (*env)->ReleaseStringUTFChars(env, jStr, stackTrace);
-            }
-        }
-    }
-    destroyLocalReference(env, jStr);
-    destroyLocalReference(env, exc);
-    free(className);
-    return excErrno;
-}
-
-int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
-        const char *fmt, ...)
-{
-    va_list ap;
-    int ret;
-
-    va_start(ap, fmt);
-    ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
-    va_end(ap);
-    return ret;
-}
-
-int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
-        const char *fmt, ...)
-{
-    va_list ap;
-    int ret;
-    jthrowable exc;
-
-    exc = (*env)->ExceptionOccurred(env);
-    if (!exc) {
-        va_start(ap, fmt);
-        vfprintf(stderr, fmt, ap);
-        va_end(ap);
-        fprintf(stderr, " error: (no exception)");
-        ret = 0;
-    } else {
-        (*env)->ExceptionClear(env);
-        va_start(ap, fmt);
-        ret = printExceptionAndFreeV(env, exc, noPrintFlags, fmt, ap);
-        va_end(ap);
-    }
-    return ret;
-}
-
-jthrowable getPendingExceptionAndClear(JNIEnv *env)
-{
-    jthrowable jthr = (*env)->ExceptionOccurred(env);
-    if (!jthr)
-        return NULL;
-    (*env)->ExceptionClear(env);
-    return jthr;
-}
-
-jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
-{
-    char buf[512];
-    jobject out, exc;
-    jstring jstr;
-    va_list ap;
-
-    va_start(ap, fmt);
-    vsnprintf(buf, sizeof(buf), fmt, ap);
-    va_end(ap);
-    jstr = (*env)->NewStringUTF(env, buf);
-    if (!jstr) {
-        // We got an out of memory exception rather than a RuntimeException.
-        // Too bad...
-        return getPendingExceptionAndClear(env);
-    }
-    exc = constructNewObjectOfClass(env, &out, "RuntimeException",
-        "(java/lang/String;)V", jstr);
-    (*env)->DeleteLocalRef(env, jstr);
-    // Again, we'll either get an out of memory exception or the
-    // RuntimeException we wanted.
-    return (exc) ? exc : out;
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.h
deleted file mode 100644
index 44f7953..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/exception.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_EXCEPTION_H
-#define LIBHDFS_EXCEPTION_H
-
-/**
- * Exception handling routines for libhdfs.
- *
- * The convention we follow here is to clear pending exceptions as soon as they
- * are raised.  Never assume that the caller of your function will clean up
- * after you-- do it yourself.  Unhandled exceptions can lead to memory leaks
- * and other undefined behavior.
- *
- * If you encounter an exception, return a local reference to it.  The caller is
- * responsible for freeing the local reference, by calling a function like
- * PrintExceptionAndFree.  (You can also free exceptions directly by calling
- * DeleteLocalRef.  However, that would not produce an error message, so it's
- * usually not what you want.)
- */
-
-#include <jni.h>
-#include <stdio.h>
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <search.h>
-#include <pthread.h>
-#include <errno.h>
-
-/**
- * Exception noprint flags
- *
- * Theses flags determine which exceptions should NOT be printed to stderr by
- * the exception printing routines.  For example, if you expect to see
- * FileNotFound, you might use NOPRINT_EXC_FILE_NOT_FOUND, to avoid filling the
- * logs with messages about routine events.
- *
- * On the other hand, if you don't expect any failures, you might pass
- * PRINT_EXC_ALL.
- *
- * You can OR these flags together to avoid printing multiple classes of
- * exceptions.
- */
-#define PRINT_EXC_ALL                           0x00
-#define NOPRINT_EXC_FILE_NOT_FOUND              0x01
-#define NOPRINT_EXC_ACCESS_CONTROL              0x02
-#define NOPRINT_EXC_UNRESOLVED_LINK             0x04
-#define NOPRINT_EXC_PARENT_NOT_DIRECTORY        0x08
-#define NOPRINT_EXC_ILLEGAL_ARGUMENT            0x10
-
-/**
- * Exception information after calling webhdfs operations
- */
-typedef struct {
-    const char *exception;
-    const char *javaClassName;
-    const char *message;
-} hdfs_exception_msg;
-
-/**
- * Print out exception information got after calling webhdfs operations
- *
- * @param exc             The exception information to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ap              Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-int printExceptionWebV(hdfs_exception_msg *exc, int noPrintFlags, const char *fmt, va_list ap);
-
-/**
- * Print out exception information got after calling webhdfs operations
- *
- * @param exc             The exception information to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-int printExceptionWeb(hdfs_exception_msg *exc, int noPrintFlags,
-                      const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-
-/**
- * Print out information about an exception and free it.
- *
- * @param env             The JNI environment
- * @param exc             The exception to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ap              Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
-        const char *fmt, va_list ap);
-
-/**
- * Print out information about an exception and free it.
- *
- * @param env             The JNI environment
- * @param exc             The exception to print and free
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
-        const char *fmt, ...) __attribute__((format(printf, 4, 5)));  
-
-/**
- * Print out information about the pending exception and free it.
- *
- * @param env             The JNI environment
- * @param noPrintFlags    Flags which determine which exceptions we should NOT
- *                        print.
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
- *
- * @return                The POSIX error number associated with the exception
- *                        object.
- */
-int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
-        const char *fmt, ...) __attribute__((format(printf, 3, 4)));  
-
-/**
- * Get a local reference to the pending exception and clear it.
- *
- * Once it is cleared, the exception will no longer be pending.  The caller will
- * have to decide what to do with the exception object.
- *
- * @param env             The JNI environment
- *
- * @return                The exception, or NULL if there was no exception
- */
-jthrowable getPendingExceptionAndClear(JNIEnv *env);
-
-/**
- * Create a new runtime error.
- *
- * This creates (but does not throw) a new RuntimeError.
- *
- * @param env             The JNI environment
- * @param fmt             Printf-style format list
- * @param ...             Printf-style varargs
- *
- * @return                A local reference to a RuntimeError
- */
-jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
-        __attribute__((format(printf, 2, 3)));
-
-#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/expect.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/expect.h
deleted file mode 100644
index 2046bd0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/expect.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H
-#define LIBHDFS_NATIVE_TESTS_EXPECT_H
-
-#include <stdio.h>
-
-#define EXPECT_ZERO(x) \
-    do { \
-        int __my_ret__ = x; \
-        if (__my_ret__) { \
-            int __my_errno__ = errno; \
-            fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
-		    "code %d (errno: %d): got nonzero from %s\n", \
-		    __LINE__, __my_ret__, __my_errno__, #x); \
-            return __my_ret__; \
-        } \
-    } while (0);
-
-#define EXPECT_NULL(x) \
-    do { \
-        void* __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != NULL) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \
-		    "got non-NULL value %p from %s\n", \
-		    __LINE__, __my_errno__, __my_ret__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NONNULL(x) \
-    do { \
-        void* __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ == NULL) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \
-		    "got NULL from %s\n", __LINE__, __my_errno__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(x, e) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ != -1) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
-                "code %d (errno: %d): expected -1 from %s\n", __LINE__, \
-                __my_ret__, __my_errno__, #x); \
-            return -1; \
-        } \
-        if (__my_errno__ != e) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
-                "code %d (errno: %d): expected errno = %d from %s\n", \
-                __LINE__, __my_ret__, __my_errno__, e, #x); \
-            return -1; \
-	} \
-    } while (0);
-
-#define EXPECT_NONZERO(x) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
-		    "code %d (errno: %d): got zero from %s\n", __LINE__, \
-                __my_ret__, __my_errno__, #x); \
-            return -1; \
-        } \
-    } while (0);
-
-#define EXPECT_NONNEGATIVE(x) \
-    do { \
-        int __my_ret__ = x; \
-        int __my_errno__ = errno; \
-        if (__my_ret__ < 0) { \
-            fprintf(stderr, "TEST_ERROR: failed on line %d with return " \
-                "code %d (errno: %d): got negative return from %s\n", \
-		    __LINE__, __my_ret__, __my_errno__, #x); \
-            return __my_ret__; \
-        } \
-    } while (0);
-
-#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
index 8d84e62..6544364 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_http_client.h
@@ -21,8 +21,42 @@
 #ifndef _HDFS_HTTP_CLIENT_H_
 #define _HDFS_HTTP_CLIENT_H_
 
-#include "webhdfs.h"
-#include <curl/curl.h>
+#include "hdfs.h" /* for tSize */
+
+#include <pthread.h> /* for pthread_t */
+#include <unistd.h> /* for size_t */
+
+enum hdfsStreamType
+{
+    UNINITIALIZED = 0,
+    INPUT = 1,
+    OUTPUT = 2,
+};
+
+/**
+ * webhdfsBuffer - used to hold the data for reads from and writes to the http connection
+ */
+typedef struct {
+    const char *wbuffer;  // The user's buffer for uploading
+    size_t remaining;     // Length of content
+    size_t offset;        // offset for reading
+    int openFlag;         // Whether hdfsOpenFile has been called before
+    int closeFlag;        // Whether to close the http connection for writing
+    pthread_mutex_t writeMutex; // Synchronization between the curl and hdfsWrite threads
+    pthread_cond_t newwrite_or_close; // The transfer thread waits on this condition
+                                      // when there is no more content to transfer in the buffer
+    pthread_cond_t transfer_finish; // Signaled when the transfer of one buffer finishes
+} webhdfsBuffer;
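+/*
+ * Intended handshake (sketch): the hdfsWrite caller refills wbuffer under
+ * writeMutex and signals newwrite_or_close; the curl transfer thread drains
+ * the buffer and signals transfer_finish once that buffer is done.
+ */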
+
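+/**
+ * Handle for a file in webhdfs: the absolute path and creation parameters,
+ * plus the upload buffer and curl transfer thread used when writing.
+ */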
+struct webhdfsFileHandle {
+    char *absPath;
+    int bufferSize;
+    short replication;
+    tSize blockSize;
+    char *datanode;
+    webhdfsBuffer *uploadBuffer;
+    pthread_t connThread;
+};
 
 enum HttpHeader {
     GET,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_jni.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_jni.c
deleted file mode 100644
index 805dd1e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_jni.c
+++ /dev/null
@@ -1,616 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include "webhdfs.h"
-#include "jni_helper.h"
-#include "exception.h"
-
-/* Some frequently used Java paths */
-#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
-#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
-#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
-#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
-#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
-#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
-#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
-#define HADOOP_ISTRM    "org/apache/hadoop/fs/FSDataInputStream"
-#define HADOOP_OSTRM    "org/apache/hadoop/fs/FSDataOutputStream"
-#define HADOOP_STAT     "org/apache/hadoop/fs/FileStatus"
-#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
-#define JAVA_NET_ISA    "java/net/InetSocketAddress"
-#define JAVA_NET_URI    "java/net/URI"
-#define JAVA_STRING     "java/lang/String"
-
-#define JAVA_VOID       "V"
-
-/* Macros for constructing method signatures */
-#define JPARAM(X)           "L" X ";"
-#define JARRPARAM(X)        "[L" X ";"
-#define JMETHOD1(X, R)      "(" X ")" R
-#define JMETHOD2(X, Y, R)   "(" X Y ")" R
-#define JMETHOD3(X, Y, Z, R)   "(" X Y Z")" R
-
-#define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
-
-/**
- * Helper function to create a org.apache.hadoop.fs.Path object.
- * @param env: The JNIEnv pointer.
- * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
- * object.
- * @return Returns a jobject on success and NULL on error.
- */
-static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
-                                           jobject *out)
-{
-    jthrowable jthr;
-    jstring jPathString;
-    jobject jPath;
-    
-    //Construct a java.lang.String object
-    jthr = newJavaStr(env, path, &jPathString);
-    if (jthr)
-        return jthr;
-    //Construct the org.apache.hadoop.fs.Path object
-    jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
-                                     "(Ljava/lang/String;)V", jPathString);
-    destroyLocalReference(env, jPathString);
-    if (jthr)
-        return jthr;
-    *out = jPath;
-    return NULL;
-}
-
-/**
- * Set a configuration value.
- *
- * @param env               The JNI environment
- * @param jConfiguration    The configuration object to modify
- * @param key               The key to modify
- * @param value             The value to set the key to
- *
- * @return                  NULL on success; exception otherwise
- */
-static jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
-                                   const char *key, const char *value)
-{
-    jthrowable jthr;
-    jstring jkey = NULL, jvalue = NULL;
-    
-    jthr = newJavaStr(env, key, &jkey);
-    if (jthr)
-        goto done;
-    jthr = newJavaStr(env, value, &jvalue);
-    if (jthr)
-        goto done;
-    jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
-                        HADOOP_CONF, "set", JMETHOD2(JPARAM(JAVA_STRING),
-                                                     JPARAM(JAVA_STRING), JAVA_VOID),
-                        jkey, jvalue);
-    if (jthr)
-        goto done;
-done:
-    destroyLocalReference(env, jkey);
-    destroyLocalReference(env, jvalue);
-    return jthr;
-}
-
-static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
-                                   const char *key, char **val)
-{
-    jthrowable jthr;
-    jvalue jVal;
-    jstring jkey = NULL, jRet = NULL;
-    
-    jthr = newJavaStr(env, key, &jkey);
-    if (jthr)
-        goto done;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
-                        HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
-                                                     JPARAM(JAVA_STRING)), jkey);
-    if (jthr)
-        goto done;
-    jRet = jVal.l;
-    jthr = newCStr(env, jRet, val);
-done:
-    destroyLocalReference(env, jkey);
-    destroyLocalReference(env, jRet);
-    return jthr;
-}
-
-int hdfsConfGetStr(const char *key, char **val)
-{
-    JNIEnv *env;
-    int ret;
-    jthrowable jthr;
-    jobject jConfiguration = NULL;
-    
-    env = getJNIEnv();
-    if (env == NULL) {
-        ret = EINTERNAL;
-        goto done;
-    }
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsConfGetStr(%s): new Configuration", key);
-        goto done;
-    }
-    jthr = hadoopConfGetStr(env, jConfiguration, key, val);
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsConfGetStr(%s): hadoopConfGetStr", key);
-        goto done;
-    }
-    ret = 0;
-done:
-    destroyLocalReference(env, jConfiguration);
-    if (ret)
-        errno = ret;
-    return ret;
-}
-
-void hdfsConfStrFree(char *val)
-{
-    free(val);
-}
-
-static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
-                                   const char *key, int32_t *val)
-{
-    jthrowable jthr = NULL;
-    jvalue jVal;
-    jstring jkey = NULL;
-    
-    jthr = newJavaStr(env, key, &jkey);
-    if (jthr)
-        return jthr;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
-                        HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
-                        jkey, (jint)(*val));
-    destroyLocalReference(env, jkey);
-    if (jthr)
-        return jthr;
-    *val = jVal.i;
-    return NULL;
-}
-
-int hdfsConfGetInt(const char *key, int32_t *val)
-{
-    JNIEnv *env;
-    int ret;
-    jobject jConfiguration = NULL;
-    jthrowable jthr;
-    
-    env = getJNIEnv();
-    if (env == NULL) {
-        ret = EINTERNAL;
-        goto done;
-    }
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsConfGetInt(%s): new Configuration", key);
-        goto done;
-    }
-    jthr = hadoopConfGetInt(env, jConfiguration, key, val);
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsConfGetInt(%s): hadoopConfGetInt", key);
-        goto done;
-    }
-    ret = 0;
-done:
-    destroyLocalReference(env, jConfiguration);
-    if (ret)
-        errno = ret;
-    return ret;
-}
-
-/**
- * Calculate the effective URI to use, given a builder configuration.
- *
- * If there is not already a URI scheme, we prepend 'hdfs://'.
- *
- * If there is not already a port specified, and a port was given to the
- * builder, we suffix that port.  If there is a port specified but also one in
- * the URI, that is an error.
- *
- * @param bld       The hdfs builder object
- * @param uri       (out param) dynamically allocated string representing the
- *                  effective URI
- *
- * @return          0 on success; error code otherwise
- */
-static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
-{
-    const char *scheme;
-    char suffix[64];
-    const char *lastColon;
-    char *u;
-    size_t uriLen;
-    
-    if (!bld->nn_jni)
-        return EINVAL;
-    scheme = (strstr(bld->nn_jni, "://")) ? "" : "hdfs://";
-    if (bld->port == 0) {
-        suffix[0] = '\0';
-    } else {
-        lastColon = rindex(bld->nn_jni, ':');
-        if (lastColon && (strspn(lastColon + 1, "0123456789") ==
-                          strlen(lastColon + 1))) {
-            fprintf(stderr, "port %d was given, but URI '%s' already "
-                    "contains a port!\n", bld->port, bld->nn_jni);
-            return EINVAL;
-        }
-        snprintf(suffix, sizeof(suffix), ":%d", bld->port);
-    }
-    
-    uriLen = strlen(scheme) + strlen(bld->nn_jni) + strlen(suffix);
-    u = malloc((uriLen + 1) * (sizeof(char)));
-    if (!u) {
-        fprintf(stderr, "calcEffectiveURI: out of memory");
-        return ENOMEM;
-    }
-    snprintf(u, uriLen + 1, "%s%s%s", scheme, bld->nn_jni, suffix);
-    *uri = u;
-    return 0;
-}
-
-static const char *maybeNull(const char *str)
-{
-    return str ? str : "(NULL)";
-}
-
-const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
-                                    char *buf, size_t bufLen)
-{
-    snprintf(buf, bufLen, "forceNewInstance=%d, nn=%s, port=%d, "
-             "kerbTicketCachePath=%s, userName=%s, workingDir=%s\n",
-             bld->forceNewInstance, maybeNull(bld->nn), bld->port,
-             maybeNull(bld->kerbTicketCachePath),
-             maybeNull(bld->userName), maybeNull(bld->workingDir));
-    return buf;
-}
-
-/*
- * The JNI version of builderConnect, return the reflection of FileSystem
- */
-jobject hdfsBuilderConnect_JNI(JNIEnv *env, struct hdfsBuilder *bld)
-{
-    jobject jConfiguration = NULL, jFS = NULL, jURI = NULL, jCachePath = NULL;
-    jstring jURIString = NULL, jUserString = NULL;
-    jvalue  jVal;
-    jthrowable jthr = NULL;
-    char *cURI = 0, buf[512];
-    int ret;
-    jobject jRet = NULL;
-    
-    //  jConfiguration = new Configuration();
-    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsBuilderConnect_JNI(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-        goto done;
-    }
-    
-    //Check what type of FileSystem the caller wants...
-    if (bld->nn_jni == NULL) {
-        // Get a local filesystem.
-        // Also handle the scenario where nn of hdfsBuilder is set to localhost.
-        if (bld->forceNewInstance) {
-            // fs = FileSytem#newInstanceLocal(conf);
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                                "newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
-                                                             JPARAM(HADOOP_LOCALFS)), jConfiguration);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jFS = jVal.l;
-        } else {
-            // fs = FileSytem#getLocal(conf);
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
-                                JMETHOD1(JPARAM(HADOOP_CONF),
-                                         JPARAM(HADOOP_LOCALFS)),
-                                jConfiguration);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jFS = jVal.l;
-        }
-    } else {
-        if (!strcmp(bld->nn_jni, "default")) {
-            // jURI = FileSystem.getDefaultUri(conf)
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                                "getDefaultUri",
-                                "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
-                                jConfiguration);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jURI = jVal.l;
-        } else {
-            // fs = FileSystem#get(URI, conf, ugi);
-            ret = calcEffectiveURI(bld, &cURI);
-            if (ret)
-                goto done;
-            jthr = newJavaStr(env, cURI, &jURIString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
-                                "create", "(Ljava/lang/String;)Ljava/net/URI;",
-                                jURIString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jURI = jVal.l;
-        }
-        
-        if (bld->kerbTicketCachePath) {
-            jthr = hadoopConfSetStr(env, jConfiguration,
-                                    KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-        }
-        jthr = newJavaStr(env, bld->userName, &jUserString);
-        if (jthr) {
-            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                        "hdfsBuilderConnect_JNI(%s)",
-                                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
-            goto done;
-        }
-        if (bld->forceNewInstance) {
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
-                                "newInstance", JMETHOD3(JPARAM(JAVA_NET_URI),
-                                                        JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
-                                                        JPARAM(HADOOP_FS)),
-                                jURI, jConfiguration, jUserString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jFS = jVal.l;
-        } else {
-            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
-                                JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
-                                         JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
-                                jURI, jConfiguration, jUserString);
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect_JNI(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            jFS = jVal.l;
-        }
-    }
-    jRet = (*env)->NewGlobalRef(env, jFS);
-    if (!jRet) {
-        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
-                                           "hdfsBuilderConnect_JNI(%s)",
-                                           hdfsBuilderToStr(bld, buf, sizeof(buf)));
-        goto done;
-    }
-    ret = 0;
-    
-done:
-    // Release unnecessary local references
-    destroyLocalReference(env, jConfiguration);
-    destroyLocalReference(env, jFS);
-    destroyLocalReference(env, jURI);
-    destroyLocalReference(env, jCachePath);
-    destroyLocalReference(env, jURIString);
-    destroyLocalReference(env, jUserString);
-    free(cURI);
-    
-    if (ret) {
-        errno = ret;
-        return NULL;
-    }
-    return jRet;
-}
-
-int hdfsDisconnect_JNI(jobject jFS)
-{
-    // JAVA EQUIVALENT:
-    //  fs.close()
-    
-    //Get the JNIEnv* corresponding to current thread
-    JNIEnv* env = getJNIEnv();
-    int ret;
-    
-    if (env == NULL) {
-        errno = EINTERNAL;
-        return -1;
-    }
-    
-    //Sanity check
-    if (jFS == NULL) {
-        errno = EBADF;
-        return -1;
-    }
-    
-    jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
-                                   "close", "()V");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsDisconnect: FileSystem#close");
-    } else {
-        ret = 0;
-    }
-    (*env)->DeleteGlobalRef(env, jFS);
-    if (ret) {
-        errno = ret;
-        return -1;
-    }
-    return 0;
-}
-
-static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
-                        const char* dst, jboolean deleteSource)
-{
-    //JAVA EQUIVALENT
-    //  FileUtil#copy(srcFS, srcPath, dstFS, dstPath,
-    //                 deleteSource = false, conf)
-    
-    //Get the JNIEnv* corresponding to current thread
-    JNIEnv* env = getJNIEnv();
-    if (env == NULL) {
-        errno = EINTERNAL;
-        return -1;
-    }
-    
-    //In libwebhdfs, the hdfsFS derived from hdfsBuilderConnect series functions
-    //is actually a hdfsBuilder instance containing address information of NameNode.
-    //Thus here we need to use JNI to get the real java FileSystem objects.
-    jobject jSrcFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) srcFS);
-    jobject jDstFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) dstFS);
-    
-    //Parameters
-    jobject jConfiguration = NULL, jSrcPath = NULL, jDstPath = NULL;
-    jthrowable jthr;
-    jvalue jVal;
-    int ret;
-    
-    jthr = constructNewObjectOfPath(env, src, &jSrcPath);
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsCopyImpl(src=%s): constructNewObjectOfPath", src);
-        goto done;
-    }
-    jthr = constructNewObjectOfPath(env, dst, &jDstPath);
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsCopyImpl(dst=%s): constructNewObjectOfPath", dst);
-        goto done;
-    }
-    
-    //Create the org.apache.hadoop.conf.Configuration object
-    jthr = constructNewObjectOfClass(env, &jConfiguration,
-                                     HADOOP_CONF, "()V");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsCopyImpl: Configuration constructor");
-        goto done;
-    }
-    
-    //FileUtil#copy
-    jthr = invokeMethod(env, &jVal, STATIC,
-                        NULL, "org/apache/hadoop/fs/FileUtil", "copy",
-                        "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
-                        "Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
-                        "ZLorg/apache/hadoop/conf/Configuration;)Z",
-                        jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
-                        jConfiguration);
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "hdfsCopyImpl(src=%s, dst=%s, deleteSource=%d): "
-                                    "FileUtil#copy", src, dst, deleteSource);
-        goto done;
-    }
-    if (!jVal.z) {
-        ret = EIO;
-        goto done;
-    }
-    ret = 0;
-    
-done:
-    destroyLocalReference(env, jConfiguration);
-    destroyLocalReference(env, jSrcPath);
-    destroyLocalReference(env, jDstPath);
-    //Disconnect src/dst FileSystem
-    hdfsDisconnect_JNI(jSrcFS);
-    hdfsDisconnect_JNI(jDstFS);
-    
-    if (ret) {
-        errno = ret;
-        return -1;
-    }
-    return 0;
-}
-
-int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
-{
-    return hdfsCopyImpl(srcFS, src, dstFS, dst, 0);
-}
-
-int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
-{
-    return hdfsCopyImpl(srcFS, src, dstFS, dst, 1);
-}
-
-tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
-{
-    // JAVA EQUIVALENT:
-    //  fs.getDefaultBlockSize();
-    
-    //Get the JNIEnv* corresponding to current thread
-    JNIEnv* env = getJNIEnv();
-    if (env == NULL) {
-        errno = EINTERNAL;
-        return -1;
-    }
-    
-    //In libwebhdfs, the hdfsFS derived from hdfsConnect functions
-    //is actually a hdfsBuilder instance containing address information of NameNode.
-    //Thus here we need to use JNI to get the real java FileSystem objects.
-    jobject jFS = hdfsBuilderConnect_JNI(env, (struct hdfsBuilder *) fs);
-    
-    //FileSystem#getDefaultBlockSize()
-    jvalue jVal;
-    jthrowable jthr;
-    jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
-                        "getDefaultBlockSize", "()J");
-    if (jthr) {
-        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                      "hdfsGetDefaultBlockSize: FileSystem#getDefaultBlockSize");
-        //Disconnect
-        hdfsDisconnect_JNI(jFS);
-        return -1;
-    }
-    
-    //Disconnect
-    hdfsDisconnect_JNI(jFS);
-    return jVal.j;
-}
-
-
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
index 5834c1e..5973fa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.c
@@ -15,14 +15,76 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+#include "exception.h"
+#include "hdfs.h" /* for hdfsFileInfo */
+#include "hdfs_json_parser.h"
+
 #include <stdlib.h>
 #include <string.h>
 #include <ctype.h>
 #include <jansson.h>
-#include "hdfs_json_parser.h"
-#include "exception.h"
 
-hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation); //Forward Declaration
+/**
+ * Exception information after calling JSON operations
+ */
+struct jsonException {
+  const char *exception;
+  const char *javaClassName;
+  const char *message;
+};
+
+static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
+                           int *numEntries, const char *operation);
+
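+/*
+ * Convert a dotted Java class name (e.g. "java.io.IOException") to the
+ * slashed form ("java/io/IOException") expected by the shared exception
+ * table behind getExceptionInfo().
+ */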
+static void dotsToSlashes(char *str)
+{
+    for (; *str != '\0'; str++) {
+        if (*str == '.')
+            *str = '/';
+    }
+}
+
+int printJsonExceptionV(struct jsonException *exc, int noPrintFlags,
+                        const char *fmt, va_list ap)
+{
+    char *javaClassName = NULL;
+    int excErrno = EINTERNAL, shouldPrint = 0;
+    if (!exc) {
+        fprintf(stderr, "printJsonExceptionV: the jsonException is NULL\n");
+        return EINTERNAL;
+    }
+    javaClassName = strdup(exc->javaClassName);
+    if (!javaClassName) {
+        fprintf(stderr, "printJsonExceptionV: internal out of memory error\n");
+        return EINTERNAL;
+    }
+    dotsToSlashes(javaClassName);
+    getExceptionInfo(javaClassName, noPrintFlags, &excErrno, &shouldPrint);
+    free(javaClassName);
+    
+    if (shouldPrint) {
+        vfprintf(stderr, fmt, ap);
+        fprintf(stderr, " error:\n");
+        fprintf(stderr, "Exception: %s\nJavaClassName: %s\nMessage: %s\n",
+                exc->exception, exc->javaClassName, exc->message);
+    }
+    
+    free(exc);
+    return excErrno;
+}
+
+int printJsonException(struct jsonException *exc, int noPrintFlags,
+                       const char *fmt, ...)
+{
+    va_list ap;
+    int ret;
+    
+    va_start(ap, fmt);
+    ret = printJsonExceptionV(exc, noPrintFlags, fmt, ap);
+    va_end(ap);
+    return ret;
+}
 
 static hdfsFileInfo *json_parse_array(json_t *jobj, char *key, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
     int arraylen = json_array_size(jobj);                      //Getting the length of the array
@@ -88,12 +150,12 @@
     return (parseBoolean(response));
 }
 
-hdfs_exception_msg *parseJsonException(json_t *jobj) {
+struct jsonException *parseJsonException(json_t *jobj) {
     const char *key;
     json_t *value;
-    hdfs_exception_msg *exception = NULL;
+    struct jsonException *exception = NULL;
     
-    exception = (hdfs_exception_msg *) calloc(1, sizeof(hdfs_exception_msg));
+    exception = calloc(1, sizeof(*exception));
     if (!exception) {
         return NULL;
     }
@@ -117,7 +179,7 @@
     return exception;
 }
 
-hdfs_exception_msg *parseException(const char *content) {
+struct jsonException *parseException(const char *content) {
     if (!content) {
         return NULL;
     }
@@ -145,7 +207,9 @@
     return NULL;
 }
 
-hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat, int *numEntries, const char *operation) {
+static hdfsFileInfo *parseJsonGFS(json_t *jobj, hdfsFileInfo *fileStat,
+                                  int *numEntries, const char *operation)
+{
     const char *tempstr;
     const char *key;
     json_t *value;
@@ -196,9 +260,9 @@
                     fileStat = parseJsonGFS(value, &fileStat[0], numEntries, operation);
                 } else if (!strcmp(key,"RemoteException")) {
                     //Besides returning NULL, we also need to print the exception information
-                    hdfs_exception_msg *exception = parseJsonException(value);
+                    struct jsonException *exception = parseJsonException(value);
                     if (exception) {
-                        errno = printExceptionWeb(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+                        errno = printJsonException(exception, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
                     }
                     
                     if(fileStat != NULL) {
@@ -234,9 +298,9 @@
         return 0;
     }
     if(!(strstr(header, responseCode)) || !(header = strstr(header, "Content-Length"))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
         }
         return 0;
     }
@@ -259,14 +323,14 @@
         return -1;
     }
     if(!(strstr(header,responseCode1) && strstr(header, responseCode2))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
             //if the exception is an IOException and it is because the offset is out of the range
             //do not print out the exception
             if (!strcasecmp(exc->exception, "IOException") && strstr(exc->message, "out of the range")) {
                 return 0;
             }
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (OPEN)");
         }
         return -1;
     }
@@ -297,9 +361,9 @@
     }
     if(!(tempHeader = strstr(headerstr,responseCode))) {
         //process possible exception information
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (%s)", operation);
         }
         return 0;
     }
@@ -350,9 +414,9 @@
         return 0;
     }
     if(!(strstr(header,responseCode))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (WRITE(DataNode))");
         }
         return 0;
     }
@@ -365,9 +429,9 @@
         return 0;
     }
     if(!(strstr(header, responseCode))) {
-        hdfs_exception_msg *exc = parseException(content);
+        struct jsonException *exc = parseException(content);
         if (exc) {
-            errno = printExceptionWeb(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
+            errno = printJsonException(exc, PRINT_EXC_ALL, "Calling WEBHDFS (APPEND(DataNode))");
         }
         return 0;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
index edf046f..2fbcb9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
@@ -17,7 +17,23 @@
  */
 #ifndef _HDFS_JSON_PARSER_H_
 #define _HDFS_JSON_PARSER_H_
-#include "webhdfs.h"
+
+struct jsonException;
+
+/**
+ * Print out JSON exception information.
+ *
+ * @param exc             The exception information to print and free
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param fmt             Printf-style format list
+ * @param ...             Printf-style varargs
+ *
+ * @return                The POSIX error number associated with the exception
+ *                        object.
+ */
+int printJsonException(struct jsonException *exc, int noPrintFlags,
+                       const char *fmt, ...);
 
 int parseMKDIR(char *response);
 int parseRENAME(char *response);
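For reference, a minimal call-site sketch for printJsonException, mirroring the callers earlier in this patch (parseException is this parser's own helper; PRINT_EXC_ALL is assumed to come from exception.h):

    struct jsonException *exc = parseException(content);
    if (exc) {
        /* Prints the exception (unless masked by noPrintFlags), frees exc,
         * and returns the corresponding POSIX error number. */
        errno = printJsonException(exc, PRINT_EXC_ALL,
                                   "Calling WEBHDFS (%s)", operation);
    }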
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
index 2a43971..5a80449 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_web.c
@@ -16,38 +16,63 @@
  * limitations under the License.
  */
 
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <jni.h>
-#include "webhdfs.h"
+#include "exception.h"
+#include "hdfs.h"
 #include "hdfs_http_client.h"
 #include "hdfs_http_query.h"
 #include "hdfs_json_parser.h"
 #include "jni_helper.h"
-#include "exception.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
 #define HADOOP_HDFS_CONF        "org/apache/hadoop/hdfs/HdfsConfiguration"
 #define HADOOP_NAMENODE         "org/apache/hadoop/hdfs/server/namenode/NameNode"
 #define JAVA_INETSOCKETADDRESS  "java/net/InetSocketAddress"
 
-static void initFileinfo(hdfsFileInfo *fileInfo) {
-    if (fileInfo) {
-        fileInfo->mKind = kObjectKindFile;
-        fileInfo->mName = NULL;
-        fileInfo->mLastMod = 0;
-        fileInfo->mSize = 0;
-        fileInfo->mReplication = 0;
-        fileInfo->mBlockSize = 0;
-        fileInfo->mOwner = NULL;
-        fileInfo->mGroup = NULL;
-        fileInfo->mPermissions = 0;
-        fileInfo->mLastAccess = 0;
-    }
-}
+struct hdfsBuilder {
+    int forceNewInstance;
+    const char *nn;
+    tPort port;
+    const char *kerbTicketCachePath;
+    const char *userName;
+};
 
-static webhdfsBuffer *initWebHdfsBuffer() {
-    webhdfsBuffer *buffer = (webhdfsBuffer *) calloc(1, sizeof(webhdfsBuffer));
+/**
+ * The information required for accessing webhdfs,
+ * including the network address of the namenode and the user name
+ *
+ * Unlike the strings in hdfsBuilder, the strings in this structure are
+ * dynamically allocated.  This structure will not be freed until we disconnect
+ * from HDFS.
+ */
+struct hdfs_internal {
+    char *nn;
+    tPort port;
+    char *userName;
+
+    /**
+     * Working directory -- stored with a trailing slash.
+     */
+    char *workingDir;
+};
+
+/**
+ * The 'file-handle' to a file in hdfs.
+ */
+struct hdfsFile_internal {
+    struct webhdfsFileHandle* file;
+    enum hdfsStreamType type;
+    int flags;
+    tOffset offset;
+};
+
+static webhdfsBuffer *initWebHdfsBuffer(void)
+{
+    webhdfsBuffer *buffer = calloc(1, sizeof(*buffer));
     if (!buffer) {
         fprintf(stderr, "Fail to allocate memory for webhdfsBuffer.\n");
         return NULL;
@@ -107,49 +132,36 @@
 }
 
 static void freeWebFileHandle(struct webhdfsFileHandle * handle) {
-    if (handle) {
-        freeWebhdfsBuffer(handle->uploadBuffer);
-        if (handle->datanode) {
-            free(handle->datanode);
-        }
-        if (handle->absPath) {
-            free(handle->absPath);
-        }
-        free(handle);
-        handle = NULL;
-    }
+    if (!handle)
+        return;
+    freeWebhdfsBuffer(handle->uploadBuffer);
+    free(handle->datanode);
+    free(handle->absPath);
+    free(handle);
 }
 
 struct hdfsBuilder *hdfsNewBuilder(void)
 {
     struct hdfsBuilder *bld = calloc(1, sizeof(struct hdfsBuilder));
-    if (!bld) {
+    if (!bld)
         return NULL;
-    }
-    hdfsSetWorkingDirectory(bld, "/");
     return bld;
 }
 
 void hdfsFreeBuilder(struct hdfsBuilder *bld)
 {
-    if (bld && bld->workingDir) {
-        free(bld->workingDir);
-    }
     free(bld);
 }
 
 void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld)
 {
-    if (bld) {
-        bld->forceNewInstance = 1;
-    }
+    // We don't cache instances in libwebhdfs, so this is not applicable.
 }
 
 void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
 {
     if (bld) {
         bld->nn = nn;
-        bld->nn_jni = nn;
     }
 }
 
@@ -199,7 +211,7 @@
         return NULL;
     }
     hdfsBuilderSetForceNewInstance(bld);
-    return bld;
+    return hdfsBuilderConnect(bld);
 }
 
 hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
@@ -215,290 +227,356 @@
     return hdfsBuilderConnect(bld);
 }
 
-const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
-                             char *buf, size_t bufLen);
+static const char *maybeNull(const char *str)
+{
+    return str ? str : "(NULL)";
+}
+
+static const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
+                                    char *buf, size_t bufLen)
+{
+    snprintf(buf, bufLen, "nn=%s, port=%d, "
+             "kerbTicketCachePath=%s, userName=%s",
+             maybeNull(bld->nn), bld->port,
+             maybeNull(bld->kerbTicketCachePath), maybeNull(bld->userName));
+    return buf;
+}
+
+static void freeWebHdfsInternal(struct hdfs_internal *fs)
+{
+    if (fs) {
+        free(fs->nn);
+        free(fs->userName);
+        free(fs->workingDir);
+    }
+}
+
+static int retrieveDefaults(const struct hdfsBuilder *bld, tPort *port,
+                            char **nn)
+{
+    JNIEnv *env = 0;
+    jobject jHDFSConf = NULL, jAddress = NULL;
+    jstring jHostName = NULL;
+    jvalue jVal;
+    jthrowable jthr = NULL;
+    int ret = 0;
+    char buf[512];
+    
+    // TODO: can we do this without using JNI?  See HDFS-3917
+    env = getJNIEnv();
+    if (!env) {
+        return EINTERNAL;
+    }
+    
+    //  jHDFSConf = new HDFSConfiguration();
+    jthr = constructNewObjectOfClass(env, &jHDFSConf, HADOOP_HDFS_CONF, "()V");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    
+    jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_NAMENODE, "getHttpAddress",
+                        "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/InetSocketAddress;",
+                        jHDFSConf);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                        "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    jAddress = jVal.l;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    *port = jVal.i;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getHostName", "()Ljava/lang/String;");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+    jHostName = jVal.l;
+    jthr = newCStr(env, jHostName, nn);
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "hdfsBuilderConnect(%s)",
+                                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        goto done;
+    }
+
+done:
+    destroyLocalReference(env, jHDFSConf);
+    destroyLocalReference(env, jAddress);
+    destroyLocalReference(env, jHostName);
+    return ret;
+}
 
 hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
 {
+    struct hdfs_internal *fs = NULL;
+    int ret;
+
     if (!bld) {
-        return NULL;
+        ret = EINVAL;
+        goto done;
     }
-    // if the hostname is null for the namenode, set it to localhost
-    //only handle bld->nn
     if (bld->nn == NULL) {
-        bld->nn = "localhost";
-    } else {
-        /* check whether the hostname of the namenode (nn in hdfsBuilder) has already contained the port */
-        const char *lastColon = rindex(bld->nn, ':');
-        if (lastColon && (strspn(lastColon + 1, "0123456789") == strlen(lastColon + 1))) {
-            fprintf(stderr, "port %d was given, but URI '%s' already "
-                    "contains a port!\n", bld->port, bld->nn);
-            char *newAddr = (char *)malloc(strlen(bld->nn) - strlen(lastColon) + 1);
-            if (!newAddr) {
-                return NULL;
-            }
-            strncpy(newAddr, bld->nn, strlen(bld->nn) - strlen(lastColon));
-            newAddr[strlen(bld->nn) - strlen(lastColon)] = '\0';
-            free(bld->nn);
-            bld->nn = newAddr;
-        }
+        // In the JNI version of libhdfs this returns a LocalFileSystem.
+        ret = ENOTSUP;
+        goto done;
     }
     
-    /* if the namenode is "default" and/or the port of namenode is 0, get the default namenode/port by using JNI */
+    fs = calloc(1, sizeof(*fs));
+    if (!fs) {
+        ret = ENOMEM;
+        goto done;
+    }
+    /* If the namenode is "default" and/or its port is 0, look up the
+     * default namenode/port */
     if (bld->port == 0 || !strcasecmp("default", bld->nn)) {
-        JNIEnv *env = 0;
-        jobject jHDFSConf = NULL, jAddress = NULL;
-        jvalue jVal;
-        jthrowable jthr = NULL;
-        int ret = 0;
-        char buf[512];
-        
-        //Get the JNIEnv* corresponding to current thread
-        env = getJNIEnv();
-        if (env == NULL) {
-            errno = EINTERNAL;
-            free(bld);
-            bld = NULL;
-            return NULL;
-        }
-        
-        //  jHDFSConf = new HDFSConfiguration();
-        jthr = constructNewObjectOfClass(env, &jHDFSConf, HADOOP_HDFS_CONF, "()V");
-        if (jthr) {
-            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                        "hdfsBuilderConnect(%s)",
-                                        hdfsBuilderToStr(bld, buf, sizeof(buf)));
+        ret = retrieveDefaults(bld, &fs->port, &fs->nn);
+        if (ret)
+            goto done;
+    } else {
+        fs->port = bld->port;
+        fs->nn = strdup(bld->nn);
+        if (!fs->nn) {
+            ret = ENOMEM;
             goto done;
         }
-        
-        jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_NAMENODE, "getHttpAddress",
-                            "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/InetSocketAddress;",
-                            jHDFSConf);
-        if (jthr) {
-            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                            "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
-            goto done; //free(bld), deleteReference for jHDFSConf
-        }
-        jAddress = jVal.l;
-        
-        if (bld->port == 0) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                                JAVA_INETSOCKETADDRESS, "getPort", "()I");
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            bld->port = jVal.i;
-        }
-        
-        if (!strcasecmp("default", bld->nn)) {
-            jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                                JAVA_INETSOCKETADDRESS, "getHostName", "()Ljava/lang/String;");
-            if (jthr) {
-                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                            "hdfsBuilderConnect(%s)",
-                                            hdfsBuilderToStr(bld, buf, sizeof(buf)));
-                goto done;
-            }
-            bld->nn = (const char*) ((*env)->GetStringUTFChars(env, jVal.l, NULL));
-        }
-        
-    done:
-        destroyLocalReference(env, jHDFSConf);
-        destroyLocalReference(env, jAddress);
-        if (ret) { //if there is error/exception, we free the builder and return NULL
-            free(bld);
-            bld = NULL;
+    }
+    // userName is optional and may be NULL.
+    if (bld->userName) {
+        fs->userName = strdup(bld->userName);
+        if (!fs->userName) {
+            ret = ENOMEM;
+            goto done;
         }
     }
-    
+    // The working directory starts out as root.
+    fs->workingDir = strdup("/");
+    if (!fs->workingDir) {
+        ret = ENOMEM;
+        goto done;
+    }
     //for debug
     fprintf(stderr, "namenode: %s:%d\n", bld->nn, bld->port);
-    return bld;
+
+done:
+    free(bld);
+    if (ret) {
+        freeWebHdfsInternal(fs);
+        errno = ret;
+        return NULL;
+    }
+    return fs;
 }
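A hedged connect/disconnect sketch under the semantics above; note that hdfsBuilderConnect always consumes the builder, and sets errno on failure ("/tmp" paths and the helper name are illustrative only):

    #include <errno.h>
    #include <stdio.h>
    #include "hdfs.h"

    static hdfsFS connectToDefaultNn(void)
    {
        struct hdfsBuilder *bld = hdfsNewBuilder();
        if (!bld)
            return NULL;                      /* out of memory */
        /* "default" (and/or port 0) makes hdfsBuilderConnect resolve the
         * namenode's HTTP address via JNI (retrieveDefaults above). */
        hdfsBuilderSetNameNode(bld, "default");
        hdfsFS fs = hdfsBuilderConnect(bld);  /* frees bld, even on error */
        if (!fs)
            fprintf(stderr, "connect failed: %d\n", errno);
        return fs;
    }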
 
 int hdfsDisconnect(hdfsFS fs)
 {
     if (fs == NULL) {
-        errno = EBADF;
+        errno = EINVAL;
         return -1;
-    } else {
-        free(fs);
-        fs = NULL;
     }
+    freeWebHdfsInternal(fs);
     return 0;
 }
 
-char *getAbsolutePath(hdfsFS fs, const char *path) {
-    if (fs == NULL || path == NULL) {
+static char *getAbsolutePath(hdfsFS fs, const char *path)
+{
+    char *absPath = NULL;
+    size_t absPathLen;
+    
+    if (path[0] == '/') {
+        // path is already absolute.
+        return strdup(path);
+    }
+    // prepend the workingDir to the path.
+    absPathLen = strlen(fs->workingDir) + strlen(path);
+    absPath = malloc(absPathLen + 1);
+    if (!absPath) {
         return NULL;
     }
-    char *absPath = NULL;
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    
-    if ('/' != *path && bld->workingDir) {
-        absPath = (char *)malloc(strlen(bld->workingDir) + strlen(path) + 1);
-        if (!absPath) {
-            return NULL;
-        }
-        absPath = strcpy(absPath, bld->workingDir);
-        absPath = strcat(absPath, path);
-        return absPath;
-    } else {
-        absPath = (char *)malloc(strlen(path) + 1);
-        if (!absPath) {
-            return NULL;
-        }
-        absPath = strcpy(absPath, path);
-        return absPath;
-    }
+    snprintf(absPath, absPathLen + 1, "%s%s", fs->workingDir, path);
+    return absPath;
 }
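Two illustrative inputs, relying on the trailing slash that hdfsSetWorkingDirectory maintains (see below):

    /* With fs->workingDir == "/user/alice/":
     *   getAbsolutePath(fs, "data/part-0")  returns "/user/alice/data/part-0"
     *   getAbsolutePath(fs, "/tmp/x")       returns "/tmp/x" (already absolute)
     * Either way the caller owns the result and must free() it; NULL means
     * the allocation failed.
     */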
 
 int hdfsCreateDirectory(hdfsFS fs, const char* path)
 {
-    if (fs == NULL || path == NULL) {
-        return -1;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
+    char *url = NULL, *absPath = NULL;
     Response resp = NULL;
     int ret = 0;
-    
-    if(!((url = prepareMKDIR(bld->nn, bld->port, absPath, bld->userName))
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareMKDIR(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchMKDIR(url))
          && (parseMKDIR(resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(url);
     free(absPath);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
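Every operation below follows this same shape; a condensed sketch of the convention, with hypothetical prepareOP/launchOP/parseOP standing in for the per-operation helpers:

    int hdfsSomeOp(hdfsFS fs, const char *path)
    {
        char *url = NULL, *absPath = NULL;
        Response resp = NULL;
        int ret = 0;                   /* 0, or a POSIX error number */

        if (fs == NULL || path == NULL) {
            ret = EINVAL;
            goto done;
        }
        absPath = getAbsolutePath(fs, path);
        if (!absPath) {
            ret = ENOMEM;
            goto done;
        }
        if (!((url = prepareOP(fs->nn, fs->port, absPath, fs->userName))
              && (resp = launchOP(url))
              && parseOP(resp->body->content))) {
            ret = EIO;                 /* HTTP or parse failure */
            goto done;
        }
    done:
        freeResponse(resp);            /* all cleanup funnels through here */
        free(absPath);
        free(url);
        if (ret) {
            errno = ret;
            return -1;
        }
        return 0;
    }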
 
 int hdfsChmod(hdfsFS fs, const char* path, short mode)
 {
-    if (fs == NULL || path == NULL) {
-        return -1;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
+    char *absPath = NULL, *url = NULL;
     Response resp = NULL;
     int ret = 0;
-    
-    if(!((url = prepareCHMOD(bld->nn, bld->port, absPath, (int)mode, bld->userName))
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareCHMOD(fs->nn, fs->port, absPath, (int)mode, fs->userName))
          && (resp = launchCHMOD(url))
          && (parseCHMOD(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
-    
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
 {
-    if (fs == NULL || path == NULL) {
-        return -1;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
-    Response resp = NULL;
     int ret = 0;
+    char *absPath = NULL, *url = NULL;
+    Response resp = NULL;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
     
-    if(!((url = prepareCHOWN(bld->nn, bld->port, absPath, owner, group, bld->userName))
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    
+    if(!((url = prepareCHOWN(fs->nn, fs->port, absPath, owner, group, fs->userName))
          && (resp = launchCHOWN(url))
          && (parseCHOWN(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
 {
-    if (fs == NULL || oldPath == NULL || newPath == NULL) {
-        return -1;
-    }
-    
-    char *oldAbsPath = getAbsolutePath(fs, oldPath);
-    if (!oldAbsPath) {
-        return -1;
-    }
-    char *newAbsPath = getAbsolutePath(fs, newPath);
-    if (!newAbsPath) {
-        return -1;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url=NULL;
-    Response resp = NULL;
+    char *oldAbsPath = NULL, *newAbsPath = NULL, *url = NULL;
     int ret = 0;
-    
-    if(!((url = prepareRENAME(bld->nn, bld->port, oldAbsPath, newAbsPath, bld->userName))
+    Response resp = NULL;
+
+    if (fs == NULL || oldPath == NULL || newPath == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    oldAbsPath = getAbsolutePath(fs, oldPath);
+    if (!oldAbsPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    newAbsPath = getAbsolutePath(fs, newPath);
+    if (!newAbsPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareRENAME(fs->nn, fs->port, oldAbsPath, newAbsPath, fs->userName))
          && (resp = launchRENAME(url))
          && (parseRENAME(resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
     }
-    
+done:
     freeResponse(resp);
     free(oldAbsPath);
     free(newAbsPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
 {
-    if (fs == NULL || path == NULL) {
-        return NULL;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return NULL;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
+    char *absPath = NULL;
     char *url=NULL;
     Response resp = NULL;
     int numEntries = 0;
     int ret = 0;
-    
-    hdfsFileInfo * fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
-    if (!fileInfo) {
-        ret = -1;
+    hdfsFileInfo *fileInfo = NULL;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
         goto done;
     }
-    initFileinfo(fileInfo);
-    
-    if(!((url = prepareGFS(bld->nn, bld->port, absPath, bld->userName))
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
+    if (!fileInfo) {
+        ret = ENOMEM;
+        goto done;
+    }
+    fileInfo->mKind = kObjectKindFile;
+
+    if(!((url = prepareGFS(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchGFS(url))
          && (fileInfo = parseGFS(resp->body->content, fileInfo, &numEntries))))  {
-        ret = -1;
+        ret = EIO;
         goto done;
     }
     
@@ -511,36 +589,115 @@
         return fileInfo;
     } else {
         free(fileInfo);
+        errno = ret;
         return NULL;
     }
 }
 
 hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
 {
-    if (fs == NULL || path == NULL) {
-        return NULL;
-    }
-    
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return NULL;
-    }
-
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
+    char *url = NULL, *absPath = NULL;
     Response resp = NULL;
     int ret = 0;
-    
-    hdfsFileInfo * fileInfo = (hdfsFileInfo *) calloc(1, sizeof(hdfsFileInfo));
-    if (!fileInfo) {
-        ret = -1;
+    hdfsFileInfo *fileInfo = NULL;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
         goto done;
     }
-    
-    if(!((url = prepareLS(bld->nn, bld->port, absPath, bld->userName))
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    fileInfo = calloc(1, sizeof(*fileInfo));
+    if (!fileInfo) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareLS(fs->nn, fs->port, absPath, fs->userName))
          && (resp = launchLS(url))
          && (fileInfo = parseGFS(resp->body->content, fileInfo, numEntries))))  {
-        ret = -1;
+        ret = EIO;
+        goto done;
+    }
+done:
+    freeResponse(resp);
+    free(absPath);
+    free(url);
+
+    if (ret == 0) {
+        return fileInfo;
+    } else {
+        hdfsFreeFileInfo(fileInfo, 1);
+        errno = ret;
+        return NULL;
+    }
+}
+
+int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
+{
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareSETREPLICATION(fs->nn, fs->port, absPath, replication, fs->userName))
+         && (resp = launchSETREPLICATION(url))
+         && (parseSETREPLICATION(resp->body->content)))) {
+        ret = EIO;
+        goto done;
+    }
+done:
+    freeResponse(resp);
+    free(absPath);
+    free(url);
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
+}
+
+void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
+{
+    int i;
+
+    for (i=0; i < numEntries; ++i) {
+        free(hdfsFileInfo[i].mName);
+        free(hdfsFileInfo[i].mOwner);
+        free(hdfsFileInfo[i].mGroup);
+    }
+    free(hdfsFileInfo);
+}
+
+int hdfsDelete(hdfsFS fs, const char* path, int recursive)
+{
+    char *url = NULL, *absPath = NULL;
+    Response resp = NULL;
+    int ret = 0;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareDELETE(fs->nn, fs->port, absPath, recursive, fs->userName))
+         && (resp = launchDELETE(url))
+         && (parseDELETE(resp->body->content)))) {
+        ret = EIO;
         goto done;
     }
     
@@ -548,126 +705,56 @@
     freeResponse(resp);
     free(absPath);
     free(url);
-    
-    if (ret == 0) {
-        return fileInfo;
-    } else {
-        free(fileInfo);
-        return NULL;
-    }
-}
-
-int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
-{
-    if (fs == NULL || path == NULL) {
+    if (ret) {
+        errno = ret;
         return -1;
     }
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareSETREPLICATION(bld->nn, bld->port, absPath, replication, bld->userName))
-         && (resp = launchSETREPLICATION(url))
-         && (parseSETREPLICATION(resp->body->content)))) {
-        ret = -1;
-    }
-    
-    freeResponse(resp);
-    free(absPath);
-    free(url);
-    return ret;
-}
-
-void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
-{
-    //Free the mName, mOwner, and mGroup
-    int i;
-    for (i=0; i < numEntries; ++i) {
-        if (hdfsFileInfo[i].mName) {
-            free(hdfsFileInfo[i].mName);
-        }
-        if (hdfsFileInfo[i].mOwner) {
-            free(hdfsFileInfo[i].mOwner);
-        }
-        if (hdfsFileInfo[i].mGroup) {
-            free(hdfsFileInfo[i].mGroup);
-        }
-    }
-    
-    //Free entire block
-    free(hdfsFileInfo);
-    hdfsFileInfo = NULL;
-}
-
-int hdfsDelete(hdfsFS fs, const char* path, int recursive)
-{
-    if (fs == NULL || path == NULL) {
-        return -1;
-    }
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
-    Response resp = NULL;
-    int ret = 0;
-    
-    if(!((url = prepareDELETE(bld->nn, bld->port, absPath, recursive, bld->userName))
-         && (resp = launchDELETE(url))
-         && (parseDELETE(resp->body->content)))) {
-        ret = -1;
-    }
-    
-    freeResponse(resp);
-    free(absPath);
-    free(url);
-    return ret;
+    return 0;
 }
 
 int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
 {
-    if (fs == NULL || path == NULL) {
-        return -1;
-    }
-    char *absPath = getAbsolutePath(fs, path);
-    if (!absPath) {
-        return -1;
-    }
-    
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    char *url = NULL;
+    char *url = NULL, *absPath = NULL;
     Response resp = NULL;
     int ret = 0;
-    
-    if(!((url = prepareUTIMES(bld->nn, bld->port, absPath, mtime, atime, bld->userName))
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
+    }
+    absPath = getAbsolutePath(fs, path);
+    if (!absPath) {
+        ret = ENOMEM;
+        goto done;
+    }
+    if(!((url = prepareUTIMES(fs->nn, fs->port, absPath, mtime, atime,
+                              fs->userName))
          && (resp = launchUTIMES(url))
          && (parseUTIMES(resp->header->content, resp->body->content)))) {
-        ret = -1;
+        ret = EIO;
+        goto done;
     }
     
+done:
     freeResponse(resp);
     free(absPath);
     free(url);
-    return ret;
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return 0;
 }
 
 int hdfsExists(hdfsFS fs, const char *path)
 {
     hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, path);
-    if (fileInfo) {
-        hdfsFreeFileInfo(fileInfo, 1);
-        return 0;
-    } else {
+    if (!fileInfo) {
+        // (errno will have been set by hdfsGetPathInfo)
         return -1;
     }
+    hdfsFreeFileInfo(fileInfo, 1);
+    return 0;
 }
 
 typedef struct {
@@ -701,39 +788,160 @@
     return data;
 }
 
+/**
+ * Free the memory associated with a webHDFS file handle.
+ *
+ * No other resources will be freed.
+ *
+ * @param file            The webhdfs file handle
+ */
+static void freeFileInternal(hdfsFile file)
+{
+    if (!file)
+        return;
+    freeWebFileHandle(file->file);
+    free(file);
+}
+
+/**
+ * Helper function for opening a file for OUTPUT.
+ *
+ * As part of the open process for OUTPUT files, we have to connect to the
+ * NameNode and get the URL of the corresponding DataNode.
+ * We also create a background thread here for doing I/O.
+ *
+ * @param fs                     The webhdfs filesystem
+ * @param file                   The file handle being opened for OUTPUT
+ * @return                       0 on success; a POSIX error code otherwise
+ */
+static int hdfsOpenOutputFileImpl(hdfsFS fs, hdfsFile file)
+{
+    struct webhdfsFileHandle *webhandle = file->file;
+    Response resp = NULL;
+    int parseRet, append, ret = 0;
+    char *prepareUrl = NULL, *dnUrl = NULL;
+    threadData *data = NULL;
+
+    webhandle->uploadBuffer = initWebHdfsBuffer();
+    if (!webhandle->uploadBuffer) {
+        ret = ENOMEM;
+        goto done;
+    }
+    append = file->flags & O_APPEND;
+    if (!append) {
+        // If we're not appending, send a create request to the NN
+        prepareUrl = prepareNnWRITE(fs->nn, fs->port, webhandle->absPath,
+            fs->userName, webhandle->replication, webhandle->blockSize);
+    } else {
+        prepareUrl = prepareNnAPPEND(fs->nn, fs->port, webhandle->absPath,
+                              fs->userName);
+    }
+    if (!prepareUrl) {
+        fprintf(stderr, "fail to create the url connecting to namenode "
+                "for file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    if (!append) {
+        resp = launchNnWRITE(prepareUrl);
+    } else {
+        resp = launchNnAPPEND(prepareUrl);
+    }
+    if (!resp) {
+        fprintf(stderr, "fail to get the response from namenode for "
+                "file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    if (!append) {
+        parseRet = parseNnWRITE(resp->header->content, resp->body->content);
+    } else {
+        parseRet = parseNnAPPEND(resp->header->content, resp->body->content);
+    }
+    if (!parseRet) {
+        fprintf(stderr, "fail to parse the response from namenode for "
+                "file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    dnUrl = parseDnLoc(resp->header->content);
+    if (!dnUrl) {
+        fprintf(stderr, "fail to get the datanode url from namenode "
+                "for file creation/appending\n");
+        ret = EIO;
+        goto done;
+    }
+    //store the datanode url in the file handle
+    webhandle->datanode = strdup(dnUrl);
+    if (!webhandle->datanode) {
+        ret = ENOMEM;
+        goto done;
+    }
+    // create a new thread to perform the HTTP transfer
+    data = calloc(1, sizeof(*data));
+    if (!data) {
+        ret = ENOMEM;
+        goto done;
+    }
+    data->url = strdup(dnUrl);
+    if (!data->url) {
+        ret = ENOMEM;
+        goto done;
+    }
+    data->flags = file->flags;
+    data->uploadBuffer = webhandle->uploadBuffer;
+    ret = pthread_create(&webhandle->connThread, NULL,
+                         writeThreadOperation, data);
+    if (ret) {
+        fprintf(stderr, "Failed to create the writing thread.\n");
+        goto done;
+    }
+    webhandle->uploadBuffer->openFlag = 1;
+
+done:
+    freeResponse(resp);
+    free(prepareUrl);
+    free(dnUrl);
+    if (ret && data) {
+        // data may be NULL if an earlier step failed before allocating it
+        free(data->url);
+        free(data);
+    }
+    return ret;
+}
+
 hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                       int bufferSize, short replication, tSize blockSize)
 {
-    /*
-     * the original version of libhdfs based on JNI store a fsinputstream/fsoutputstream in the hdfsFile
-     * in libwebhdfs that is based on webhdfs, we store (absolute_path, buffersize, replication, blocksize) in it
-     */
-    if (fs == NULL || path == NULL) {
-        return NULL;
-    }
-
+    int ret = 0;
     int accmode = flags & O_ACCMODE;
-    if (accmode == O_RDWR) {
-        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
-        errno = ENOTSUP;
-        return NULL;
+    struct webhdfsFileHandle *webhandle = NULL;
+    hdfsFile file = NULL;
+
+    if (fs == NULL || path == NULL) {
+        ret = EINVAL;
+        goto done;
     }
-    
+    if (accmode == O_RDWR) {
+        // TODO: the original libhdfs has very hackish support for this; should
+        // we do the same?  It would actually be a lot easier in libwebhdfs
+        // since the protocol isn't connection-oriented. 
+        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
+        ret = ENOTSUP;
+        goto done;
+    }
     if ((flags & O_CREAT) && (flags & O_EXCL)) {
         fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
     }
-    
-    hdfsFile hdfsFileHandle = (hdfsFile) calloc(1, sizeof(struct hdfsFile_internal));
-    if (!hdfsFileHandle) {
-        return NULL;
+    file = calloc(1, sizeof(struct hdfsFile_internal));
+    if (!file) {
+        ret = ENOMEM;
+        goto done;
     }
-    int ret = 0;
-    hdfsFileHandle->flags = flags;
-    hdfsFileHandle->type = accmode == O_RDONLY ? INPUT : OUTPUT;
-    hdfsFileHandle->offset = 0;
-    struct webhdfsFileHandle *webhandle = (struct webhdfsFileHandle *) calloc(1, sizeof(struct webhdfsFileHandle));
+    file->flags = flags;
+    file->type = accmode == O_RDONLY ? INPUT : OUTPUT;
+    file->offset = 0;
+    webhandle = calloc(1, sizeof(struct webhdfsFileHandle));
     if (!webhandle) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
     webhandle->bufferSize = bufferSize;
@@ -741,105 +949,28 @@
     webhandle->blockSize = blockSize;
     webhandle->absPath = getAbsolutePath(fs, path);
     if (!webhandle->absPath) {
-        ret = -1;
+        ret = ENOMEM;
         goto done;
     }
-    hdfsFileHandle->file = webhandle;
-    
-    //for write/append, need to connect to the namenode
-    //and get the url of corresponding datanode
-    if (hdfsFileHandle->type == OUTPUT) {
-        webhandle->uploadBuffer = initWebHdfsBuffer();
-        if (!webhandle->uploadBuffer) {
-            ret = -1;
-            goto done;
-        }
-        struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-        char *url = NULL;
-        Response resp = NULL;
-        int append = flags & O_APPEND;
-        int create = append ? 0 : 1;
-        
-        //if create: send create request to NN
-        if (create) {
-            url = prepareNnWRITE(bld->nn, bld->port, webhandle->absPath, bld->userName, webhandle->replication, webhandle->blockSize);
-        } else if (append) {
-            url = prepareNnAPPEND(bld->nn, bld->port, webhandle->absPath, bld->userName);
-        }
-        if (!url) {
-            fprintf(stderr,
-                    "fail to create the url connecting to namenode for file creation/appending\n");
-            ret = -1;
-            goto done;
-        }
-
-        if (create) {
-            resp = launchNnWRITE(url);
-        } else if (append) {
-            resp = launchNnAPPEND(url);
-        }
-        if (!resp) {
-            fprintf(stderr,
-                    "fail to get the response from namenode for file creation/appending\n");
-            free(url);
-            ret = -1;
-            goto done;
-        }
-        
-        int parseRet = 0;
-        if (create) {
-            parseRet = parseNnWRITE(resp->header->content, resp->body->content);
-        } else if (append) {
-            parseRet = parseNnAPPEND(resp->header->content, resp->body->content);
-        }
-        if (!parseRet) {
-            fprintf(stderr,
-                    "fail to parse the response from namenode for file creation/appending\n");
-            free(url);
-            freeResponse(resp);
-            ret = -1;
-            goto done;
-        }
-            
-        free(url);
-        url = parseDnLoc(resp->header->content);
-        if (!url) {
-            fprintf(stderr,
-                    "fail to get the datanode url from namenode for file creation/appending\n");
-            freeResponse(resp);
-            ret = -1;
-            return NULL;
-        }
-        freeResponse(resp);
-        //store the datanode url in the file handle
-        webhandle->datanode = strdup(url);
- 
-        //create a new thread for performing the http transferring
-        threadData *data = (threadData *) calloc(1, sizeof(threadData));
-        if (!data) {
-            ret = -1;
-            goto done;
-        }
-        data->url = strdup(url);
-        data->flags = flags;
-        data->uploadBuffer = webhandle->uploadBuffer;
-        free(url);
-        ret = pthread_create(&webhandle->connThread, NULL, writeThreadOperation, data);
+    file->file = webhandle;
+    if (file->type == OUTPUT) {
+        ret = hdfsOpenOutputFileImpl(fs, file);
         if (ret) {
-            fprintf(stderr, "Failed to create the writing thread.\n");
-        } else {
-            webhandle->uploadBuffer->openFlag = 1;
+            goto done;
         }
     }
-    
+
 done:
-    if (ret == 0) {
-        return hdfsFileHandle;
-    } else {
-        freeWebFileHandle(webhandle);
-        free(hdfsFileHandle);
+    if (ret) {
+        if (file) {
+            freeFileInternal(file); // Also frees webhandle
+        } else {
+            freeWebFileHandle(webhandle);
+        }
+        errno = ret;
         return NULL;
     }
+    return file;
 }
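An end-to-end write sketch under the flags handled above. This assumes the standard hdfs.h entry point hdfsCloseFile (whose OUTPUT branch, shown in the next hunk, joins the background writer thread), and assumes the zero bufferSize/replication/blockSize arguments select defaults; the path is illustrative:

    #include <errno.h>
    #include <fcntl.h>
    #include <string.h>
    #include "hdfs.h"

    static int writeExample(hdfsFS fs)
    {
        hdfsFile f = hdfsOpenFile(fs, "/tmp/out.txt", O_WRONLY, 0, 0, 0);
        if (!f)
            return errno;
        const char *msg = "hello, webhdfs\n";
        if (hdfsWrite(fs, f, msg, strlen(msg)) < 0) {
            int err = errno;
            hdfsCloseFile(fs, f);      /* still joins the writer thread */
            return err;
        }
        return hdfsCloseFile(fs, f);   /* blocks until the upload completes */
    }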
 
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer, tSize length)
@@ -848,15 +979,17 @@
         return 0;
     }
     if (fs == NULL || file == NULL || file->type != OUTPUT || length < 0) {
+        errno = EBADF;
         return -1;
     }
     
-    struct webhdfsFileHandle *wfile = (struct webhdfsFileHandle *) file->file;
+    struct webhdfsFileHandle *wfile = file->file;
     if (wfile->uploadBuffer && wfile->uploadBuffer->openFlag) {
         resetWebhdfsBuffer(wfile->uploadBuffer, buffer, length);
         return length;
     } else {
         fprintf(stderr, "Error: have not opened the file %s for writing yet.\n", wfile->absPath);
+        errno = EBADF;
         return -1;
     }
 }
@@ -868,7 +1001,7 @@
     if (file->type == OUTPUT) {
         void *respv;
         threadData *tdata;
-        struct webhdfsFileHandle *wfile = (struct webhdfsFileHandle *) file->file;
+        struct webhdfsFileHandle *wfile = file->file;
         pthread_mutex_lock(&(wfile->uploadBuffer->writeMutex));
         wfile->uploadBuffer->closeFlag = 1;
         pthread_cond_signal(&wfile->uploadBuffer->newwrite_or_close);
@@ -893,13 +1026,10 @@
         //free the threaddata
         freeThreadData(tdata);
     }
-    
-    fprintf(stderr, "To clean the webfilehandle...\n");
-    if (file) {
-        freeWebFileHandle(file->file);
-        free(file);
-        file = NULL;
-        fprintf(stderr, "Cleaned the webfilehandle...\n");
+    freeFileInternal(file);
+    fprintf(stderr, "Closed the webfilehandle...\n");
+    if (ret) {
+        errno = EIO;
     }
     return ret;
 }
@@ -914,111 +1044,155 @@
     return (file->type == OUTPUT);
 }
 
-tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length)
+static int hdfsReadImpl(hdfsFS fs, hdfsFile file, void* buffer, tSize off,
+                        tSize length, tSize *numRead)
 {
-    if (length == 0) {
-        return 0;
-    }
-    if (fs == NULL || file == NULL || file->type != INPUT || buffer == NULL || length < 0) {
-        errno = EINVAL;
-        return -1;
-    }
-    struct hdfsBuilder *bld = (struct hdfsBuilder *) fs;
-    struct webhdfsFileHandle *webFile = (struct webhdfsFileHandle *) file->file;
+    int ret = 0;
     char *url = NULL;
     Response resp = NULL;
     int openResult = -1;
-    
-    resp = (Response) calloc(1, sizeof(*resp));
+
+    if (fs == NULL || file == NULL || file->type != INPUT || buffer == NULL ||
+            length < 0) {
+        ret = EINVAL;
+        goto done;
+    }
+    if (length == 0) {
+        // Special case: the user supplied a buffer of zero length, so there is
+        // nothing to do.
+        *numRead = 0;
+        goto done;
+    }
+    resp = calloc(1, sizeof(*resp)); // Response is a pointer typedef, so sizeof(*resp) is the struct size
     if (!resp) {
-        return -1;
+        ret = ENOMEM;
+        goto done;
     }
     resp->header = initResponseBuffer();
     resp->body = initResponseBuffer();
     resp->body->content = buffer;
     resp->body->remaining = length;
     
-    if (!((url = prepareOPEN(bld->nn, bld->port, webFile->absPath, bld->userName, file->offset, length))
+    if (!((url = prepareOPEN(fs->nn, fs->port, file->file->absPath,
+                             fs->userName, off, length))
           && (resp = launchOPEN(url, resp))
           && ((openResult = parseOPEN(resp->header->content, resp->body->content)) > 0))) {
-        free(url);
-        freeResponseBuffer(resp->header);
         if (openResult == 0) {
-            return 0;
-        } else {
-            return -1;
+            // Special case: if parseOPEN returns 0, we asked for a byte range
+            // outside what the file contains.  In this case, hdfsRead and
+            // hdfsPread return 0, meaning end-of-file.
+            *numRead = 0;
+            goto done;
         }
+        ret = EIO;
+        goto done;
     }
-    
-    size_t readSize = resp->body->offset;
-    file->offset += readSize;
-    
+    *numRead = resp->body->offset;
+
+done:
     freeResponseBuffer(resp->header);
     free(resp->body);
     free(resp);
     free(url);
-    return readSize;
+    return ret;
+}
+
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length)
+{
+    int ret;
+    tSize numRead = 0;
+
+    ret = hdfsReadImpl(fs, file, buffer, file->offset, length, &numRead);
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    file->offset += numRead; 
+    return numRead;
 }
 
 int hdfsAvailable(hdfsFS fs, hdfsFile file)
 {
-    if (!file || !fs) {
-        return -1;
-    }
-    struct webhdfsFileHandle *wf = (struct webhdfsFileHandle *) file->file;
-    if (!wf) {
-        return -1;
-    }
-    hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, wf->absPath);
-    if (fileInfo) {
-        int available = (int)(fileInfo->mSize - file->offset);
-        hdfsFreeFileInfo(fileInfo, 1);
-        return available;
-    } else {
-        return -1;
-    }
+    /* Reads from webhdfs currently always block, so the number of bytes
+     * that can be read without blocking is 0.
+     */
+    return 0;
+}
+
+int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+{
+    errno = ENOTSUP;
+    return -1;
 }
 
 int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos)
 {
-    if (!fs || !file || desiredPos < 0) {
-        return -1;
-    }
-    struct webhdfsFileHandle *wf = (struct webhdfsFileHandle *) file->file;
-    if (!wf) {
-        return -1;
-    }
-    hdfsFileInfo *fileInfo = hdfsGetPathInfo(fs, wf->absPath);
+    struct webhdfsFileHandle *wf;
+    hdfsFileInfo *fileInfo = NULL;
     int ret = 0;
+
+    if (!fs || !file || (file->type == OUTPUT) || (desiredPos < 0)) {
+        ret = EINVAL;
+        goto done;
+    }
+    wf = file->file;
+    if (!wf) {
+        ret = EINVAL;
+        goto done;
+    }
+    fileInfo = hdfsGetPathInfo(fs, wf->absPath);
+    if (!fileInfo) {
+        ret = errno;
+        goto done;
+    }
+    if (desiredPos > fileInfo->mSize) {
+        fprintf(stderr,
+                "hdfsSeek for %s failed since the desired position %" PRId64
+                " is beyond the size of the file %" PRId64 "\n",
+                wf->absPath, desiredPos, fileInfo->mSize);
+        ret = ENOTSUP;
+        goto done;
+    }
+    file->offset = desiredPos;
+
+done:
     if (fileInfo) {
-        if (fileInfo->mSize < desiredPos) {
-            errno = ENOTSUP;
-            fprintf(stderr,
-                    "hdfsSeek for %s failed since the desired position %lld is beyond the size of the file %lld\n",
-                    wf->absPath, desiredPos, fileInfo->mSize);
-            ret = -1;
-        } else {
-            file->offset = desiredPos;
-        }
         hdfsFreeFileInfo(fileInfo, 1);
-        return ret;
-    } else {
+    }
+    if (ret) {
+        errno = ret;
         return -1;
     }
+    return 0;
 }
 
 tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position, void* buffer, tSize length)
 {
-    if (!fs || !file || file->type != INPUT || position < 0 || !buffer || length < 0) {
+    int ret;
+    tSize numRead = 0;
+
+    if (position < 0) {
+        errno = EINVAL;
         return -1;
     }
-    file->offset = position;
-    return hdfsRead(fs, file, buffer, length);
+    ret = hdfsReadImpl(fs, file, buffer, position, length, &numRead);
+    if (ret) {
+        errno = ret;
+        return -1;
+    }
+    return numRead;
 }
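And the matching read side: hdfsRead advances the file's cursor, while hdfsPread reads at an explicit offset and leaves the cursor alone; both return 0 at end-of-file, per the parseOPEN special case above. A sketch, again assuming the standard hdfsCloseFile and an illustrative path:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "hdfs.h"

    static int readExample(hdfsFS fs)
    {
        char buf[4096];
        hdfsFile f = hdfsOpenFile(fs, "/tmp/out.txt", O_RDONLY, 0, 0, 0);
        if (!f)
            return errno;
        tSize n;
        while ((n = hdfsRead(fs, f, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);            /* consume the bytes */
        /* Re-read the first bytes without moving the cursor: */
        tSize m = hdfsPread(fs, f, 0, buf, sizeof(buf));
        hdfsCloseFile(fs, f);
        return (n < 0 || m < 0) ? EIO : 0;
    }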
 
 tOffset hdfsTell(hdfsFS fs, hdfsFile file)
 {
     if (!file) {
+        errno = EINVAL;
         return -1;
     }
     return file->offset;
@@ -1027,29 +1201,51 @@
 char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize)
 {
     if (fs == NULL || buffer == NULL ||  bufferSize <= 0) {
+        errno = EINVAL;
         return NULL;
     }
-    
-    struct hdfsBuilder * bld = (struct hdfsBuilder *) fs;
-    if (bld->workingDir) {
-        strncpy(buffer, bld->workingDir, bufferSize);
+    if (snprintf(buffer, bufferSize, "%s", fs->workingDir) >= bufferSize) {
+        errno = ENAMETOOLONG;
+        return NULL;
     }
     return buffer;
 }
 
 int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
 {
+    char *newWorkingDir;
+    size_t strlenPath, newWorkingDirLen;
+
     if (fs == NULL || path == NULL) {
+        errno = EINVAL;
         return -1;
     }
-    
-    struct hdfsBuilder * bld = (struct hdfsBuilder *) fs;
-    free(bld->workingDir);
-    bld->workingDir = (char *)malloc(strlen(path) + 1);
-    if (!(bld->workingDir)) {
+    strlenPath = strlen(path);
+    if (strlenPath < 1) {
+        errno = EINVAL;
         return -1;
     }
-    strcpy(bld->workingDir, path);
+    if (path[0] != '/') {
+        // TODO: support non-absolute paths.  They should be interpreted
+        // relative to the current path.
+        errno = ENOTSUP;
+        return -1;
+    }
+    if (strstr(path, "//")) {
+        // TODO: support non-normalized paths (by normalizing them).
+        errno = ENOTSUP;
+        return -1;
+    }
+    newWorkingDirLen = strlenPath + 2;
+    newWorkingDir = malloc(newWorkingDirLen);
+    if (!newWorkingDir) {
+        errno = ENOMEM;
+        return -1;
+    }
+    snprintf(newWorkingDir, newWorkingDirLen, "%s%s",
+             path, (path[strlenPath - 1] == '/') ? "" : "/");
+    free(fs->workingDir);
+    fs->workingDir = newWorkingDir;
     return 0;
 }
 
@@ -1065,49 +1261,58 @@
     free(blockHosts);
 }
 
-/* not useful for libwebhdfs */
-int hdfsFileUsesDirectRead(hdfsFile file)
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
 {
-    /* return !!(file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ); */
-    fprintf(stderr, "hdfsFileUsesDirectRead is no longer useful for libwebhdfs.\n");
+    errno = ENOTSUP;
     return -1;
 }
 
-/* not useful for libwebhdfs */
-void hdfsFileDisableDirectRead(hdfsFile file)
+int hdfsFileUsesDirectRead(hdfsFile file)
 {
-    /* file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ; */
-    fprintf(stderr, "hdfsFileDisableDirectRead is no longer useful for libwebhdfs.\n");
+    return 0; // webhdfs never performs direct reads.
 }
 
-/* not useful for libwebhdfs */
+void hdfsFileDisableDirectRead(hdfsFile file)
+{
+    // webhdfs never performs direct reads
+}
+
 int hdfsHFlush(hdfsFS fs, hdfsFile file)
 {
+    if (file->type != OUTPUT) {
+        errno = EINVAL; 
+        return -1;
+    }
+    // TODO: block until our write buffer is flushed
     return 0;
 }
 
-/* not useful for libwebhdfs */
 int hdfsFlush(hdfsFS fs, hdfsFile file)
 {
+    if (file->type != OUTPUT) {
+        errno = EINVAL; 
+        return -1;
+    }
+    // TODO: block until our write buffer is flushed
     return 0;
 }
 
 char*** hdfsGetHosts(hdfsFS fs, const char* path,
                      tOffset start, tOffset length)
 {
-    fprintf(stderr, "hdfsGetHosts is not but will be supported by libwebhdfs yet.\n");
+    errno = ENOTSUP;
     return NULL;
 }
 
 tOffset hdfsGetCapacity(hdfsFS fs)
 {
-    fprintf(stderr, "hdfsGetCapacity is not but will be supported by libwebhdfs.\n");
+    errno = ENOTSUP;
     return -1;
 }
 
 tOffset hdfsGetUsed(hdfsFS fs)
 {
-    fprintf(stderr, "hdfsGetUsed is not but will be supported by libwebhdfs yet.\n");
+    errno = ENOTSUP;
     return -1;
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.c
deleted file mode 100644
index 3c558e0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.c
+++ /dev/null
@@ -1,609 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#include "config.h"
-#include "exception.h"
-#include "jni_helper.h"
-
-#include <stdio.h> 
-#include <string.h> 
-
-static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int hashTableInited = 0;
-
-#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
-#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
-
-
-/** The Native return types that methods could return */
-#define VOID          'V'
-#define JOBJECT       'L'
-#define JARRAYOBJECT  '['
-#define JBOOLEAN      'Z'
-#define JBYTE         'B'
-#define JCHAR         'C'
-#define JSHORT        'S'
-#define JINT          'I'
-#define JLONG         'J'
-#define JFLOAT        'F'
-#define JDOUBLE       'D'
-
-
-/**
- * MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
- * It's set to 4096 to account for (classNames + No. of threads)
- */
-#define MAX_HASH_TABLE_ELEM 4096
-
-/** Key that allows us to retrieve thread-local storage */
-static pthread_key_t gTlsKey;
-
-/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
-static int gTlsKeyInitialized = 0;
-
-/** Pthreads thread-local storage for each library thread. */
-struct hdfsTls {
-    JNIEnv *env;
-};
-
-/**
- * The function that is called whenever a thread with libhdfs thread local data
- * is destroyed.
- *
- * @param v         The thread-local data
- */
-static void hdfsThreadDestructor(void *v)
-{
-    struct hdfsTls *tls = v;
-    JavaVM *vm;
-    JNIEnv *env = tls->env;
-    jint ret;
-
-    ret = (*env)->GetJavaVM(env, &vm);
-    if (ret) {
-        fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
-                "error %d\n", ret);
-        (*env)->ExceptionDescribe(env);
-    } else {
-        (*vm)->DetachCurrentThread(vm);
-    }
-    free(tls);
-}
-
-void destroyLocalReference(JNIEnv *env, jobject jObject)
-{
-  if (jObject)
-    (*env)->DeleteLocalRef(env, jObject);
-}
-
-static jthrowable validateMethodType(JNIEnv *env, MethType methType)
-{
-    if (methType != STATIC && methType != INSTANCE) {
-        return newRuntimeError(env, "validateMethodType(methType=%d): "
-            "illegal method type.\n", methType);
-    }
-    return NULL;
-}
-
-jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out)
-{
-    jstring jstr;
-
-    if (!str) {
-        /* Can't pass NULL to NewStringUTF: the result would be
-         * implementation-defined. */
-        *out = NULL;
-        return NULL;
-    }
-    jstr = (*env)->NewStringUTF(env, str);
-    if (!jstr) {
-        /* If NewStringUTF returns NULL, an exception has been thrown,
-         * which we need to handle.  Probaly an OOM. */
-        return getPendingExceptionAndClear(env);
-    }
-    *out = jstr;
-    return NULL;
-}
-
-jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
-{
-    const char *tmp;
-
-    if (!jstr) {
-        *out = NULL;
-        return NULL;
-    }
-    tmp = (*env)->GetStringUTFChars(env, jstr, NULL);
-    if (!tmp) {
-        return getPendingExceptionAndClear(env);
-    }
-    *out = strdup(tmp);
-    (*env)->ReleaseStringUTFChars(env, jstr, tmp);
-    return NULL;
-}
-
-static int hashTableInit(void)
-{
-    if (!hashTableInited) {
-        LOCK_HASH_TABLE();
-        if (!hashTableInited) {
-            if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
-                fprintf(stderr, "error creating hashtable, <%d>: %s\n",
-                        errno, strerror(errno));
-                return 0;
-            } 
-            hashTableInited = 1;
-        }
-        UNLOCK_HASH_TABLE();
-    }
-    return 1;
-}
-
-
-static int insertEntryIntoTable(const char *key, void *data)
-{
-    ENTRY e, *ep;
-    if (key == NULL || data == NULL) {
-        return 0;
-    }
-    if (! hashTableInit()) {
-      return -1;
-    }
-    e.data = data;
-    e.key = (char*)key;
-    LOCK_HASH_TABLE();
-    ep = hsearch(e, ENTER);
-    UNLOCK_HASH_TABLE();
-    if (ep == NULL) {
-        fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
-                key, errno, strerror(errno));
-    }  
-    return 0;
-}
-
-
-
-static void* searchEntryFromTable(const char *key)
-{
-    ENTRY e,*ep;
-    if (key == NULL) {
-        return NULL;
-    }
-    hashTableInit();
-    e.key = (char*)key;
-    LOCK_HASH_TABLE();
-    ep = hsearch(e, FIND);
-    UNLOCK_HASH_TABLE();
-    if (ep != NULL) {
-        return ep->data;
-    }
-    return NULL;
-}
-
-
-
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className,
-                 const char *methName, const char *methSignature, ...)
-{
-    va_list args;
-    jclass cls;
-    jmethodID mid;
-    jthrowable jthr;
-    const char *str; 
-    char returnType;
-    
-    jthr = validateMethodType(env, methType);
-    if (jthr)
-        return jthr;
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = methodIdFromClass(className, methName, methSignature, 
-                            methType, env, &mid);
-    if (jthr)
-        return jthr;
-    str = methSignature;
-    while (*str != ')') str++;
-    str++;
-    returnType = *str;
-    va_start(args, methSignature);
-    if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
-        jobject jobj = NULL;
-        if (methType == STATIC) {
-            jobj = (*env)->CallStaticObjectMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jobj = (*env)->CallObjectMethodV(env, instObj, mid, args);
-        }
-        retval->l = jobj;
-    }
-    else if (returnType == VOID) {
-        if (methType == STATIC) {
-            (*env)->CallStaticVoidMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            (*env)->CallVoidMethodV(env, instObj, mid, args);
-        }
-    }
-    else if (returnType == JBOOLEAN) {
-        jboolean jbool = 0;
-        if (methType == STATIC) {
-            jbool = (*env)->CallStaticBooleanMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jbool = (*env)->CallBooleanMethodV(env, instObj, mid, args);
-        }
-        retval->z = jbool;
-    }
-    else if (returnType == JSHORT) {
-        jshort js = 0;
-        if (methType == STATIC) {
-            js = (*env)->CallStaticShortMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            js = (*env)->CallShortMethodV(env, instObj, mid, args);
-        }
-        retval->s = js;
-    }
-    else if (returnType == JLONG) {
-        jlong jl = -1;
-        if (methType == STATIC) {
-            jl = (*env)->CallStaticLongMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jl = (*env)->CallLongMethodV(env, instObj, mid, args);
-        }
-        retval->j = jl;
-    }
-    else if (returnType == JINT) {
-        jint ji = -1;
-        if (methType == STATIC) {
-            ji = (*env)->CallStaticIntMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            ji = (*env)->CallIntMethodV(env, instObj, mid, args);
-        }
-        retval->i = ji;
-    }
-    va_end(args);
-
-    jthr = (*env)->ExceptionOccurred(env);
-    if (jthr) {
-        (*env)->ExceptionClear(env);
-        return jthr;
-    }
-    return NULL;
-}
-
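
For orientation, a short usage sketch of the dispatcher above: calling the static method java/lang/System#currentTimeMillis, whose JNI signature ()J delivers the result in retval->j. It assumes a valid JNIEnv plus the printExceptionAndFree helper that jni_helper.c already uses for error reporting:

    #include <inttypes.h>
    #include <stdio.h>

    static void printCurrentTimeMillis(JNIEnv *env)
    {
        jvalue ret;
        jthrowable jthr;

        /* ()J: no arguments, returns a Java long, delivered in ret.j. */
        jthr = invokeMethod(env, &ret, STATIC, NULL, "java/lang/System",
                            "currentTimeMillis", "()J");
        if (jthr) {
            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                                  "System#currentTimeMillis");
            return;
        }
        printf("current time: %" PRId64 " ms\n", (int64_t)ret.j);
    }
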
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...)
-{
-    va_list args;
-    jclass cls;
-    jmethodID mid; 
-    jobject jobj;
-    jthrowable jthr;
-
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = methodIdFromClass(className, "<init>", ctorSignature, 
-                            INSTANCE, env, &mid);
-    if (jthr)
-        return jthr;
-    va_start(args, ctorSignature);
-    jobj = (*env)->NewObjectV(env, cls, mid, args);
-    va_end(args);
-    if (!jobj)
-        return getPendingExceptionAndClear(env);
-    *out = jobj;
-    return NULL;
-}
-
-
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out)
-{
-    jclass cls;
-    jthrowable jthr;
-
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jmethodID mid = 0;
-    jthr = validateMethodType(env, methType);
-    if (jthr)
-        return jthr;
-    if (methType == STATIC) {
-        mid = (*env)->GetStaticMethodID(env, cls, methName, methSignature);
-    }
-    else if (methType == INSTANCE) {
-        mid = (*env)->GetMethodID(env, cls, methName, methSignature);
-    }
-    if (mid == NULL) {
-        fprintf(stderr, "could not find method %s from class %s with "
-            "signature %s\n", methName, className, methSignature);
-        return getPendingExceptionAndClear(env);
-    }
-    *out = mid;
-    return NULL;
-}
-
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
-{
-    jclass clsLocalRef;
-    jclass cls = searchEntryFromTable(className);
-    if (cls) {
-        *out = cls;
-        return NULL;
-    }
-    clsLocalRef = (*env)->FindClass(env,className);
-    if (clsLocalRef == NULL) {
-        return getPendingExceptionAndClear(env);
-    }
-    cls = (*env)->NewGlobalRef(env, clsLocalRef);
-    if (cls == NULL) {
-        (*env)->DeleteLocalRef(env, clsLocalRef);
-        return getPendingExceptionAndClear(env);
-    }
-    (*env)->DeleteLocalRef(env, clsLocalRef);
-    insertEntryIntoTable(className, cls);
-    *out = cls;
-    return NULL;
-}
-
-jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
-{
-    jthrowable jthr;
-    jclass cls, clsClass = NULL;
-    jmethodID mid;
-    jstring str = NULL;
-    const char *cstr = NULL;
-    char *newstr;
-
-    cls = (*env)->GetObjectClass(env, jobj);
-    if (cls == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    clsClass = (*env)->FindClass(env, "java/lang/Class");
-    if (clsClass == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    mid = (*env)->GetMethodID(env, clsClass, "getName", "()Ljava/lang/String;");
-    if (mid == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    str = (*env)->CallObjectMethod(env, cls, mid);
-    if (str == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    cstr = (*env)->GetStringUTFChars(env, str, NULL);
-    if (!cstr) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    newstr = strdup(cstr);
-    if (newstr == NULL) {
-        jthr = newRuntimeError(env, "classNameOfObject: out of memory");
-        goto done;
-    }
-    *name = newstr;
-    jthr = NULL;
-
-done:
-    destroyLocalReference(env, cls);
-    destroyLocalReference(env, clsClass);
-    if (str) {
-        if (cstr)
-            (*env)->ReleaseStringUTFChars(env, str, cstr);
-        (*env)->DeleteLocalRef(env, str);
-    }
-    return jthr;
-}
-
-
-/**
- * Get the global JNI environment.
- *
- * We only have to create the JVM once.  After that, we can use it in
- * every thread.  You must be holding the jvmMutex when you call this
- * function.
- *
- * @return          The JNIEnv on success; NULL otherwise
- */
-static JNIEnv* getGlobalJNIEnv(void)
-{
-    const jsize vmBufLength = 1;
-    JavaVM* vmBuf[vmBufLength]; 
-    JNIEnv *env;
-    jint rv = 0; 
-    jint noVMs = 0;
-    jthrowable jthr;
-
-    rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
-    if (rv != 0) {
-        fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
-        return NULL;
-    }
-
-    if (noVMs == 0) {
-        //Get the environment variables for initializing the JVM
-        char *hadoopClassPath = getenv("CLASSPATH");
-        if (hadoopClassPath == NULL) {
-            fprintf(stderr, "Environment variable CLASSPATH not set!\n");
-            return NULL;
-        } 
-        char *hadoopClassPathVMArg = "-Djava.class.path=";
-        size_t optHadoopClassPathLen = strlen(hadoopClassPath) + 
-          strlen(hadoopClassPathVMArg) + 1;
-        char *optHadoopClassPath = malloc(optHadoopClassPathLen);
-        if (!optHadoopClassPath) {
-            fprintf(stderr, "getGlobalJNIEnv: out of memory allocating "
-                    "the classpath option\n");
-            return NULL;
-        }
-        snprintf(optHadoopClassPath, optHadoopClassPathLen,
-                "%s%s", hadoopClassPathVMArg, hadoopClassPath);
-
-        // Determine the # of LIBHDFS_OPTS args
-        int noArgs = 1;
-        char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
-        char jvmArgDelims[] = " ";
-        char *str, *token, *savePtr;
-        if (hadoopJvmArgs != NULL)  {
-          hadoopJvmArgs = strdup(hadoopJvmArgs);
-          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
-            token = strtok_r(str, jvmArgDelims, &savePtr);
-            if (NULL == token) {
-              break;
-            }
-          }
-          free(hadoopJvmArgs);
-        }
-
-        // Now that we know the # args, populate the options array
-        JavaVMOption options[noArgs];
-        options[0].optionString = optHadoopClassPath;
-        hadoopJvmArgs = getenv("LIBHDFS_OPTS");
-        if (hadoopJvmArgs != NULL)  {
-          hadoopJvmArgs = strdup(hadoopJvmArgs);
-          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
-            token = strtok_r(str, jvmArgDelims, &savePtr);
-            if (NULL == token) {
-              break;
-            }
-            options[noArgs].optionString = token;
-          }
-        }
-
-        //Create the VM
-        JavaVMInitArgs vm_args;
-        JavaVM *vm;
-        vm_args.version = JNI_VERSION_1_2;
-        vm_args.options = options;
-        vm_args.nOptions = noArgs; 
-        vm_args.ignoreUnrecognized = 1;
-
-        rv = JNI_CreateJavaVM(&vm, (void*)&env, &vm_args);
-
-        if (hadoopJvmArgs != NULL)  {
-          free(hadoopJvmArgs);
-        }
-        free(optHadoopClassPath);
-
-        if (rv != 0) {
-            fprintf(stderr, "Call to JNI_CreateJavaVM failed "
-                    "with error: %d\n", rv);
-            return NULL;
-        }
-        jthr = invokeMethod(env, NULL, STATIC, NULL,
-                         "org/apache/hadoop/fs/FileSystem",
-                         "loadFileSystems", "()V");
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
-        }
-    }
-    else {
-        //Attach this thread to the VM
-        JavaVM* vm = vmBuf[0];
-        rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
-        if (rv != 0) {
-            fprintf(stderr, "Call to AttachCurrentThread "
-                    "failed with error: %d\n", rv);
-            return NULL;
-        }
-    }
-
-    return env;
-}
-
-/**
- * getJNIEnv: A helper function to get the JNIEnv* for the given thread.
- * If no JVM exists, then one will be created. JVM command line arguments
- * are obtained from the LIBHDFS_OPTS environment variable.
- *
- * Implementation note: we rely on POSIX thread-local storage (TLS).
- * This allows us to associate a destructor function with each thread that
- * will detach the thread from the Java VM when the thread terminates.  If we
- * fail to do this, it will cause a memory leak.
- *
- * However, POSIX TLS is not the most efficient way to do things.  It requires a
- * key to be initialized before it can be used.  Since we don't know if this key
- * is initialized at the start of this function, we have to lock a mutex first
- * and check.  Luckily, most operating systems support the more efficient
- * __thread construct, which is initialized by the linker.
- *
- * @param: None.
- * @return The JNIEnv* corresponding to the thread; NULL on failure.
- */
-JNIEnv* getJNIEnv(void)
-{
-    JNIEnv *env;
-    struct hdfsTls *tls;
-    int ret;
-
-#ifdef HAVE_BETTER_TLS
-    static __thread struct hdfsTls *quickTls = NULL;
-    if (quickTls)
-        return quickTls->env;
-#endif
-    pthread_mutex_lock(&jvmMutex);
-    if (!gTlsKeyInitialized) {
-        ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
-        if (ret) {
-            pthread_mutex_unlock(&jvmMutex);
-            fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
-                "error %d\n", ret);
-            return NULL;
-        }
-        gTlsKeyInitialized = 1;
-    }
-    tls = pthread_getspecific(gTlsKey);
-    if (tls) {
-        pthread_mutex_unlock(&jvmMutex);
-        return tls->env;
-    }
-
-    env = getGlobalJNIEnv();
-    pthread_mutex_unlock(&jvmMutex);
-    if (!env) {
-        fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
-        return NULL;
-    }
-    tls = calloc(1, sizeof(struct hdfsTls));
-    if (!tls) {
-        fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
-                sizeof(struct hdfsTls));
-        return NULL;
-    }
-    tls->env = env;
-    ret = pthread_setspecific(gTlsKey, tls);
-    if (ret) {
-        fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
-            "error code %d\n", ret);
-        hdfsThreadDestructor(tls);
-        return NULL;
-    }
-#ifdef HAVE_BETTER_TLS
-    quickTls = tls;
-#endif
-    return env;
-}
-
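
The destructor registered with pthread_key_create above, hdfsThreadDestructor, is defined earlier in the original file and is not part of this excerpt. A sketch of what such a destructor must do -- detach the exiting thread from the JVM and free the per-thread struct -- under the assumption that struct hdfsTls carries only the cached JNIEnv:

    #include <stdio.h>
    #include <stdlib.h>
    #include <jni.h>

    struct hdfsTls {
        JNIEnv *env;
    };

    /* Invoked by pthreads when a thread that called getJNIEnv exits.
     * Detaching is what prevents the Java-side thread object from
     * leaking, per the memory-leak note in getJNIEnv's documentation. */
    static void hdfsThreadDestructor(void *v)
    {
        struct hdfsTls *tls = v;
        JavaVM *vm;
        jint ret;

        ret = (*tls->env)->GetJavaVM(tls->env, &vm);
        if (ret) {
            fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed "
                    "with error %d\n", (int)ret);
        } else {
            (*vm)->DetachCurrentThread(vm);
        }
        free(tls);
    }
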
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.h
deleted file mode 100644
index f37dea7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/jni_helper.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_JNI_HELPER_H
-#define LIBHDFS_JNI_HELPER_H
-
-#include <jni.h>
-#include <stdio.h>
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <search.h>
-#include <pthread.h>
-#include <errno.h>
-
-#define PATH_SEPARATOR ':'
-
-
-/** Denote the method we want to invoke as STATIC or INSTANCE */
-typedef enum {
-    STATIC,
-    INSTANCE
-} MethType;
-
-/**
- * Create a new malloc'ed C string from a Java string.
- *
- * @param env       The JNI environment
- * @param jstr      The Java string
- * @param out       (out param) the malloc'ed C string
- *
- * @return          NULL on success; the exception otherwise
- */
-jthrowable newCStr(JNIEnv *env, jstring jstr, char **out);
-
-/**
- * Create a new Java string from a C string.
- *
- * @param env       The JNI environment
- * @param str       The C string
- * @param out       (out param) the java string
- *
- * @return          NULL on success; the exception otherwise
- */
-jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
-
-/**
- * Helper function to destroy a local reference of java.lang.Object
- * @param env: The JNIEnv pointer.
- * @param jObject: The local reference to destroy
- * @return None.
- */
-void destroyLocalReference(JNIEnv *env, jobject jObject);
-
-/** invokeMethod: Invoke a Static or Instance method.
- * className: Name of the class where the method can be found
- * methName: Name of the method
- * methSignature: the signature of the method "(arg-types)ret-type"
- * methType: The type of the method (STATIC or INSTANCE)
- * instObj: Required if the methType is INSTANCE. The object to invoke
-   the method on.
- * env: The JNIEnv pointer
- * retval: The pointer to a union type which will contain the result of the
-   method invocation, e.g. if the method returns an Object, retval will be
-   set to that, if the method returns boolean, retval will be set to the
-   value (JNI_TRUE or JNI_FALSE), etc.
- * Arguments (the method arguments) must be passed after methSignature
- * RETURNS: NULL on success; the exception otherwise.  If an exception
-   is returned, the caller owns the reference, and the result stored
-   at retval is undefined.
- */
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className, const char *methName, 
-                 const char *methSignature, ...);
-
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...);
-
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out);
-
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
-
-/** classNameOfObject: Get an object's class name.
- * @param jobj: The object.
- * @param env: The JNIEnv pointer.
- * @param name: (out param) On success, will contain a string containing the
- * class name. This string must be freed by the caller.
- * @return NULL on success, or the exception
- */
-jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name);
-
-/** getJNIEnv: A helper function to get the JNIEnv* for the given thread.
- * If no JVM exists, then one will be created. JVM command line arguments
- * are obtained from the LIBHDFS_OPTS environment variable.
- * @param: None.
- * @return The JNIEnv* corresponding to the thread; NULL on failure.
- * */
-JNIEnv* getJNIEnv(void);
-
-#endif /*LIBHDFS_JNI_HELPER_H*/
-
-/**
- * vim: ts=4: sw=4: et:
- */
-
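
As a usage sketch for the declarations above: converting a jstring to a heap-allocated C string with newCStr, then releasing both the string and any returned exception. printExceptionAndFree is assumed to come from the companion exception helpers that jni_helper.c relies on:

    #include <stdio.h>
    #include <stdlib.h>

    static void printJavaString(JNIEnv *env, jstring jstr)
    {
        char *cstr = NULL;
        jthrowable jthr;

        jthr = newCStr(env, jstr, &cstr);
        if (jthr) {
            /* The caller owns the returned exception reference and must
             * report or free it. */
            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "newCStr");
            return;
        }
        if (cstr) {
            printf("%s\n", cstr);
            free(cstr);   /* newCStr strdup's, so the caller frees */
        }
    }
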
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
index 2253079..8376712 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_multi_write.c
@@ -17,7 +17,7 @@
  */
 
 #include "expect.h"
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <errno.h>
 #include <semaphore.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
index 411a009..76507e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <inttypes.h>
 #include <jni.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
index 1a5ceb2..588cc62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <stdio.h>
 #include <stdlib.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
index a979a63..72f0b5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
@@ -17,7 +17,7 @@
  */
 
 #include "expect.h"
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <errno.h>
 #include <semaphore.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
index a21eaa4..73ddc26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "webhdfs.h"
+#include "hdfs.h"
 
 #include <limits.h>
 #include <stdio.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
index f5eb170..d265eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/test_read_bm.c
@@ -1,8 +1,9 @@
+#include "hdfs.h"
+
 #include <time.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include "webhdfs.h"
 
 #ifdef __MACH__
 #include <mach/clock.h>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/webhdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/webhdfs.h
deleted file mode 100644
index 23009a2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/webhdfs.h
+++ /dev/null
@@ -1,694 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIB_WEBHDFS_H
-#define LIB_WEBHDFS_H
-
-#include <errno.h> /* for EINTERNAL, etc. */
-#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
-#include <stdint.h> /* for uint64_t, etc. */
-#include <time.h> /* for time_t */
-#include <pthread.h>
-
-#ifndef O_RDONLY
-#define O_RDONLY 1
-#endif
-
-#ifndef O_WRONLY
-#define O_WRONLY 2
-#endif
-
-#ifndef EINTERNAL
-#define EINTERNAL 255
-#endif
-
-/** All APIs set errno to meaningful values */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-    /**
-     * Some utility decls used in libhdfs.
-     */
-    typedef int32_t   tSize; /// size of data for read/write io ops
-    typedef time_t    tTime; /// time type in seconds
-    typedef int64_t   tOffset;/// offset within the file
-    typedef uint16_t  tPort; /// port
-    
-    /**
-     * The information required for accessing webhdfs,
-     * including the network address of the namenode and the user name
-     */
-    struct hdfsBuilder {
-        int forceNewInstance;
-        const char *nn;
-        const char *nn_jni;
-        tPort port;
-        const char *kerbTicketCachePath;
-        const char *userName;
-        /*
-         * This is a new attribute compared to libhdfs.
-         * We maintain a local workingDir for constructing absolute path
-         */
-        char *workingDir;
-    };
-    
-    typedef enum tObjectKind {
-        kObjectKindFile = 'F',
-        kObjectKindDirectory = 'D',
-    } tObjectKind;
-    
-    /**
-     * For libhdfs based on JNI, this is used as
-     * the C reflection of org.apache.hadoop.fs.FileSystem.
-     * In the current libwebhdfs based on webhdfs,
-     * this is actually hdfsBuilder which contains
-     * the network address of the namenode and the user name
-     */
-    struct hdfs_internal;
-    typedef struct hdfs_internal* hdfsFS;
-    
-    /**
-     * The C equivalent of org.apache.hadoop.fs.FSData(Input|Output)Stream.
-     */
-    enum hdfsStreamType
-    {
-        UNINITIALIZED = 0,
-        INPUT = 1,
-        OUTPUT = 2,
-    };
-    
-    /**
-     * The 'file-handle' to a file in hdfs.
-     */
-    struct hdfsFile_internal {
-        void* file;
-        enum hdfsStreamType type;
-        int flags;
-        tOffset offset;
-    };
-    typedef struct hdfsFile_internal* hdfsFile;
-    
-    /**
-     * hdfsFileInfo - Information about a file/directory.
-     */
-    typedef struct  {
-        tObjectKind mKind;   /* file or directory */
-        char *mName;         /* the name of the file */
-        tTime mLastMod;      /* the last modification time for the file in seconds */
-        tOffset mSize;       /* the size of the file in bytes */
-        short mReplication;    /* the count of replicas */
-        tOffset mBlockSize;  /* the block size for the file */
-        char *mOwner;        /* the owner of the file */
-        char *mGroup;        /* the group associated with the file */
-        short mPermissions;  /* the permissions associated with the file */
-        tTime mLastAccess;    /* the last access time for the file in seconds */
-    } hdfsFileInfo;
-    
-    /**
-     * webhdfsBuffer - used to hold the data read from or written to the http connection
-     */
-    typedef struct {
-        const char *wbuffer;      /* the user's buffer for uploading */
-        size_t remaining;         /* length of content */
-        size_t offset;            /* offset for reading */
-        int openFlag;             /* check whether the hdfsOpenFile has been called before */
-        int closeFlag;      /* whether to close the http connection for writing */
-        pthread_mutex_t writeMutex;    // used for synchronization between the curl thread and the hdfsWrite thread
-        pthread_cond_t newwrite_or_close;   // transferring thread waits for this condition
-                                       // when there is no more content for transferring in the buffer
-        pthread_cond_t transfer_finish; // condition used to indicate finishing transferring (one buffer)
-    } webhdfsBuffer;
-    
-    struct webhdfsFileHandle {
-        char *absPath;
-        int bufferSize;
-        short replication;
-        tSize blockSize;
-        char *datanode;
-        webhdfsBuffer *uploadBuffer;
-        pthread_t connThread;
-    };
-    
-    // Bit fields for hdfsFile_internal flags
-#define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
-    
-    /**
-     * Determine if a file is open for read.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for read; 0 otherwise
-     */
-    int hdfsFileIsOpenForRead(hdfsFile file);
-    
-    /**
-     * Determine if a file is open for write.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for write; 0 otherwise
-     */
-    int hdfsFileIsOpenForWrite(hdfsFile file);
-    
-    /**
-     * Disable the direct read optimization for a file in libhdfs.
-     * This is mainly provided for unit testing purposes.
-     * No longer useful in libwebhdfs since libwebhdfs is based on webhdfs.
-     *
-     * @param file     The HDFS file
-     */
-    void hdfsFileDisableDirectRead(hdfsFile file);
-    
-    /**
-     * hdfsConnectAsUser - Connect to an hdfs file system as a specific user.
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @param user the user name (this is the hadoop domain user).
-     *             Passing NULL is equivalent to hdfsConnect(host, port).
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead.
-     */
-    hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
-    
-    
-    /**
-     * hdfsConnect - Connect to an hdfs file system.
-     *
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead.
-     */
-    hdfsFS hdfsConnect(const char* nn, tPort port);
-    
-    /**
-     * hdfsConnectAsUserNewInstance - Connect to an hdfs file system.
-     *
-     * In libwebhdfs this has the same effect as hdfsConnectAsUser.
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @param user   The user name to use when connecting
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead.
-     */
-    hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user);
-    
-    /**
-     * hdfsConnectNewInstance - Connect to an hdfs file system.
-     *
-     * In libwebhdfs this has the same effect as hdfsConnect.
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead.
-     */
-    hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
-    
-    /**
-     * Connect to HDFS using the parameters defined by the builder.
-     *
-     * Every successful call to hdfsBuilderConnect should be matched with a call
-     * to hdfsDisconnect, when the hdfsFS is no longer needed.
-     *
-     * @param bld    The HDFS builder
-     * @return       Returns a handle to the filesystem, or NULL on error.
-     */
-    hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
-    
-    /**
-     * Create an HDFS builder.
-     *
-     * @return The HDFS builder, or NULL on error.
-     */
-    struct hdfsBuilder *hdfsNewBuilder(void);
-    
-    /**
-     * In libhdfs: force the builder to always create a new instance of the FileSystem,
-     * rather than possibly finding one in the cache.
-     *
-     * @param bld The HDFS builder
-     * @deprecated No longer useful in libwebhdfs.
-     */
-    void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
-    
-    /**
-     * Set the HDFS NameNode to connect to.
-     *
-     * @param bld  The HDFS builder
-     * @param nn   The NameNode to use.
-     *
-     *             If the string given is 'default', the default NameNode
-     *             configuration will be used (from the XML configuration files)
-     *
-     *             If NULL is given, a LocalFileSystem will be created.
-     *
-     *             If the string starts with a protocol type such as file:// or
-     *             hdfs://, this protocol type will be used.  If not, the
-     *             hdfs:// protocol type will be used.
-     *
-     *             You may specify a NameNode port in the usual way by
-     *             passing a string of the format hdfs://<hostname>:<port>.
-     *             Alternately, you may set the port with
-     *             hdfsBuilderSetNameNodePort.  However, you must not pass the
-     *             port in two different ways.
-     */
-    void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
-    
-    /**
-     * Set the port of the HDFS NameNode to connect to.
-     *
-     * @param bld The HDFS builder
-     * @param port The port.
-     */
-    void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
-    
-    /**
-     * Set the username to use when connecting to the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param userName The user name.  The string will be shallow-copied.
-     */
-    void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
-    
-    /**
-     * Set the path to the Kerberos ticket cache to use when connecting to
-     * the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
-     *                            will be shallow-copied.
-     */
-    void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
-                                           const char *kerbTicketCachePath);
-    
-    /**
-     * Free an HDFS builder.
-     *
-     * @param bld The HDFS builder
-     */
-    void hdfsFreeBuilder(struct hdfsBuilder *bld);
-    
-    /**
-     * Get a configuration string.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will be set to NULL if the
-     *                 key isn't found.  You must free this string with
-     *                 hdfsConfStrFree.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    int hdfsConfGetStr(const char *key, char **val);
-    
-    /**
-     * Get a configuration integer.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will NOT be changed if the
-     *                 key isn't found.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    int hdfsConfGetInt(const char *key, int32_t *val);
-    
-    /**
-     * Free a configuration string found with hdfsConfGetStr.
-     *
-     * @param val      A configuration string obtained from hdfsConfGetStr
-     */
-    void hdfsConfStrFree(char *val);
-    
-    /**
-     * hdfsDisconnect - Disconnect from the hdfs file system.
-     * Disconnect from hdfs.
-     *
-     * In libwebhdfs, this simply frees the hdfsFS handle; hdfsCopy, hdfsMove, and
-     * hdfsGetDefaultBlockSize still use JNI for the FileSystem connection, so do not call them afterwards.
-     *
-     * @param fs The configured filesystem handle.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsDisconnect(hdfsFS fs);
-    
-    
-    /**
-     * hdfsOpenFile - Open a hdfs file in given mode.
-     * In libwebhdfs we simply store corresponding information in a hdfsFile.
-     *
-     * @param fs The configured filesystem handle.
-     * @param path The full path to the file.
-     * @param flags - an | of bits from fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite i.e., implies O_TRUNC),
-     * O_WRONLY|O_APPEND. Other flags are generally ignored other than (O_RDWR || (O_EXCL & O_CREAT)), which returns NULL and sets errno to ENOTSUP.
-     * @param bufferSize Size of buffer for read/write - pass 0 if you want
-     * to use the default configured values.
-     * @param replication Block replication - pass 0 if you want to use
-     * the default configured values.
-     * @param blocksize Size of block - pass 0 if you want to use the
-     * default configured values.
-     * @return Returns the handle to the open file or NULL on error.
-     */
-    hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
-                          int bufferSize, short replication, tSize blocksize);
-    
-    
-    /**
-     * hdfsCloseFile - Close an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsCloseFile(hdfsFS fs, hdfsFile file);
-    
-    
-    /**
-     * hdfsExists - Checks if a given path exists on the filesystem
-     * @param fs The configured filesystem handle.
-     * @param path The path to look for
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsExists(hdfsFS fs, const char *path);
-    
-    
-    /**
-     * hdfsSeek - Seek to given offset in file.
-     * This works only for files opened in read-only mode.
-     * In libwebhdfs we store the offset in the local hdfsFile handle, thus
-     * in this function we simply set the local offset.
-     *
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param desiredPos Offset into the file to seek into.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos);
-    
-    
-    /**
-     * hdfsTell - Get the current offset in the file, in bytes.
-     * In libwebhdfs the current offset is stored in the local hdfsFile handle,
-     * thus this function simply returns the local offset.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Current offset, -1 on error.
-     */
-    tOffset hdfsTell(hdfsFS fs, hdfsFile file);
-    
-    
-    /**
-     * hdfsRead - Read data from an open file.
-     * In libwebhdfs the read starts from the current offset, which is stored in the hdfsFile handle.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return      On success, a positive number indicating how many bytes
-     *              were read.
-     *              On end-of-file, 0.
-     *              On error, -1.  Errno will be set to the error code.
-     *              Just like the POSIX read function, hdfsRead will return -1
-     *              and set errno to EINTR if data is temporarily unavailable,
-     *              but we are not yet at the end of the file.
-     */
-    tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
-    
-    /**
-     * hdfsPread - Positional read of data from an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param position Position from which to read
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return Returns the number of bytes actually read, possibly less than
-     * length; -1 on error.
-     */
-    tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
-                    void* buffer, tSize length);
-    
-    
-    /**
-     * hdfsWrite - Write data into an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The data.
-     * @param length The no. of bytes to write.
-     * @return Returns the number of bytes written, -1 on error.
-     */
-    tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
-                    tSize length);
-    
-    
-    /**
-     * hdfsFlush - Flush the data.  Not used in libwebhdfs.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error.
-     * @deprecated Not useful in libwebhdfs.
-     */
-    int hdfsFlush(hdfsFS fs, hdfsFile file);
-    
-    
-    /**
-     * hdfsHFlush - Flush out the data in client's user buffer. After the
-     * return of this call, new readers will see the data.
-     * @param fs configured filesystem handle
-     * @param file file handle
-     * @return 0 on success, -1 on error and sets errno
-     * @deprecated Not useful in libwebhdfs.
-     */
-    int hdfsHFlush(hdfsFS fs, hdfsFile file);
-    
-    
-    /**
-     * hdfsAvailable - Number of bytes that can be read from this
-     * input stream.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns available bytes; -1 on error.
-     */
-    int hdfsAvailable(hdfsFS fs, hdfsFile file);
-    
-    
-    /**
-     * hdfsCopy - Copy file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file.
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-    
-    
-    /**
-     * hdfsMove - Move file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file.
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-    
-    
-    /**
-     * hdfsDelete - Delete file.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file.
-     * @param recursive if path is a directory and recursive is
-     * non-zero, the directory is deleted; otherwise an error is returned.
-     * In case of a file the recursive argument is irrelevant.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsDelete(hdfsFS fs, const char* path, int recursive);
-    
-    /**
-     * hdfsRename - Rename file.
-     * @param fs The configured filesystem handle.
-     * @param oldPath The path of the source file.
-     * @param newPath The path of the destination file.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
-    
-    
-    /**
-     * hdfsGetWorkingDirectory - Get the current working directory for
-     * the given filesystem. In libwebhdfs it is retrieved from the local hdfsFS handle.
-     * @param fs The configured filesystem handle.
-     * @param buffer The user-buffer to copy path of cwd into.
-     * @param bufferSize The length of user-buffer.
-     * @return Returns buffer, NULL on error.
-     */
-    char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
-    
-    
-    /**
-     * hdfsSetWorkingDirectory - Set the working directory. All relative
-     * paths will be resolved relative to it. In libwebhdfs the local hdfsFS is modified.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the new 'cwd'.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
-    
-    
-    /**
-     * hdfsCreateDirectory - Make the given file and all non-existent
-     * parents into directories.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsCreateDirectory(hdfsFS fs, const char* path);
-    
-    
-    /**
-     * hdfsSetReplication - Set the replication of the specified
-     * file to the supplied value
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file.
-     * @return Returns 0 on success, -1 on error.
-     */
-    int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
-    
-    
-    /**
-     * hdfsListDirectory - Get list of files/directories for a given
-     * directory-path. hdfsFreeFileInfo should be called to deallocate memory.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory.
-     * @param numEntries Set to the number of files/directories in path.
-     * @return Returns a dynamically-allocated array of hdfsFileInfo
-     * objects; NULL on error.
-     */
-    hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
-                                    int *numEntries);
-    
-    
-    /**
-     * hdfsGetPathInfo - Get information about a path as a (dynamically
-     * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
-     * called when the pointer is no longer needed.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file.
-     * @return Returns a dynamically-allocated hdfsFileInfo object;
-     * NULL on error.
-     */
-    hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
-    
-    
-    /**
-     * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields)
-     * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
-     * objects.
-     * @param numEntries The size of the array.
-     */
-    void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
-    
-    
-    /**
-     * hdfsGetHosts - Get hostnames where a particular block (determined by
-     * pos & blocksize) of a file is stored. The last element in the array
-     * is NULL. Due to replication, a single block could be present on
-     * multiple hosts.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file.
-     * @param start The start of the block.
-     * @param length The length of the block.
-     * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
-     * NULL on error.
-     *
-     * Not supported yet but will be supported by libwebhdfs based on webhdfs.
-     */
-    char*** hdfsGetHosts(hdfsFS fs, const char* path,
-                         tOffset start, tOffset length);
-    
-    
-    /**
-     * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts
-     * @param blockHosts The dynamically-allocated 2-d array of
-     * blocks-hosts returned by hdfsGetHosts; the last element
-     * in each inner array is NULL.
-     */
-    void hdfsFreeHosts(char ***blockHosts);
-    
-    
-    /**
-     * hdfsGetDefaultBlockSize - Get the optimum blocksize.
-     * @param fs The configured filesystem handle.
-     * @return Returns the blocksize; -1 on error.
-     */
-    tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
-    
-    
-    /**
-     * hdfsGetCapacity - Return the raw capacity of the filesystem.
-     * @param fs The configured filesystem handle.
-     * @return Returns the raw-capacity; -1 on error.
-     *
-     * Not supported yet but will be supported by libwebhdfs based on webhdfs.
-     */
-    tOffset hdfsGetCapacity(hdfsFS fs);
-    
-    
-    /**
-     * hdfsGetUsed - Return the total raw size of all files in the filesystem.
-     * @param fs The configured filesystem handle.
-     * @return Returns the total-size; -1 on error.
-     *
-     * Not supported yet but will be supported by libwebhdfs based on webhdfs.
-     */
-    tOffset hdfsGetUsed(hdfsFS fs);
-    
-    /**
-     * hdfsChown
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param owner this is a string in Hadoop land. Set to null or "" if only setting group
-     * @param group  this is a string in Hadoop land. Set to null or "" if only setting user
-     * @return 0 on success else -1
-     */
-    int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group);
-    
-    /**
-     * hdfsChmod
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mode the bitmask to set it to
-     * @return 0 on success else -1
-     */
-    int hdfsChmod(hdfsFS fs, const char* path, short mode);
-    
-    /**
-     * hdfsUtime
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mtime new modification time or -1 for no change
-     * @param atime new access time or -1 for no change
-     * @return 0 on success else -1
-     */
-    int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
-    
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*LIB_WEBHDFS_H*/
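
The test-file hunks below compile unchanged against hdfs.h because this removed header declared the same builder-based API. A minimal connect/read sketch against that shared API; "default" defers to the configured NameNode, and the file path is a placeholder:

    #include "hdfs.h"
    #include <stdio.h>

    int main(void)
    {
        struct hdfsBuilder *bld = hdfsNewBuilder();
        if (!bld)
            return 1;
        hdfsBuilderSetNameNode(bld, "default");   /* use the configured NN */
        hdfsFS fs = hdfsBuilderConnect(bld);      /* connect consumes bld */
        if (!fs) {
            fprintf(stderr, "hdfsBuilderConnect failed\n");
            return 1;
        }
        hdfsFile f = hdfsOpenFile(fs, "/tmp/example.txt", O_RDONLY, 0, 0, 0);
        if (f) {
            char buf[4096];
            tSize n = hdfsRead(fs, f, buf, sizeof(buf));
            if (n > 0)
                fwrite(buf, 1, n, stdout);
            hdfsCloseFile(fs, f);
        }
        hdfsDisconnect(fs);
        return 0;
    }
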
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index fbe0d11..d7aef66 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -17,7 +17,7 @@
 
 bin=`which $0`
 bin=`dirname ${bin}`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties b/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
index 586e066..c3ffe31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/hadoop-metrics2.properties
@@ -19,7 +19,7 @@
 # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
 
 *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period
+# default sampling period, in seconds
 *.period=10
 
 # The namenode-metrics.out will contain metrics from all context
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 92179fe..d29ab1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -39,6 +39,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
@@ -209,6 +211,7 @@
     final int writePacketSize;
     final int socketTimeout;
     final int socketCacheCapacity;
+    final long socketCacheExpiry;
     /** Wait time window (in msec) if BlockMissingException is caught */
     final int timeWindow;
     final int nCachedConnRetry;
@@ -257,6 +260,8 @@
       taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
       socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
           DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
+      socketCacheExpiry = conf.getLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+          DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
       prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
           10 * defaultBlockSize);
       timeWindow = conf
@@ -427,7 +432,7 @@
       Joiner.on(',').join(localInterfaceAddrs) + "]");
     }
     
-    this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
+    this.socketCache = SocketCache.getInstance(dfsClientConf.socketCacheCapacity, dfsClientConf.socketCacheExpiry);
   }
 
   /**
@@ -641,7 +646,6 @@
   void abort() {
     clientRunning = false;
     closeAllFilesBeingWritten(true);
-    socketCache.clear();
 
     try {
       // remove reference to this client and stop the renewer,
@@ -688,7 +692,6 @@
   public synchronized void close() throws IOException {
     if(clientRunning) {
       closeAllFilesBeingWritten(false);
-      socketCache.clear();
       clientRunning = false;
       getLeaseRenewer().closeClient(this);
       // close connections to the namenode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fb808fd..918a790 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -74,6 +74,8 @@
   public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
   public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
   
+  public static final String  DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
+  public static final long    DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000;
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 5c53644..48d0f5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -254,6 +254,9 @@
                   ", assuming security is disabled");
               return null;
             }
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Exception getting delegation token", e);
+            }
             throw e;
           }
           for (Token<? extends TokenIdentifier> t : c.getAllTokens()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
index 2fa7b55..06d2a2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
@@ -26,33 +26,112 @@
 import java.util.List;
 import java.util.Map.Entry;
 
+import java.io.IOException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.LinkedListMultimap;
 import org.apache.commons.logging.Log;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StringUtils;
 
 /**
- * A cache of sockets.
+ * A cache of input stream sockets to DataNodes.
  */
 class SocketCache {
-  static final Log LOG = LogFactory.getLog(SocketCache.class);
+  private static final Log LOG = LogFactory.getLog(SocketCache.class);
 
-  private final LinkedListMultimap<SocketAddress, SocketAndStreams> multimap;
-  private final int capacity;
-
-  /**
-   * Create a SocketCache with the given capacity.
-   * @param capacity  Max cache size.
-   */
-  public SocketCache(int capacity) {
-    multimap = LinkedListMultimap.create();
-    this.capacity = capacity;
-    if (capacity <= 0) {
-      LOG.debug("SocketCache disabled in configuration.");
+  @InterfaceAudience.Private
+  static class SocketAndStreams implements Closeable {
+    public final Socket sock;
+    public final IOStreamPair ioStreams;
+    long createTime;
+    
+    public SocketAndStreams(Socket s, IOStreamPair ioStreams) {
+      this.sock = s;
+      this.ioStreams = ioStreams;
+      this.createTime = System.currentTimeMillis();
     }
+    
+    @Override
+    public void close() {
+      if (ioStreams != null) { 
+        IOUtils.closeStream(ioStreams.in);
+        IOUtils.closeStream(ioStreams.out);
+      }
+      IOUtils.closeSocket(sock);
+    }
+
+    public long getCreateTime() {
+      return this.createTime;
+    }
+  }
+
+  private Daemon daemon;
+  /** A map from datanode address to cached sockets and streams. */
+  private static LinkedListMultimap<SocketAddress, SocketAndStreams> multimap =
+    LinkedListMultimap.create();
+  private static int capacity;
+  private static long expiryPeriod;
+  private static SocketCache scInstance = new SocketCache();
+  private static boolean isInitedOnce = false;
+ 
+  public static synchronized SocketCache getInstance(int c, long e) {
+    // capacity is only initialized once
+    if (!isInitedOnce) {
+      capacity = c;
+      expiryPeriod = e;
+
+      if (capacity == 0) {
+        LOG.info("SocketCache disabled.");
+      }
+      else if (expiryPeriod == 0) {
+        throw new IllegalStateException("Cannot initialize expiryPeriod to " +
+           expiryPeriod + " when cache is enabled.");
+      }
+      isInitedOnce = true;
+    } else { //already initialized once
+      if (capacity != c || expiryPeriod != e) {
+        LOG.info("capacity and expiry periods already set to " + capacity + 
+          " and " + expiryPeriod + " respectively. Cannot set it to " + c + 
+          " and " + e);
+      }
+    }
+
+    return scInstance;
+  }
+
+  private boolean isDaemonStarted() {
+    return daemon != null;
+  }
+
+  private synchronized void startExpiryDaemon() {
+    // start daemon only if not already started
+    if (isDaemonStarted()) {
+      return;
+    }
+    
+    daemon = new Daemon(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          SocketCache.this.run();
+        } catch(InterruptedException e) {
+          //noop
+        } finally {
+          SocketCache.this.clear();
+        }
+      }
+
+      @Override
+      public String toString() {
+        return String.valueOf(SocketCache.this);
+      }
+    });
+    daemon.start();
   }
 
   /**
@@ -61,16 +140,17 @@
    * @return  A socket with unknown state, possibly closed underneath. Or null.
    */
   public synchronized SocketAndStreams get(SocketAddress remote) {
+
     if (capacity <= 0) { // disabled
       return null;
     }
-    
-    List<SocketAndStreams> socklist = multimap.get(remote);
-    if (socklist == null) {
+
+    List<SocketAndStreams> sockStreamList = multimap.get(remote);
+    if (sockStreamList == null) {
       return null;
     }
 
-    Iterator<SocketAndStreams> iter = socklist.iterator();
+    Iterator<SocketAndStreams> iter = sockStreamList.iterator();
     while (iter.hasNext()) {
       SocketAndStreams candidate = iter.next();
       iter.remove();
@@ -86,14 +166,16 @@
    * @param sock socket not used by anyone.
    */
   public synchronized void put(Socket sock, IOStreamPair ioStreams) {
+
+    Preconditions.checkNotNull(sock);
     SocketAndStreams s = new SocketAndStreams(sock, ioStreams);
     if (capacity <= 0) {
       // Cache disabled.
       s.close();
       return;
     }
-    
-    Preconditions.checkNotNull(sock);
+ 
+    startExpiryDaemon();
 
     SocketAddress remoteAddr = sock.getRemoteSocketAddress();
     if (remoteAddr == null) {
@@ -106,7 +188,7 @@
     if (capacity == multimap.size()) {
       evictOldest();
     }
-    multimap.put(remoteAddr, new SocketAndStreams(sock, ioStreams));
+    multimap.put(remoteAddr, s);
   }
 
   public synchronized int size() {
@@ -114,13 +196,34 @@
   }
 
   /**
+   * Evict and close sockets older than expiry period from the cache.
+   */
+  private synchronized void evictExpired(long expiryPeriod) {
+    while (multimap.size() != 0) {
+      Iterator<Entry<SocketAddress, SocketAndStreams>> iter =
+        multimap.entries().iterator();
+      Entry<SocketAddress, SocketAndStreams> entry = iter.next();
+      // stop if the oldest entry has not yet expired
+      if (entry == null ||
+          System.currentTimeMillis() - entry.getValue().getCreateTime() <
+          expiryPeriod) {
+        break;
+      }
+      iter.remove();
+      SocketAndStreams s = entry.getValue();
+      s.close();
+    }
+  }
+
+  /**
    * Evict the oldest entry in the cache.
    */
   private synchronized void evictOldest() {
     Iterator<Entry<SocketAddress, SocketAndStreams>> iter =
       multimap.entries().iterator();
     if (!iter.hasNext()) {
-      throw new IllegalStateException("Cannot evict from empty cache!");
+      throw new IllegalStateException("Cannot evict from empty cache! " +
+        "capacity: " + capacity);
     }
     Entry<SocketAddress, SocketAndStreams> entry = iter.next();
     iter.remove();
@@ -129,38 +232,31 @@
   }
 
   /**
-   * Empty the cache, and close all sockets.
+   * Periodically check the cache and expire entries
+   * older than expiryPeriod milliseconds.
    */
-  public synchronized void clear() {
-    for (SocketAndStreams s : multimap.values()) {
-      s.close();
+  private void run() throws InterruptedException {
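+    // Scan the cache every expiryPeriod ms; Thread.sleep() in the loop's
+    // update clause ends the loop with an InterruptedException when the
+    // daemon thread is interrupted.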
+    for(long lastExpiryTime = System.currentTimeMillis();
+        !Thread.interrupted();
+        Thread.sleep(expiryPeriod)) {
+      final long elapsed = System.currentTimeMillis() - lastExpiryTime;
+      if (elapsed >= expiryPeriod) {
+        evictExpired(expiryPeriod);
+        lastExpiryTime = System.currentTimeMillis();
+      }
     }
-    multimap.clear();
+    clear();
+    throw new InterruptedException("Daemon Interrupted");
   }
 
-  @Override
-  protected void finalize() {
-    clear();
-  }
-  
-  @InterfaceAudience.Private
-  static class SocketAndStreams implements Closeable {
-    public final Socket sock;
-    public final IOStreamPair ioStreams;
-    
-    public SocketAndStreams(Socket s, IOStreamPair ioStreams) {
-      this.sock = s;
-      this.ioStreams = ioStreams;
+  /**
+   * Empty the cache, and close all sockets.
+   */
+  private synchronized void clear() {
+    for (SocketAndStreams sockAndStream : multimap.values()) {
+      sockAndStream.close();
     }
-    
-    @Override
-    public void close() {
-      if (ioStreams != null) { 
-        IOUtils.closeStream(ioStreams.in);
-        IOUtils.closeStream(ioStreams.out);
-      }
-      IOUtils.closeSocket(sock);
-    }
+    multimap.clear();
   }
 
 }
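
A minimal standalone sketch of the cache-with-expiry-daemon pattern the
SocketCache changes above implement: entries are kept in insertion order, the
oldest entry is inspected first, and eviction stops at the first entry that
has not yet expired. Class and method names here are illustrative, not Hadoop
APIs.

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ExpiringCache<K, V> {
      private static class Entry<V> {
        final V value;
        final long createTime = System.currentTimeMillis();
        Entry(V value) { this.value = value; }
      }

      private final long expiryMs;
      // LinkedHashMap preserves insertion order, so the oldest entry is first
      private final Map<K, Entry<V>> map = new LinkedHashMap<K, Entry<V>>();

      public ExpiringCache(long expiryMs) { this.expiryMs = expiryMs; }

      public synchronized void put(K key, V value) {
        map.put(key, new Entry<V>(value));
      }

      // Mirrors evictExpired() above: walk from the oldest entry and stop at
      // the first one that has not yet expired.
      public synchronized void evictExpired() {
        long now = System.currentTimeMillis();
        Iterator<Entry<V>> it = map.values().iterator();
        while (it.hasNext()) {
          if (now - it.next().createTime < expiryMs) {
            break;
          }
          it.remove();
        }
      }
    }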
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 0781218..817b183 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -612,7 +612,8 @@
         + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
+    DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr(
+        nodeReg.getIpAddr(), nodeReg.getXferPort());
       
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
index 082816d..6f9049a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
@@ -159,6 +159,35 @@
     }
   }
   
+  /**
+   * Find a datanode by its transfer address.
+   *
+   * @param ipAddr the datanode IP address
+   * @param xferPort the datanode transfer port
+   * @return the DatanodeDescriptor if found, or null otherwise
+   */
+  public DatanodeDescriptor getDatanodeByXferAddr(String ipAddr,
+      int xferPort) {
+    if (ipAddr == null) {
+      return null;
+    }
+
+    hostmapLock.readLock().lock();
+    try {
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
+      // no entry for this IP
+      if (nodes == null) {
+        return null;
+      }
+      for (DatanodeDescriptor containedNode : nodes) {
+        if (xferPort == containedNode.getXferPort()) {
+          return containedNode;
+        }
+      }
+      return null;
+    } finally {
+      hostmapLock.readLock().unlock();
+    }
+  }
+
   @Override
   public String toString() {
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
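
The lookup added above runs under the map's read lock so concurrent
registrations cannot mutate the per-IP array mid-scan. A standalone sketch of
the same pattern, with DatanodeDescriptor replaced by a plain illustrative
class:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class Node {
      final String ip;
      final int xferPort;
      Node(String ip, int xferPort) { this.ip = ip; this.xferPort = xferPort; }
    }

    class HostMap {
      private final Map<String, Node[]> map = new HashMap<String, Node[]>();
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      Node getByXferAddr(String ip, int xferPort) {
        if (ip == null) {
          return null;
        }
        lock.readLock().lock();
        try {
          Node[] nodes = map.get(ip);
          if (nodes == null) {
            return null;      // no datanode registered at this IP
          }
          for (Node n : nodes) {
            if (n.xferPort == xferPort) {
              return n;       // IP and transfer port both match
            }
          }
          return null;
        } finally {
          lock.readLock().unlock();
        }
      }
    }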
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 831f343..a2e0f50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -276,6 +276,9 @@
         FIELD_PERCENT_REMAINING = 9,
         FIELD_ADMIN_STATE       = 10,
         FIELD_DECOMMISSIONED    = 11,
+        FIELD_BLOCKPOOL_USED    = 12,
+        FIELD_PERBLOCKPOOL_USED = 13,
+        FIELD_FAILED_VOLUMES    = 14,
         SORT_ORDER_ASC          = 1,
         SORT_ORDER_DSC          = 2;
 
@@ -303,6 +306,12 @@
           sortField = FIELD_ADMIN_STATE;
         } else if (field.equals("decommissioned")) {
           sortField = FIELD_DECOMMISSIONED;
+        } else if (field.equals("bpused")) {
+          sortField = FIELD_BLOCKPOOL_USED;
+        } else if (field.equals("pcbpused")) {
+          sortField = FIELD_PERBLOCKPOOL_USED;
+        } else if (field.equals("volfails")) {
+          sortField = FIELD_FAILED_VOLUMES;
         } else {
           sortField = FIELD_NAME;
         }
@@ -361,6 +370,18 @@
         case FIELD_NAME: 
           ret = d1.getHostName().compareTo(d2.getHostName());
           break;
+        case FIELD_BLOCKPOOL_USED:
+          dlong = d1.getBlockPoolUsed() - d2.getBlockPoolUsed();
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
+          break;
+        case FIELD_PERBLOCKPOOL_USED:
+          ddbl = d1.getBlockPoolUsedPercent() - d2.getBlockPoolUsedPercent();
+          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
+          break;
+        case FIELD_FAILED_VOLUMES:
+          int dint = d1.getVolumeFailures() - d2.getVolumeFailures();
+          ret = (dint < 0) ? -1 : ((dint > 0) ? 1 : 0);
+          break;
         }
         return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
       }
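
Each new sort field maps a signed difference to -1/0/1 and lets the caller
flip the sign for descending order. A tiny self-contained illustration of the
three-way comparison (note that plain subtraction can overflow for extreme
long values; the fields compared above are byte counts, where this is not a
practical concern):

    public class ThreeWayCompare {
      static int sign(long d) {
        return (d < 0) ? -1 : ((d > 0) ? 1 : 0);
      }

      public static void main(String[] args) {
        long bpUsed1 = 100, bpUsed2 = 250;
        int ret = sign(bpUsed1 - bpUsed2);      // -1: node 1 sorts first
        int sortOrderDsc = -1;                  // descending flips the result
        System.out.println(ret * sortOrderDsc); // prints 1
      }
    }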
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 568fc33..36cde19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4059,7 +4059,10 @@
         return "Safe mode is OFF.";
       String leaveMsg = "";
       if (areResourcesLow()) {
-        leaveMsg = "Resources are low on NN. Safe mode must be turned off manually";
+        leaveMsg = "Resources are low on NN. " 
+        	+ "Please add or free up more resources then turn off safe mode manually.  "
+        	+ "NOTE:  If you turn off safe mode before adding resources, "
+        	+ "the NN will immediately return to safe mode.";
       } else {
         leaveMsg = "Safe mode will be turned off automatically";
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 4cbcc3c..fb5c88d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -21,6 +21,7 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -510,7 +511,7 @@
     stopHttpServer();
   }
   
-  private void startTrashEmptier(Configuration conf) throws IOException {
+  private void startTrashEmptier(final Configuration conf) throws IOException {
     long trashInterval =
         conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
     if (trashInterval == 0) {
@@ -519,7 +520,18 @@
       throw new IOException("Cannot start trash emptier with negative interval."
           + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
     }
-    this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
+    
+    // This may be called from the transitionToActive code path, in which
+    // case the current user is the administrator, not the NN. The trash
+    // emptier needs to run as the NN. See HDFS-3972.
+    FileSystem fs = SecurityUtil.doAsLoginUser(
+        new PrivilegedExceptionAction<FileSystem>() {
+          @Override
+          public FileSystem run() throws IOException {
+            return FileSystem.get(conf);
+          }
+        });
+    this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
     this.emptier.setDaemon(true);
     this.emptier.start();
   }
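
The fix above wraps the FileSystem creation in a privileged action so the
emptier runs as the NameNode's login user rather than the administrator who
triggered the transition. A hedged sketch of the same pattern, assuming
hadoop-common on the classpath:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.SecurityUtil;

    public class AsLoginUserExample {
      // Obtain a FileSystem bound to the service's login user, not the caller
      static FileSystem fileSystemAsLoginUser(final Configuration conf)
          throws IOException {
        return SecurityUtil.doAsLoginUser(
            new PrivilegedExceptionAction<FileSystem>() {
              @Override
              public FileSystem run() throws IOException {
                return FileSystem.get(conf);
              }
            });
      }
    }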
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index fe73c42..3488f07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -107,6 +107,10 @@
               DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
               SecurityUtil.getServerPrincipal(principalInConf,
                                               bindAddress.getHostName()));
+        } else if (UserGroupInformation.isSecurityEnabled()) {
+          LOG.error("WebHDFS and security are enabled, but configuration property '" +
+                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
+                    "' is not set.");
         }
         String httpKeytab = conf.get(
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
@@ -117,6 +121,10 @@
           params.put(
             DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
             httpKeytab);
+        } else if (UserGroupInformation.isSecurityEnabled()) {
+          LOG.error("WebHDFS and security are enabled, but configuration property '" +
+                    DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
+                    "' is not set.");
         }
         return params;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 9a44b1e..9d0629c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 
 /**********************************************************
@@ -122,6 +123,8 @@
   private CheckpointConf checkpointConf;
   private FSNamesystem namesystem;
 
+  private Thread checkpointThread;
+
 
   @Override
   public String toString() {
@@ -277,6 +280,15 @@
    */
   public void shutdown() {
     shouldRun = false;
+    if (checkpointThread != null) {
+      checkpointThread.interrupt();
+      try {
+        checkpointThread.join(10000);
+      } catch (InterruptedException e) {
+        LOG.info("Interrupted waiting to join on checkpointer thread");
+        Thread.currentThread().interrupt(); // maintain status
+      }
+    }
     try {
       if (infoServer != null) infoServer.stop();
     } catch (Exception e) {
@@ -586,12 +598,20 @@
       terminate(ret);
     }
 
-    // Create a never ending deamon
-    Daemon checkpointThread = new Daemon(secondary);
-    checkpointThread.start();
+    secondary.startCheckpointThread();
   }
   
   
+  public void startCheckpointThread() {
+    Preconditions.checkState(checkpointThread == null,
+        "Should not already have a thread");
+    Preconditions.checkState(shouldRun, "shouldRun should be true");
+    
+    checkpointThread = new Daemon(this);
+    checkpointThread.start();
+  }
+
+
   /**
    * Container for parsed command-line options.
    */
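
The shutdown() change above follows the standard interrupt-then-join pattern:
signal the worker, wait a bounded time for it to exit, and restore the
interrupt status if the waiter is itself interrupted. A standalone sketch:

    public class WorkerShutdown {
      private volatile boolean shouldRun = true;
      private Thread worker;

      void shutdown() {
        shouldRun = false;
        if (worker != null) {
          worker.interrupt();
          try {
            worker.join(10000);   // bounded wait (milliseconds)
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
          }
        }
      }
    }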
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 421f7bc..dd5a0e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -53,6 +54,7 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.StringUtils;
@@ -80,7 +82,7 @@
       super(fs.getConf());
       if (!(fs instanceof DistributedFileSystem)) {
         throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
-            " is not a distributed file system");
+            " is not an HDFS file system");
       }
       this.dfs = (DistributedFileSystem)fs;
     }
@@ -284,7 +286,7 @@
     FileSystem fs = getFS();
     if (!(fs instanceof DistributedFileSystem)) {
       throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
-      " is not a distributed file system");
+      " is not an HDFS file system");
     }
     return (DistributedFileSystem)fs;
   }
@@ -511,11 +513,17 @@
    * @return an exit code indicating success or failure.
    * @throws IOException
    */
-  public int fetchImage(String[] argv, int idx) throws IOException {
-    String infoServer = DFSUtil.getInfoServer(
+  public int fetchImage(final String[] argv, final int idx) throws IOException {
+    final String infoServer = DFSUtil.getInfoServer(
         HAUtil.getAddressOfActive(getDFS()), getConf(), false);
-    TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
-        new File(argv[idx]));
+    SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
+            new File(argv[idx]));
+        return null;
+      }
+    });
     return 0;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
index 2095e2c..6a50c98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
@@ -67,6 +67,25 @@
     
 };
 
+void getExceptionInfo(const char *excName, int noPrintFlags,
+                      int *excErrno, int *shouldPrint)
+{
+    int i;
+
+    for (i = 0; i < EXCEPTION_INFO_LEN; i++) {
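+        /* match if excName occurs within this table entry's name */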
+        if (strstr(gExceptionInfo[i].name, excName)) {
+            break;
+        }
+    }
+    if (i < EXCEPTION_INFO_LEN) {
+        *shouldPrint = !(gExceptionInfo[i].noPrintFlag & noPrintFlags);
+        *excErrno = gExceptionInfo[i].excErrno;
+    } else {
+        *shouldPrint = 1;
+        *excErrno = EINTERNAL;
+    }
+}
+
 int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
         const char *fmt, va_list ap)
 {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
index 54e170b..e3615131 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
@@ -65,6 +65,21 @@
 #define NOPRINT_EXC_ILLEGAL_ARGUMENT            0x10
 
 /**
+ * Get information about an exception.
+ *
+ * @param excName         The Exception name.
+ *                        This is a Java class name in JNI format.
+ * @param noPrintFlags    Flags which determine which exceptions we should NOT
+ *                        print.
+ * @param excErrno        (out param) The POSIX error number associated with the
+ *                        exception.
+ * @param shouldPrint     (out param) Nonzero if we should print this exception,
+ *                        based on the noPrintFlags and its name. 
+ */
+void getExceptionInfo(const char *excName, int noPrintFlags,
+                      int *excErrno, int *shouldPrint);
+
+/**
  * Print out information about an exception and free it.
  *
  * @param env             The JNI environment
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9fce04b..153e21b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -41,11 +41,34 @@
 </property>
 
 <property>
+  <name>dfs.namenode.rpc-address</name>
+  <value></value>
+  <description>
+    RPC address that handles all client requests. In the case of HA/Federation
+    where multiple namenodes exist, the name service id is added to the name,
+    e.g. dfs.namenode.rpc-address.ns1 or dfs.namenode.rpc-address.EXAMPLENAMESERVICE.
+    The value of this property will take the form of hdfs://nn-host1:rpc-port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.servicerpc-address</name>
+  <value></value>
+  <description>
+    RPC address for HDFS services communication. BackupNode, Datanodes and all
+    other services should connect to this address if it is configured. In the
+    case of HA/Federation where multiple namenodes exist, the name service id
+    is added to the name, e.g. dfs.namenode.servicerpc-address.ns1 or
+    dfs.namenode.servicerpc-address.EXAMPLENAMESERVICE.
+    The value of this property will take the form of hdfs://nn-host1:rpc-port.
+    If the value of this property is unset, the value of
+    dfs.namenode.rpc-address will be used as the default.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.secondary.http-address</name>
   <value>0.0.0.0:50090</value>
   <description>
     The secondary namenode http server address and port.
-    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -54,7 +77,6 @@
   <value>0.0.0.0:50010</value>
   <description>
     The datanode server address and port for data transfer.
-    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -63,7 +85,6 @@
   <value>0.0.0.0:50075</value>
   <description>
     The datanode http server address and port.
-    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -72,7 +93,6 @@
   <value>0.0.0.0:50020</value>
   <description>
     The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -87,7 +107,6 @@
   <value>0.0.0.0:50070</value>
   <description>
     The address and the base port where the dfs namenode web ui will listen on.
-    If the port is 0 then the server will start on a free port.
   </description>
 </property>
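
Illustrative only: the rpc-address/servicerpc-address fallback described
above, expressed as a client-side Configuration lookup with a made-up host
name (assumes hadoop-common on the classpath):

    import org.apache.hadoop.conf.Configuration;

    public class RpcAddressFallback {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("dfs.namenode.rpc-address", "nn-host1:8020");
        // dfs.namenode.servicerpc-address deliberately left unset
        String serviceAddr = conf.get("dfs.namenode.servicerpc-address",
            conf.get("dfs.namenode.rpc-address"));
        System.out.println("services connect to: " + serviceAddr);
        // prints: services connect to: nn-host1:8020
      }
    }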
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
new file mode 100644
index 0000000..34164f4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+public class TestHdfsNativeCodeLoader {
+  static final Log LOG = LogFactory.getLog(TestHdfsNativeCodeLoader.class);
+
+  private static boolean requireTestJni() {
+    String rtj = System.getProperty("require.test.libhadoop");
+    // require libhadoop unless the property is unset or set to "false"
+    return rtj != null && !rtj.equalsIgnoreCase("false");
+  }
+
+  @Test
+  public void testNativeCodeLoaded() {
+    if (!requireTestJni()) {
+      LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
+      return;
+    }
+    if (!NativeCodeLoader.isNativeCodeLoaded()) {
+      String LD_LIBRARY_PATH = System.getenv().get("LD_LIBRARY_PATH");
+      if (LD_LIBRARY_PATH == null) LD_LIBRARY_PATH = "";
+      fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
+          "libhadoop.so was not loaded.  LD_LIBRARY_PATH = " + LD_LIBRARY_PATH);
+    }
+    LOG.info("TestHdfsNativeCodeLoader: libhadoop.so is loaded.");
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index df8a41f..3e1451c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -211,27 +211,40 @@
   
   public static void createFile(FileSystem fs, Path fileName, long fileLen, 
       short replFactor, long seed) throws IOException {
+    createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
+        replFactor, seed);
+  }
+  
+  public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+      long fileLen, long blockSize, short replFactor, long seed)
+      throws IOException {
+    assert bufferLen > 0;
     if (!fs.mkdirs(fileName.getParent())) {
       throw new IOException("Mkdirs failed to create " + 
                             fileName.getParent().toString());
     }
     FSDataOutputStream out = null;
     try {
-      out = fs.create(fileName, replFactor);
-      byte[] toWrite = new byte[1024];
-      Random rb = new Random(seed);
-      long bytesToWrite = fileLen;
-      while (bytesToWrite>0) {
-        rb.nextBytes(toWrite);
-        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
-
-        out.write(toWrite, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
+      out = fs.create(fileName, true, fs.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        replFactor, blockSize);
+      if (fileLen > 0) {
+        byte[] toWrite = new byte[bufferLen];
+        Random rb = new Random(seed);
+        long bytesToWrite = fileLen;
+        while (bytesToWrite>0) {
+          rb.nextBytes(toWrite);
+          int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
+              : (int) bytesToWrite;
+  
+          out.write(toWrite, 0, bytesToWriteNext);
+          bytesToWrite -= bytesToWriteNext;
+        }
       }
-      out.close();
-      out = null;
     } finally {
-      IOUtils.closeStream(out);
+      if (out != null) {
+        out.close();
+      }
     }
   }
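
A hedged usage sketch of the new overload: the caller now controls the write
buffer size and block size explicitly. The file name and sizes below are made
up for illustration:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    public class CreateFileExample {
      static void createExample(FileSystem fs) throws IOException {
        DFSTestUtil.createFile(fs, new Path("/example.dat"),
            1024,             // bufferLen: write in 1 KB chunks
            4L * 1024 * 1024, // fileLen: 4 MB of pseudo-random data
            1024L * 1024,     // blockSize: 1 MB blocks
            (short) 3,        // replication factor
            0xBEEFL);         // seed, for reproducible contents
      }
    }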
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 3bb65c3..0c23858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -624,14 +624,20 @@
     }
     
     federation = nnTopology.isFederated();
-    createNameNodesAndSetConf(
-        nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
-        enableManagedDfsDirsRedundancy,
-        format, operation, clusterId, conf);
-    
+    try {
+      createNameNodesAndSetConf(
+          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+          enableManagedDfsDirsRedundancy,
+          format, operation, clusterId, conf);
+    } catch (IOException ioe) {
+      LOG.error("IOE creating namenodes. Permissions dump:\n" +
+          createPermissionsDiagnosisString(data_dir));
+      throw ioe;
+    }
     if (format) {
       if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
-        throw new IOException("Cannot remove data directory: " + data_dir);
+        throw new IOException("Cannot remove data directory: " + data_dir +
+            createPermissionsDiagnosisString(data_dir));
       }
     }
     
@@ -647,6 +653,27 @@
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
   }
   
+  /**
+   * @return a debug string to help diagnose why a given directory
+   * might have a permissions error in the context of a test case
+   */
+  private String createPermissionsDiagnosisString(File path) {
+    StringBuilder sb = new StringBuilder();
+    while (path != null) { 
+      sb.append("path '" + path + "': ").append("\n");
+      sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
+      sb.append("\tpermissions: ");
+      sb.append(path.isDirectory() ? "d": "-");
+      sb.append(path.canRead() ? "r" : "-");
+      sb.append(path.canWrite() ? "w" : "-");
+      sb.append(path.canExecute() ? "x" : "-");
+      sb.append("\n");
+      path = path.getParentFile();
+    }
+    return sb.toString();
+  }
+
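
A standalone demonstration of the parent-directory walk above; run against a
path of your choosing, it prints one stanza per component in the same
path / absolute / permissions format:

    import java.io.File;

    public class PermissionsWalk {
      public static void main(String[] args) {
        File path = new File(args.length > 0 ? args[0] : ".");
        StringBuilder sb = new StringBuilder();
        while (path != null) {
          sb.append("path '").append(path).append("':\n");
          sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
          sb.append("\tpermissions: ")
            .append(path.isDirectory() ? "d" : "-")
            .append(path.canRead() ? "r" : "-")
            .append(path.canWrite() ? "w" : "-")
            .append(path.canExecute() ? "x" : "-")
            .append("\n");
          path = path.getParentFile();
        }
        System.out.print(sb);
      }
    }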
   private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
       boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
       boolean enableManagedDfsDirsRedundancy, boolean format,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index 327aa8f..d9020e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -54,10 +55,12 @@
 
   static final int BLOCK_SIZE = 4096;
   static final int FILE_SIZE = 3 * BLOCK_SIZE;
-
+  final static int CACHE_SIZE = 4;
+  final static long CACHE_EXPIRY_MS = 200;
   static Configuration conf = null;
   static MiniDFSCluster cluster = null;
   static FileSystem fs = null;
+  static SocketCache cache;
 
   static final Path testFile = new Path("/testConnCache.dat");
   static byte authenticData[] = null;
@@ -93,6 +96,9 @@
   public static void setupCluster() throws Exception {
     final int REPLICATION_FACTOR = 1;
 
+    /* Create a socket cache. There is only one socket cache per JVM. */
+    cache = SocketCache.getInstance(CACHE_SIZE, CACHE_EXPIRY_MS);
+
     util = new BlockReaderTestUtil(REPLICATION_FACTOR);
     cluster = util.getCluster();
     conf = util.getConf();
@@ -142,10 +148,7 @@
    * Test the SocketCache itself.
    */
   @Test
-  public void testSocketCache() throws IOException {
-    final int CACHE_SIZE = 4;
-    SocketCache cache = new SocketCache(CACHE_SIZE);
-
+  public void testSocketCache() throws Exception {
     // Make a client
     InetSocketAddress nnAddr =
         new InetSocketAddress("localhost", cluster.getNameNodePort());
@@ -159,6 +162,7 @@
     DataNode dn = util.getDataNode(block);
     InetSocketAddress dnAddr = dn.getXferAddress();
 
+
     // Make some sockets to the DN
     Socket[] dnSockets = new Socket[CACHE_SIZE];
     for (int i = 0; i < dnSockets.length; ++i) {
@@ -166,6 +170,7 @@
           dnAddr.getAddress(), dnAddr.getPort());
     }
 
+
     // Insert a socket to the NN
     Socket nnSock = new Socket(nnAddr.getAddress(), nnAddr.getPort());
     cache.put(nnSock, null);
@@ -179,7 +184,7 @@
 
     assertEquals("NN socket evicted", null, cache.get(nnAddr));
     assertTrue("Evicted socket closed", nnSock.isClosed());
-
+ 
     // Lookup the DN socks
     for (Socket dnSock : dnSockets) {
       assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr).sock);
@@ -189,6 +194,51 @@
     assertEquals("Cache is empty", 0, cache.size());
   }
 
+
+  /**
+   * Test the SocketCache expiry.
+   * Verify that socket cache entries expire after the set
+   * expiry time.
+   */
+  @Test
+  public void testSocketCacheExpiry() throws Exception {
+    // Make a client
+    InetSocketAddress nnAddr =
+        new InetSocketAddress("localhost", cluster.getNameNodePort());
+    DFSClient client = new DFSClient(nnAddr, conf);
+
+    // Find out the DN addr
+    LocatedBlock block =
+        client.getNamenode().getBlockLocations(
+            testFile.toString(), 0, FILE_SIZE)
+        .getLocatedBlocks().get(0);
+    DataNode dn = util.getDataNode(block);
+    InetSocketAddress dnAddr = dn.getXferAddress();
+
+
+    // Make some sockets to the DN and put in cache
+    Socket[] dnSockets = new Socket[CACHE_SIZE];
+    for (int i = 0; i < dnSockets.length; ++i) {
+      dnSockets[i] = client.socketFactory.createSocket(
+          dnAddr.getAddress(), dnAddr.getPort());
+      cache.put(dnSockets[i], null);
+    }
+
+    // Client side still has the sockets cached
+    assertEquals(CACHE_SIZE, client.socketCache.size());
+
+    // sleep for a second and see if the entries expired
+    Thread.sleep(CACHE_EXPIRY_MS + 1000);
+    
+    // Client side has no sockets cached
+    assertEquals(0, client.socketCache.size());
+
+    // sleep for another second and see if
+    // the daemon thread runs fine on an empty cache
+    Thread.sleep(CACHE_EXPIRY_MS + 1000);
+  }
+
+
   /**
    * Read a file served entirely from one DN. Seek around and read from
    * different offsets. And verify that they all use the same socket.
@@ -229,33 +279,6 @@
 
     in.close();
   }
-  
-  /**
-   * Test that the socket cache can be disabled by setting the capacity to
-   * 0. Regression test for HDFS-3365.
-   */
-  @Test
-  public void testDisableCache() throws IOException {
-    LOG.info("Starting testDisableCache()");
-
-    // Reading with the normally configured filesystem should
-    // cache a socket.
-    DFSTestUtil.readFile(fs, testFile);
-    assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
-    
-    // Configure a new instance with no caching, ensure that it doesn't
-    // cache anything
-    Configuration confWithoutCache = new Configuration(fs.getConf());
-    confWithoutCache.setInt(
-        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
-    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
-    try {
-      DFSTestUtil.readFile(fsWithoutCache, testFile);
-      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
-    } finally {
-      fsWithoutCache.close();
-    }
-  }
 
   @AfterClass
   public static void teardownCluster() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 92ac17a..77ea9c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -37,7 +37,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -141,13 +140,6 @@
     }
   }
   
-  void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
-    byte [] arr = new byte[fileLen];
-    FSDataOutputStream out = fs.create(path);
-    out.write(arr);
-    out.close();
-  }
-  
   void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
     byte [] arr = new byte[fileLen];
     FSDataInputStream in = fs.open(path);
@@ -357,7 +349,9 @@
     
     int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
     
-    createFile(fileSys, file, fileLen);
+    DFSTestUtil.createFile(fileSys, file, fileLen, fileLen,
+        fileSys.getDefaultBlockSize(file),
+        fileSys.getDefaultReplication(file), 0L);
 
     // get the first blockid for the file
     final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index 4d32b1f..63371ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -27,6 +27,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -92,6 +93,58 @@
   }
   
   @Test
+  public void testChangeStorageID() throws Exception {
+    final String DN_IP_ADDR = "127.0.0.1";
+    final String DN_HOSTNAME = "localhost";
+    final int DN_XFER_PORT = 12345;
+    final int DN_INFO_PORT = 12346;
+    final int DN_IPC_PORT = 12347;
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .build();
+      InetSocketAddress addr = new InetSocketAddress(
+        "localhost",
+        cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+      // register a datanode
+      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
+          .getCTime();
+      StorageInfo mockStorageInfo = mock(StorageInfo.class);
+      doReturn(nnCTime).when(mockStorageInfo).getCTime();
+      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+          .getLayoutVersion();
+      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Expected a registered datanode", 1, report.length);
+
+      // register the same datanode again with a different storage ID
+      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Datanode with changed storage ID not recognized",
+          1, report.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
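
The registration test above stubs StorageInfo with Mockito's
doReturn(...).when(...) form, which configures the stub without invoking the
real method. A minimal self-contained example of the same style (assumes
Mockito on the classpath):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.util.List;

    public class StubbingSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        List<String> stub = (List<String>) mock(List.class);
        doReturn(42).when(stub).size(); // size() is not really invoked here
        System.out.println(stub.size()); // prints 42
      }
    }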
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b668779..9d2edc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -120,12 +120,9 @@
       DFSTestUtil.readFile(fileSys, p);
       
       DFSClient client = ((DistributedFileSystem)fileSys).dfs;
-      SocketCache cache = client.socketCache;
-      assertEquals(1, cache.size());
 
       fileSys.close();
       
-      assertEquals(0, cache.size());
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index 28c3e9c..0e0d33c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -79,7 +79,8 @@
     hftpfs = cluster.getHftpFileSystem(0);
     dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
     file1 = new Path("filestatus.dat");
-    writeFile(fs, file1, 1, fileSize, blockSize);
+    DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
+        seed);
   }
   
   @AfterClass
@@ -87,18 +88,6 @@
     fs.close();
     cluster.shutdown();
   }
-
-  private static void writeFile(FileSystem fileSys, Path name, int repl,
-      int fileSize, int blockSize) throws IOException {
-    // Create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true,
-        HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void checkFile(FileSystem fileSys, Path name, int repl)
       throws IOException, InterruptedException, TimeoutException {
@@ -218,7 +207,8 @@
 
     // create another file that is smaller than a block.
     Path file2 = new Path(dir, "filestatus2.dat");
-    writeFile(fs, file2, 1, blockSize/4, blockSize);
+    DFSTestUtil.createFile(fs, file2, blockSize/4, blockSize/4, blockSize,
+        (short) 1, seed);
     checkFile(fs, file2, 1);
     
     // verify file attributes
@@ -230,7 +220,8 @@
 
     // Create another file in the same directory
     Path file3 = new Path(dir, "filestatus3.dat");
-    writeFile(fs, file3, 1, blockSize/4, blockSize);
+    DFSTestUtil.createFile(fs, file3, blockSize/4, blockSize/4, blockSize,
+        (short) 1, seed);
     checkFile(fs, file3, 1);
     file3 = fs.makeQualified(file3);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index c1f4a67..55d1aa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -110,9 +110,7 @@
       // do the writing but do not close the FSDataOutputStream
       // in order to mimic the ongoing writing
       final Path fileName = new Path("/file1");
-      stm = fileSys.create(
-          fileName,
-          true,
+      stm = fileSys.create(fileName, true,
           fileSys.getConf().getInt(
               CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
           (short) 3, blockSize);
@@ -180,29 +178,15 @@
 
     final short REPLICATION_FACTOR = (short) 2;
     final int DEFAULT_BLOCK_SIZE = 1024;
-    final Random r = new Random();
 
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
         REPLICATION_FACTOR).build();
     try {
       cluster.waitActive();
-
-      // create a file with two blocks
-      FileSystem fs = cluster.getFileSystem();
-      FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
-          REPLICATION_FACTOR);
-      byte[] data = new byte[1024];
       long fileLen = 2 * DEFAULT_BLOCK_SIZE;
-      long bytesToWrite = fileLen;
-      while (bytesToWrite > 0) {
-        r.nextBytes(data);
-        int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
-            : (int) bytesToWrite;
-        out.write(data, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
-      }
-      out.close();
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
+          fileLen, REPLICATION_FACTOR, 0L);
 
       // get blocks & data nodes
       List<LocatedBlock> locatedBlocks;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
index 6be9af8..6dd7545 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
@@ -41,6 +41,8 @@
 
   @Test
   public void testHdfsDelegationToken() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
     final Configuration conf = new Configuration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
@@ -265,4 +267,4 @@
     @Override
     protected void initDelegationToken() throws IOException {}
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
index 3fa5eaa..6cb0ad1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
@@ -102,9 +102,15 @@
   
   @AfterClass
   public static void tearDown() throws IOException {
-    hdfs.close();
-    hftpFs.close();
-    cluster.shutdown();
+    if (hdfs != null) {
+      hdfs.close();
+    }
+    if (hftpFs != null) {
+      hftpFs.close();
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
index 0f8d7d0..345c150 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
@@ -53,19 +53,23 @@
     boolean timedout = false;
 
     HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
-    HttpURLConnection conn = fs.openConnection("/", "");
-    timedout = false;
     try {
-      // this will consume the only slot in the backlog
-      conn.getInputStream();
-    } catch (SocketTimeoutException ste) {
-      timedout = true;
-      assertEquals("Read timed out", ste.getMessage());
+      HttpURLConnection conn = fs.openConnection("/", "");
+      timedout = false;
+      try {
+        // this will consume the only slot in the backlog
+        conn.getInputStream();
+      } catch (SocketTimeoutException ste) {
+        timedout = true;
+        assertEquals("Read timed out", ste.getMessage());
+      } finally {
+        if (conn != null) conn.disconnect();
+      }
+      assertTrue("read timedout", timedout);
+      assertTrue("connect timedout", checkConnectTimeout(fs, false));
     } finally {
-      if (conn != null) conn.disconnect();
+      fs.close();
     }
-    assertTrue("read timedout", timedout);
-    assertTrue("connect timedout", checkConnectTimeout(fs, false));
   }
 
   @Test
@@ -79,20 +83,24 @@
     boolean timedout = false;
 
     HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
-    HttpURLConnection conn = null;
-    timedout = false;
     try {
-      // this will consume the only slot in the backlog
-      conn = fs.openConnection("/", "");
-    } catch (SocketTimeoutException ste) {
-      // SSL expects a negotiation, so it will timeout on read, unlike hftp
-      timedout = true;
-      assertEquals("Read timed out", ste.getMessage());
+      HttpURLConnection conn = null;
+      timedout = false;
+      try {
+        // this will consume the only slot in the backlog
+        conn = fs.openConnection("/", "");
+      } catch (SocketTimeoutException ste) {
+        // SSL expects a negotiation, so it will timeout on read, unlike hftp
+        timedout = true;
+        assertEquals("Read timed out", ste.getMessage());
+      } finally {
+        if (conn != null) conn.disconnect();
+      }
+      assertTrue("ssl read connect timedout", timedout);
+      assertTrue("connect timedout", checkConnectTimeout(fs, true));
     } finally {
-      if (conn != null) conn.disconnect();
+      fs.close();
     }
-    assertTrue("ssl read connect timedout", timedout);
-    assertTrue("connect timedout", checkConnectTimeout(fs, true));
   }
   
   private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index ab28ce2..a42b034 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -52,22 +52,6 @@
   private static final Log LOG = LogFactory.getLog(
       "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
 
-  
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-                                                throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[filesize];
-    for (int i=0; i<buffer.length; i++) {
-      buffer[i] = '1';
-    }
-    stm.write(buffer);
-    stm.close();
-  }
-  
-  // Waits for all of the blocks to have expected replication
 
   // Waits for all of the blocks to have expected replication
   private void waitForBlockReplication(String filename, 
@@ -149,7 +133,8 @@
                                             cluster.getNameNodePort()),
                                             conf);
       
-      writeFile(cluster.getFileSystem(), testPath, numDataNodes);
+      DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
+          filesize, blockSize, (short) numDataNodes, 0L);
       waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
       Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
       
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index e5ef1ac..3978444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -50,19 +50,6 @@
   Random myrand = new Random();
   Path hostsFile;
   Path excludeFile;
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
@@ -105,7 +92,8 @@
      System.out.println("Creating testdir1 and testdir1/test1.dat.");
      Path dir1 = new Path("testdir1");
      Path file1 = new Path(dir1, "test1.dat");
-     writeFile(fileSys, file1, replicas);
+     DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+         (short) replicas, seed);
      FileStatus stat = fileSys.getFileStatus(file1);
      long mtime1 = stat.getModificationTime();
      assertTrue(mtime1 != 0);
@@ -120,7 +108,8 @@
      //
      System.out.println("Creating testdir1/test2.dat.");
      Path file2 = new Path(dir1, "test2.dat");
-     writeFile(fileSys, file2, replicas);
+     DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+         (short) replicas, seed);
      stat = fileSys.getFileStatus(file2);
 
      //
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
index dacd4bc..1c59eca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
@@ -83,7 +83,7 @@
   static class DirectReadWorkerHelper implements ReadWorkerHelper {
     @Override
     public int read(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException {
-      ByteBuffer bb = ByteBuffer.wrap(target);
+      ByteBuffer bb = ByteBuffer.allocateDirect(target.length);
       int cnt = 0;
       synchronized(dis) {
         dis.seek(startOff);
@@ -95,6 +95,8 @@
           cnt += read;
         }
       }
+      bb.clear();
+      bb.get(target);
       return cnt;
     }
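
The helper above now reads into an off-heap (direct) buffer and copies the
bytes back into the caller's array. The clear()/get() pair works because
clear() resets the position to 0 and the buffer's capacity equals
target.length. A tiny self-contained demonstration:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class DirectBufferCopy {
      public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocateDirect(4);
        bb.put(new byte[] { 1, 2, 3, 4 });   // fill the direct buffer
        byte[] target = new byte[4];
        bb.clear();                          // position = 0, limit = capacity
        bb.get(target);                      // drain into the heap array
        System.out.println(Arrays.toString(target)); // [1, 2, 3, 4]
      }
    }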
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 5569541..1e0681f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -41,11 +41,9 @@
   boolean simulatedStorage = false;
 
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains three blocks of data
-    DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
-                                          blockSize);
     // test empty file open and read
-    stm.close();
+    DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 0,
+        blockSize, (short) 1, seed);
     FSDataInputStream in = fileSys.open(name);
     byte[] buffer = new byte[12 * blockSize];
     in.readFully(0, buffer, 0, 0);
@@ -62,11 +60,8 @@
       assertTrue("Cannot delete file", false);
     
     // now create the real file
-    stm = fileSys.create(name, true, 4096, (short)1, blockSize);
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
+    DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
+        blockSize, (short) 1, seed);
   }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 7ea963d..1ec395d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -26,15 +26,12 @@
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -61,19 +58,6 @@
   private static final int numDatanodes = racks.length;
   private static final Log LOG = LogFactory.getLog(
                                        "org.apache.hadoop.hdfs.TestReplication");
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   /* check if there are at least two nodes are on the same rack */
   private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -222,19 +206,25 @@
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("/smallblocktest.dat");
-      writeFile(fileSys, file1, 3);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 3, seed);
       checkFile(fileSys, file1, 3);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 10);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 10, seed);
       checkFile(fileSys, file1, 10);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 4);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 4, seed);
       checkFile(fileSys, file1, 4);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 1);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 1, seed);
       checkFile(fileSys, file1, 1);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 2);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 2, seed);
       checkFile(fileSys, file1, 2);
       cleanupFile(fileSys, file1);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
index c2f1cf3..2ff1c29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
@@ -40,16 +40,6 @@
   static final long seed = 0xDEADBEEFL;
   static final int ONEMB = 1 << 20;
   
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains 1MB
-    DataOutputStream stm = fileSys.create(name);
-    byte[] buffer = new byte[ONEMB];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-  
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
       assertEquals(message+" byte "+(from+idx)+" differs. expected "+
@@ -132,7 +122,9 @@
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("seektest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+          fileSys.getDefaultBlockSize(file1),
+          fileSys.getDefaultReplication(file1), seed);
       seekReadFile(fileSys, file1);
       smallReadSeek(fileSys, file1);
       cleanupFile(fileSys, file1);
@@ -151,7 +143,9 @@
     FileSystem fileSys = FileSystem.getLocal(conf);
     try {
       Path file1 = new Path("build/test/data", "seektest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+          fileSys.getDefaultBlockSize(file1),
+          fileSys.getDefaultReplication(file1), seed);
       seekReadFile(fileSys, file1);
       cleanupFile(fileSys, file1);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
index 719a798..623c1f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
@@ -115,6 +115,17 @@
     stm.close();
   }
 
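+  // A direct ByteBuffer (see allocateDirect() below) has no accessible
+  // backing array, so ByteBuffer.array() would throw; this helper copies
+  // the buffer's full contents out instead.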
+  private static byte [] arrayFromByteBuffer(ByteBuffer buf) {
+    ByteBuffer alt = buf.duplicate();
+    alt.clear();
+    byte[] arr = new byte[alt.remaining()];
+    alt.get(arr);
+    return arr;
+  }
+  
   /**
    * Verifies that reading a file with the direct read(ByteBuffer) api gives the expected set of bytes.
    */
@@ -122,7 +130,7 @@
       int readOffset) throws IOException {
     HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
-    ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
 
     IOUtils.skipFully(stm, readOffset);
 
@@ -136,7 +144,8 @@
     // Read across chunk boundary
     actual.limit(Math.min(actual.capacity(), nread + 517));
     nread += stm.read(actual);
-    checkData(actual.array(), readOffset, expected, nread, "A few bytes");
+    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
+        "A few bytes");
     //Now read rest of it
     actual.limit(actual.capacity());
     while (actual.hasRemaining()) {
@@ -147,7 +156,7 @@
       }
       nread += nbytes;
     }
-    checkData(actual.array(), readOffset, expected, "Read 3");
+    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
     stm.close();
   }
 
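For readers unfamiliar with the pitfall the two checkData() changes above work around: a heap ByteBuffer exposes its storage via array(), but a direct ByteBuffer does not, and array() throws UnsupportedOperationException. A minimal, self-contained demonstration (plain JDK; the class name is hypothetical):

    import java.nio.ByteBuffer;

    public class DirectBufferDemo {
      public static void main(String[] args) {
        ByteBuffer heap = ByteBuffer.allocate(8);
        System.out.println(heap.hasArray());    // true: array() is safe

        ByteBuffer direct = ByteBuffer.allocateDirect(8);
        System.out.println(direct.hasArray());  // false: array() throws

        direct.put(new byte[] { 1, 2, 3 });
        // The copy-out idiom used by arrayFromByteBuffer() above:
        ByteBuffer dup = direct.duplicate();
        dup.clear();                            // position=0, limit=capacity
        byte[] copy = new byte[dup.remaining()];
        dup.get(copy);
        System.out.println(copy.length);        // 8 (full capacity)
      }
    }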
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
index 8cbb4fd..90f47e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
@@ -42,18 +42,6 @@
   static final int blockSize = 1;
   static final int fileSize = 20;
   boolean simulatedStorage = false;
-
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) 1, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
@@ -105,7 +93,8 @@
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("smallblocktest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 1, seed);
       checkFile(fileSys, file1);
       cleanupFile(fileSys, file1);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSocketCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSocketCache.java
new file mode 100644
index 0000000..255d408
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSocketCache.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.security.token.Token;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * This class tests the client connection caching in a single node
+ * mini-cluster.
+ */
+public class TestSocketCache {
+  static final Log LOG = LogFactory.getLog(TestSocketCache.class);
+
+  static final int BLOCK_SIZE = 4096;
+  static final int FILE_SIZE = 3 * BLOCK_SIZE;
+  final static int CACHE_SIZE = 4;
+  final static long CACHE_EXPIRY_MS = 200;
+  static Configuration conf = null;
+  static MiniDFSCluster cluster = null;
+  static FileSystem fs = null;
+  static SocketCache cache;
+
+  static final Path testFile = new Path("/testConnCache.dat");
+  static byte authenticData[] = null;
+
+  static BlockReaderTestUtil util = null;
+
+
+  /**
+   * A mock Answer to remember the BlockReader used.
+   *
+ * It verifies that all invocations of DFSInputStream.getBlockReader()
+ * use the same socket.
+   */
+  private class MockGetBlockReader implements Answer<RemoteBlockReader2> {
+    public RemoteBlockReader2 reader = null;
+    private Socket sock = null;
+
+    @Override
+    public RemoteBlockReader2 answer(InvocationOnMock invocation) throws Throwable {
+      RemoteBlockReader2 prevReader = reader;
+      reader = (RemoteBlockReader2) invocation.callRealMethod();
+      if (sock == null) {
+        sock = reader.dnSock;
+      } else if (prevReader != null) {
+        assertSame("DFSInputStream should use the same socket",
+                   sock, reader.dnSock);
+      }
+      return reader;
+    }
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    final int REPLICATION_FACTOR = 1;
+
+    HdfsConfiguration confWithoutCache = new HdfsConfiguration();
+    confWithoutCache.setInt(
+        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+    util = new BlockReaderTestUtil(REPLICATION_FACTOR, confWithoutCache);
+    cluster = util.getCluster();
+    conf = util.getConf();
+
+    authenticData = util.writeFile(testFile, FILE_SIZE / 1024);
+  }
+
+
+  /**
+   * (Optionally) seek to position, read and verify data.
+   *
+   * Seek to specified position if pos is non-negative.
+   */
+  private void pread(DFSInputStream in,
+                     long pos,
+                     byte[] buffer,
+                     int offset,
+                     int length)
+      throws IOException {
+    assertTrue("Test buffer too small", buffer.length >= offset + length);
+
+    if (pos >= 0)
+      in.seek(pos);
+
+    LOG.info("Reading from file of size " + in.getFileLength() +
+             " at offset " + in.getPos());
+
+    // The read loop below consumes offset/length, so capture the start
+    // state first; looping over the consumed length would verify nothing.
+    final long readPos = in.getPos();
+    final int verifyOffset = offset;
+    final int verifyLength = length;
+    while (length > 0) {
+      int cnt = in.read(buffer, offset, length);
+      assertTrue("Error in read", cnt > 0);
+      offset += cnt;
+      length -= cnt;
+    }
+
+    // Verify the bytes just read against the authentic data
+    for (int i = 0; i < verifyLength; ++i) {
+      byte actual = buffer[verifyOffset + i];
+      byte expect = authenticData[(int)readPos + i];
+      assertEquals("Read data mismatch at file offset " + (readPos + i) +
+                   ". Expects " + expect + "; got " + actual,
+                   expect, actual);
+    }
+  }
+
+  
+  /**
+   * Test that the socket cache can be disabled by setting the capacity to
+   * 0. Regression test for HDFS-3365.
+   */
+  @Test
+  public void testDisableCache() throws IOException {
+    LOG.info("Starting testDisableCache()");
+
+    // Configure a new instance with no caching, ensure that it doesn't
+    // cache anything
+
+    FileSystem fsWithoutCache = FileSystem.newInstance(conf);
+    try {
+      DFSTestUtil.readFile(fsWithoutCache, testFile);
+      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
+    } finally {
+      fsWithoutCache.close();
+    }
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws Exception {
+    util.shutdown();
+  }
+}
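TestSocketCache defines MockGetBlockReader, but this hunk does not show where it is attached. For context, an Answer that delegates through callRealMethod() is the standard Mockito way to observe every call on a spy while preserving the real behavior; a self-contained sketch of the pattern (hypothetical Service class, Mockito on the classpath):

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.spy;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class SpyAnswerSketch {
      static class Service {
        int calls = 0;
        int doWork() { return ++calls; }
      }

      public static void main(String[] args) {
        Service svc = spy(new Service());
        // Run the real method, but remember what it returned; the same
        // shape as MockGetBlockReader remembering the BlockReader's socket.
        doAnswer(new Answer<Integer>() {
          @Override
          public Integer answer(InvocationOnMock invocation) throws Throwable {
            Integer result = (Integer) invocation.callRealMethod();
            System.out.println("doWork returned " + result);
            return result;
          }
        }).when(svc).doWork();
        svc.doWork();  // prints "doWork returned 1"
      }
    }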
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
index 30c7ce9..28c5194 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
@@ -33,17 +33,17 @@
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer0() throws Exception {
     new TestBalancer().testBalancer0Internal(conf);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer1() throws Exception {
     new TestBalancer().testBalancer1Internal(conf);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer2() throws Exception {
     new TestBalancer().testBalancer2Internal(conf);
   }
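The only change to this test class is the addition of explicit timeouts. In JUnit 4, @Test(timeout=...) fails the test once it has run longer than the given number of milliseconds, so a wedged balancer now produces a test failure instead of hanging the whole surefire run. Shape of the annotation, for reference:

    import org.junit.Test;

    public class TimeoutExample {
      @Test(timeout = 60000)  // fail, rather than hang, after 60 seconds
      public void mustFinishWithinAMinute() throws Exception {
        Thread.sleep(10);     // stands in for real work
      }
    }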
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
index 8c8674d..c07fae4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
@@ -53,7 +53,7 @@
    * datanode, namenode should ask to invalidate that corrupted block and
    * schedule replication for one more replica for that under replicated block.
    */
-  @Test
+  @Test(timeout=60000)
   public void testBlockInvalidationWhenRBWReplicaMissedInDN()
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
index d024bcd..bad1fff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
@@ -22,6 +22,7 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
@@ -29,7 +30,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -399,4 +402,43 @@
                           ugi.getAuthenticationMethod());
     }
   }
+
+  @Test
+  public void testSortNodeByFields() throws Exception {
+    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "storage1",
+        1234, 2345, 3456);
+    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
+        1235, 2346, 3457);
+    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
+        100, 924, 100, 10, 2);
+    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
+        200, 1848, 200, 20, 1);
+    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    live.add(dnDesc1);
+    live.add(dnDesc2);
+
+    // Test sorting by failed volumes
+    JspHelper.sortNodeList(live, "volfails", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "volfails", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+
+    // Test sorting by Blockpool used
+    JspHelper.sortNodeList(live, "bpused", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    JspHelper.sortNodeList(live, "bpused", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+
+    // Test sorting by Percentage Blockpool used
+    JspHelper.sortNodeList(live, "pcbpused", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "pcbpused", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+  }
 }
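JspHelper.sortNodeList itself is not part of this patch; the new test only drives it with (field, order) string pairs, where the order token is literally "ASC" or "DSC". A minimal sketch of the comparator-per-field idea being exercised (hypothetical Node class and field set):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public class SortByFieldSketch {
      static class Node {
        final int volFails;
        Node(int volFails) { this.volFails = volFails; }
      }

      static void sortNodeList(List<Node> nodes, String field, String order) {
        Comparator<Node> cmp;
        if ("volfails".equals(field)) {
          cmp = new Comparator<Node>() {
            @Override
            public int compare(Node a, Node b) {
              return a.volFails < b.volFails ? -1
                   : a.volFails == b.volFails ? 0 : 1;
            }
          };
        } else {
          throw new IllegalArgumentException("unknown field: " + field);
        }
        // Ascending for "ASC"; anything else (e.g. "DSC") sorts descending.
        Collections.sort(nodes,
            "ASC".equals(order) ? cmp : Collections.reverseOrder(cmp));
      }

      public static void main(String[] args) {
        List<Node> live = new ArrayList<Node>();
        live.add(new Node(2));
        live.add(new Node(1));
        sortNodeList(live, "volfails", "ASC");
        System.out.println(live.get(0).volFails);  // 1
        sortNodeList(live, "volfails", "DSC");
        System.out.println(live.get(0).volFails);  // 2
      }
    }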
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 351a61c..50272f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -216,48 +217,62 @@
       LOG.info("dn bpos len (still should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
     } finally {
-      if(cluster != null) 
-        cluster.shutdown();
+      cluster.shutdown();
     }
   }
 
   @Test
   public void testMiniDFSClusterWithMultipleNN() throws IOException {
-
     Configuration conf = new HdfsConfiguration();
     // start Federated cluster and add a node.
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
       .build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
     
     // add a node
-    cluster.addNameNode(conf, 0);
-    Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
-    cluster.shutdown();
+    try {
+      Assert.assertNotNull(cluster);
+      cluster.waitActive();
+      Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+
+      cluster.addNameNode(conf, 0);
+      Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
+    } catch (IOException ioe) {
+      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+    } finally {
+      cluster.shutdown();
+    }
         
     // 2. start with Federation flag set
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
       .build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
     
-    // add a node
-    cluster.addNameNode(conf, 0);
-    Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
-    cluster.shutdown();
+    try {
+      Assert.assertNotNull(cluster);
+      cluster.waitActive();
+      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+    
+      // add a node
+      cluster.addNameNode(conf, 0);
+      Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+    } catch (IOException ioe) {
+      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+    } finally {
+      cluster.shutdown();
+    }
 
     // 3. start non-federated
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
     
     // add a node
     try {
+      cluster.waitActive();
+      Assert.assertNotNull(cluster);
+      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
       cluster.addNameNode(conf, 9929);
       Assert.fail("shouldn't be able to add another NN to non federated cluster");
     } catch (IOException e) {
@@ -268,6 +283,4 @@
       cluster.shutdown();
     }
   }
-      
-
 }
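The restructuring above applies the standard mini-cluster lifecycle idiom consistently: wait for the cluster to come up before asserting anything, keep every assertion inside try, and shut the cluster down in finally so its ports and storage directories are reclaimed even when an assertion fails. Skeleton of the pattern:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();  // don't assert against a half-started cluster
          // ... assertions and cluster mutations go here ...
        } finally {
          cluster.shutdown();    // always runs, pass or fail
        }
      }
    }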
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 13f8719..b81feac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,6 +65,10 @@
   }
   
   static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+  
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 4096;
+  static final int fileSize = 8192;
 
   @Before
   public void setUp() throws Exception {
@@ -350,14 +355,17 @@
           + NetUtils.getHostPortString(add)).toUri(), conf);
       boolean canWrite = true;
       try {
-        TestCheckpoint.writeFile(bnFS, file3, replication);
+        DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize,
+            replication, seed);
       } catch (IOException eio) {
         LOG.info("Write to BN failed as expected: ", eio);
         canWrite = false;
       }
       assertFalse("Write to BackupNode must be prohibited.", canWrite);
 
-      TestCheckpoint.writeFile(fileSys, file3, replication);
+      DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize,
+          replication, seed);
+      
       TestCheckpoint.checkFile(fileSys, file3, replication);
       // should also be on BN right away
       assertTrue("file3 does not exist on BackupNode",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 58fbfec..8a91de7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -28,22 +28,21 @@
 import static org.junit.Assert.fail;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-import java.util.Random;
 
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
@@ -51,6 +50,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -76,6 +76,7 @@
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
@@ -116,19 +117,22 @@
     faultInjector = Mockito.mock(CheckpointFaultInjector.class);
     CheckpointFaultInjector.instance = faultInjector;
   }
-
-  static void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[TestCheckpoint.fileSize];
-    Random rand = new Random(TestCheckpoint.seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
+  @After
+  public void checkForSNNThreads() {
+    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+    
+    ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
+    for (ThreadInfo info : infos) {
+      if (info == null) continue;
+      LOG.info("Check thread: " + info.getThreadName());
+      if (info.getThreadName().contains("SecondaryNameNode")) {
+        fail("Leaked thread: " + info + "\n" +
+            Joiner.on("\n").join(info.getStackTrace()));
+      }
+    }
+    LOG.info("--------");
+  }
   
   static void checkFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
@@ -259,7 +263,8 @@
       //
       // Create a new file
       //
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
@@ -323,7 +328,8 @@
       //
       // Create a new file
       //
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
@@ -394,7 +400,8 @@
       //
       // Create a new file
       //
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
@@ -580,7 +587,8 @@
       //
       // Create a new file
       //
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file1, replication);
     } finally {
       fileSys.close();
@@ -906,7 +914,8 @@
       //
       // Create file1
       //
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file1, replication);
 
       //
@@ -933,7 +942,8 @@
       cleanupFile(fileSys, file1);
 
       // create new file file2
-      writeFile(fileSys, file2, replication);
+      DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fileSys, file2, replication);
 
       //
@@ -999,7 +1009,8 @@
       }
       // create new file
       Path file = new Path("namespace.dat");
-      writeFile(fs, file, replication);
+      DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize,
+          replication, seed);
       checkFile(fs, file, replication);
 
       // create new link
@@ -1746,7 +1757,7 @@
   /**
    * Test that the 2NN triggers a checkpoint after the configurable interval
    */
-  @Test
+  @Test(timeout=30000)
   public void testCheckpointTriggerOnTxnCount() throws Exception {
     MiniDFSCluster cluster = null;
     SecondaryNameNode secondary = null;
@@ -1760,8 +1771,7 @@
           .format(true).build();
       FileSystem fs = cluster.getFileSystem();
       secondary = startSecondaryNameNode(conf);
-      Thread t = new Thread(secondary);
-      t.start();
+      secondary.startCheckpointThread();
       final NNStorage storage = secondary.getFSImage().getStorage();
 
       // 2NN should checkpoint at startup
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 0138070..6483386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -116,19 +117,6 @@
     stm.close();
   }
 
-  private void writeFile(FileSystem fileSys, Path name, short repl)
-      throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
-        blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
- 
   private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name,
       short repl) throws IOException {
     // create and write a file that contains three blocks of data
@@ -198,7 +186,8 @@
     // Decommission one node. Verify the decommission status
     // 
     Path file1 = new Path("decommission.dat");
-    writeFile(fileSys, file1, replicas);
+    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+        replicas, seed);
 
     Path file2 = new Path("decommission1.dat");
     FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
index 7fd6f47..ca51d63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
@@ -20,14 +20,12 @@
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -43,19 +41,6 @@
   static final int blockSize = 8192;
   boolean simulatedStorage = false;
 
-  // creates a zero file.
-  private void createFile(FileSystem fileSys, Path name)
-    throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) 1, blockSize);
-    byte[] buffer = new byte[1024];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-
   private void waitForLimit(FSNamesystem namesys, long num)
   {
     // wait for number of blocks to decrease
@@ -106,7 +91,7 @@
       //
       for (int i = 0; i < maxObjects/2; i++) {
         Path file = new Path("/filestatus" + i);
-        createFile(fs, file);
+        DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed);
         System.out.println("Created file " + file);
         currentNodes += 2;      // two more objects for this creation.
       }
@@ -115,7 +100,7 @@
       boolean hitException = false;
       try {
         Path file = new Path("/filestatus");
-        createFile(fs, file);
+        DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed);
         System.out.println("Created file " + file);
       } catch (IOException e) {
         hitException = true;
@@ -132,7 +117,7 @@
       waitForLimit(namesys, currentNodes);
 
       // now, we shud be able to create a new file
-      createFile(fs, file0);
+      DFSTestUtil.createFile(fs, file0, 1024, 1024, blockSize, (short) 1, seed);
       System.out.println("Created file " + file0 + " again.");
       currentNodes += 2;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
similarity index 94%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 08e5d56..346844d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -61,7 +61,7 @@
               throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
+    new INodeFile(new PermissionStatus(userName, null,
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
   }
@@ -102,7 +102,7 @@
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
+    new INodeFile(new PermissionStatus(userName, null, 
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
   } 
@@ -117,7 +117,7 @@
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    INodeFile inf = new INodeFile(new PermissionStatus(userName, null, 
+    new INodeFile(new PermissionStatus(userName, null, 
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
   }
@@ -160,10 +160,6 @@
     INodeFile[] appendFiles =   createINodeFiles(4, "appendfile");
     origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
     assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
-    
-    for(int i=0; i< origFile.numBlocks(); i++) {
-      assertSame("INodeFiles didn't Match", origFile, origFile.getBlocks()[i].getINode());
-    }
   }
 
   /** 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 2f2b688..9befc49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.AfterClass;
@@ -48,17 +49,6 @@
   private static MiniDFSCluster cluster = null;
   private static FileSystem fileSys = null;
 
-  private void createFile(FileSystem fileSys, Path name) throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) 2, blockSize);
-    byte[] buffer = new byte[1024];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-
   @BeforeClass
   public static void setUp() throws IOException {
     // start a cluster
@@ -84,7 +74,8 @@
 
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
-      createFile(fileSys, file);
+      DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2,
+          seed);
     }
 
     cluster.stopDataNode(1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 70f5b57..63388be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -25,17 +25,15 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -71,18 +69,6 @@
     }
   }
 
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, BLOCK_SIZE);
-    byte[] buffer = new byte[FILE_SIZE];
-    Random rand = new Random(SEED);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-
   void checkImageAndEditsFilesExistence(File dir, 
                                         boolean shouldHaveImages,
                                         boolean shouldHaveEdits)
@@ -187,7 +173,8 @@
 
     try {
       assertTrue(!fileSys.exists(file1));
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file1, replication);
       secondary.doCheckpoint();
     } finally {
@@ -224,7 +211,8 @@
       assertTrue(fileSys.exists(file1));
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file2, replication);
+      DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file2, replication);
       secondary.doCheckpoint();
     } finally {
@@ -260,7 +248,8 @@
       assertTrue(fileSys.exists(file2));
       checkFile(fileSys, file2, replication);
       cleanupFile(fileSys, file2);
-      writeFile(fileSys, file3, replication);
+      DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file3, replication);
       secondary.doCheckpoint();
     } finally {
@@ -364,7 +353,8 @@
       fileSys = cluster.getFileSystem();
 
       assertTrue(!fileSys.exists(file1));
-      writeFile(fileSys, file1, replication);
+      DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file1, replication);
     } finally  {
       fileSys.close();
@@ -402,7 +392,8 @@
       assertTrue(fileSys.exists(file1));
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file2, replication);
+      DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file2, replication);
     } finally {
       fileSys.close();
@@ -429,7 +420,8 @@
       assertTrue(fileSys.exists(file2));
       checkFile(fileSys, file2, replication);
       cleanupFile(fileSys, file2);
-      writeFile(fileSys, file3, replication);
+      DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file3, replication);
     } finally {
       fileSys.close();
@@ -483,7 +475,8 @@
       assertTrue(fileSys.exists(file3));
       checkFile(fileSys, file3, replication);
       cleanupFile(fileSys, file3);
-      writeFile(fileSys, file3, replication);
+      DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE,
+          (short) replication, SEED);
       checkFile(fileSys, file3, replication);
     } finally {
       fileSys.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 28e22aa..45a4f60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -77,20 +77,6 @@
   static final int fileSize = 8192;
   private long editsLength=0, fsimageLength=0;
 
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-  throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-
-
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
@@ -150,7 +136,8 @@
       // create a file
       FileSystem fileSys = cluster.getFileSystem();
       Path file1 = new Path("t1");
-      this.writeFile(fileSys, file1, 1);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, 
+          (short) 1, seed);
 
       LOG.info("--doing checkpoint");
       sn.doCheckpoint();  // this shouldn't fail
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 6de0c69..3d09f8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -166,7 +166,7 @@
   }
 
   @Test
-  public void testFilePermision() throws Exception {
+  public void testFilePermission() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -244,6 +244,10 @@
       fs.mkdirs(p);
       return true;
     } catch(AccessControlException e) {
+      // We check that AccessControlExceptions contain absolute paths.
+      Path parent = p.getParent();
+      assertTrue(parent.isUriPathAbsolute());
+      assertTrue(e.getMessage().contains(parent.toString()));
       return false;
     }
   }
@@ -253,6 +257,9 @@
       fs.create(p);
       return true;
     } catch(AccessControlException e) {
+      Path parent = p.getParent();
+      assertTrue(parent.isUriPathAbsolute());
+      assertTrue(e.getMessage().contains(parent.toString()));
       return false;
     }
   }
@@ -262,6 +269,8 @@
       fs.open(p);
       return true;
     } catch(AccessControlException e) {
+      assertTrue(p.isUriPathAbsolute());
+      assertTrue(e.getMessage().contains(p.toString()));
       return false;
     }
   }
@@ -272,6 +281,9 @@
       fs.rename(src, dst);
       return true;
     } catch(AccessControlException e) {
+      Path parent = dst.getParent();
+      assertTrue(parent.isUriPathAbsolute());
+      assertTrue(e.getMessage().contains(parent.toString()));
       return false;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
new file mode 100644
index 0000000..d6f9171
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
@@ -0,0 +1,40 @@
+package org.apache.hadoop.tools;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
+
+public class FakeRenewer extends TokenRenewer {
+  static Token<?> lastRenewed = null;
+  static Token<?> lastCanceled = null;
+  static final Text KIND = new Text("TESTING-TOKEN-KIND");
+
+  @Override
+  public boolean handleKind(Text kind) {
+    return FakeRenewer.KIND.equals(kind);
+  }
+
+  @Override
+  public boolean isManaged(Token<?> token) throws IOException {
+    return true;
+  }
+
+  @Override
+  public long renew(Token<?> token, Configuration conf) {
+    lastRenewed = token;
+    return 0;
+  }
+
+  @Override
+  public void cancel(Token<?> token, Configuration conf) {
+    lastCanceled = token;
+  }
+
+  public static void reset() {
+    lastRenewed = null;
+    lastCanceled = null;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
index 48b8642..6304349 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
@@ -36,9 +36,7 @@
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenRenewer;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
@@ -50,7 +48,6 @@
   private Configuration conf;
   private URI uri;
   private static final String SERVICE_VALUE = "localhost:2005";
-  private static final Text KIND = new Text("TESTING-TOKEN-KIND");
   private static String tokenFile = "file.dta";
 
   @Before 
@@ -61,37 +58,6 @@
     FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
   }
   
-  public static class FakeRenewer extends TokenRenewer {
-    static Token<?> lastRenewed = null;
-    static Token<?> lastCanceled = null;
-
-    @Override
-    public boolean handleKind(Text kind) {
-      return KIND.equals(kind);
-    }
-
-    @Override
-    public boolean isManaged(Token<?> token) throws IOException {
-      return true;
-    }
-
-    @Override
-    public long renew(Token<?> token, Configuration conf) {
-      lastRenewed = token;
-      return 0;
-    }
-
-    @Override
-    public void cancel(Token<?> token, Configuration conf) {
-      lastCanceled = token;
-    }
-    
-    public static void reset() {
-      lastRenewed = null;
-      lastCanceled = null;
-    }
-  }
-
   /**
    * Verify that when the DelegationTokenFetcher runs, it talks to the Namenode,
    * pulls out the correct user's token and successfully serializes it to disk.
@@ -103,13 +69,11 @@
         new Text("renewer"), new Text("realuser")).getBytes();
     final byte[] pw = new byte[] { 42 };
     final Text service = new Text(uri.toString());
-    final String user = 
-        UserGroupInformation.getCurrentUser().getShortUserName();
 
     // Create a token for the fetcher to fetch, wire NN to return it when asked
     // for this particular user.
     final Token<DelegationTokenIdentifier> t = 
-      new Token<DelegationTokenIdentifier>(ident, pw, KIND, service);
+      new Token<DelegationTokenIdentifier>(ident, pw, FakeRenewer.KIND, service);
     when(dfs.addDelegationTokens(eq((String) null), any(Credentials.class))).thenAnswer(
         new Answer<Token<?>[]>() {
           @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index 6901f643..92a1530 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.JMXGet;
@@ -53,19 +54,6 @@
   static final int blockSize = 4096;
   static final int fileSize = 8192;
 
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-  throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short)repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-
-
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
@@ -96,7 +84,8 @@
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
     cluster.waitActive();
 
-    writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
+    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test1"),
+        fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
     //jmx.setService("*"); // list all hadoop services
@@ -125,7 +114,8 @@
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
     cluster.waitActive();
 
-    writeFile(cluster.getFileSystem(), new Path("/test"), 2);
+    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/test"),
+        fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
     //jmx.setService("*"); // list all hadoop services
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
index 568cc80..721b996 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -1 +1 @@
-org.apache.hadoop.tools.TestDelegationTokenFetcher$FakeRenewer
+org.apache.hadoop.tools.FakeRenewer
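This one-line change is the companion to moving FakeRenewer out of TestDelegationTokenFetcher: TokenRenewer implementations are discovered via java.util.ServiceLoader, which reads each line of this META-INF/services file as a binary class name, so the nested form (Outer$FakeRenewer) had to become the new top-level name. A minimal lookup of the same shape as what the token machinery performs (hypothetical class name):

    import java.util.ServiceLoader;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.TokenRenewer;

    public class RenewerLookupSketch {
      public static void main(String[] args) {
        Text kind = new Text("TESTING-TOKEN-KIND");
        for (TokenRenewer renewer : ServiceLoader.load(TokenRenewer.class)) {
          if (renewer.handleKind(kind)) {
            System.out.println("found: " + renewer.getClass().getName());
            return;
          }
        }
        System.out.println("no renewer registered for " + kind);
      }
    }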
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
deleted file mode 100644
index a2a4d3d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY;
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.SecurityUtil;
-import org.junit.Test;
-
-public class TestGetImageServlet {
-  private static final String HOST = "foo.com";
-  private static final String KERBEROS_DOMAIN = "@HADOOP.ORG";
-  
-  private static Configuration getConf() {
-    Configuration conf = new Configuration();
-    FileSystem.setDefaultUri(conf, "hdfs://" + HOST);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, HOST
-        + ":50090");
-    return conf;
-  }
-  
-  // Worker class to poke the isValidRequestor method with verifying it accepts
-  // or rejects with these standard allowed principals
-  private void verifyIsValidReqBehavior(GetImageServlet gim, 
-                                        boolean shouldSucceed, String msg) 
-      throws IOException {
-    final String [] validRequestors = {DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY,
-                                       DFS_NAMENODE_USER_NAME_KEY,
-                                       DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY,
-                                       DFS_SECONDARY_NAMENODE_USER_NAME_KEY };
-    
-    Configuration conf = getConf();
-    for(String v : validRequestors) {
-      conf.set(v, "a/" + SecurityUtil.HOSTNAME_PATTERN + KERBEROS_DOMAIN);
-      assertEquals(msg + v, gim.isValidRequestor(shouldSucceed ? "a/" + HOST
-          + KERBEROS_DOMAIN : "b/" + HOST + KERBEROS_DOMAIN, conf),
-          shouldSucceed);
-    }
-  }
-  
-  @Test
-  public void IsValidRequestorAcceptsCorrectly() throws IOException {
-    GetImageServlet gim = new GetImageServlet();
-
-    verifyIsValidReqBehavior(gim, true, 
-        "isValidRequestor has rejected a valid requestor: ");
-  }
-  
-  @Test
-  public void IsValidRequestorRejectsCorrectly() throws IOException {
-    GetImageServlet gim = new GetImageServlet();
-    
-    // Don't set any valid requestors
-    assertFalse("isValidRequestor allowed a requestor despite no values being set",
-                gim.isValidRequestor("not set", getConf()));
-    
-    verifyIsValidReqBehavior(gim, false, 
-        "isValidRequestor has allowed an invalid requestor: ");
-  }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
deleted file mode 100644
index 089cf7b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
+++ /dev/null
@@ -1,462 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestNNLeaseRecovery {
-  private static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);
-  private static final String NAME_DIR =
-    MiniDFSCluster.getBaseDirectory() + "name";
-
-  FSNamesystem fsn;
-  Configuration conf;
-  
-  static {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
-  }
-
-  /**
-   * Initiates and sets a spied on FSNamesystem so tests can hook its methods
-   * @throws IOException if an error occurred
-   */
-  @Before
-  public void startUp() throws IOException {
-    conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
-    // avoid stubbing access control
-    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
-    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
-
-    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    DFSTestUtil.formatNameNode(conf);
-    fsn = spy(FSNamesystem.loadFromDisk(conf));
-  }
-
-  /**
-   * Cleans the resources and closes the instance of FSNamesystem
-   * @throws IOException if an error occurred
-   */
-  @After
-  public void tearDown() throws IOException {
-    if (fsn != null) {
-      try {
-        fsn.close();
-      } catch(Exception e) {
-        LOG.error("Cannot close: ", e);
-      } finally {
-        File dir = new File(NAME_DIR);
-        if (dir != null)
-          assertTrue("Cannot delete name-node dirs", FileUtil.fullyDelete(dir));
-      }
-    }
-  }
-
-  // Release the lease for the given file
-  private boolean releaseLease(FSNamesystem ns, LeaseManager.Lease lm, 
-      Path file) throws IOException {
-    fsn.writeLock();
-    try {
-      return fsn.internalReleaseLease(lm, file.toString(), null);
-    } finally {
-      fsn.writeUnlock();
-    }
-  }
-
-  /**
-   * Mocks FSNamesystem instance, adds an empty file and invokes lease recovery
-   * method. 
-   * @throws IOException in case of an error
-   */
-  @Test
-  public void testInternalReleaseLease_allCOMPLETE () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());    
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = spy(new Path("/test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    fsn.dir.addFile(file.toString(), ps, (short)3, 1l, 
-      "test", "test-machine", dnd, 1001l);
-    assertTrue("True has to be returned in this case",
-        releaseLease(fsn, lm, file));
-  }
-  
-  /**
-   * Mocks FSNamesystem instance, adds an empty file, sets status of last two
-   * blocks to non-defined and UNDER_CONSTRUCTION and invokes lease recovery
-   * method. IOException is expected for releasing a create lock on a 
-   * closed file. 
-   * @throws IOException as the result
-   */
-  @Test(expected=IOException.class)
-  public void testInternalReleaseLease_UNKNOWN_COMM () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));    
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, null, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
-    
-    releaseLease(fsn, lm, file);
-    fail("FSNamesystem.internalReleaseLease suppose to throw " +
-      "IOException here");
-  }  
-
-  /**
-   * Mocks FSNamesystem instance, adds an empty file, sets status of last two
-   * blocks to COMMITTED and COMMITTED and invokes lease recovery
-   * method. AlreadyBeingCreatedException is expected.
-   * @throws AlreadyBeingCreatedException as the result
-   */
-  @Test(expected=AlreadyBeingCreatedException.class)
-  public void testInternalReleaseLease_COMM_COMM () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
-
-    releaseLease(fsn, lm, file);
-    fail("FSNamesystem.internalReleaseLease suppose to throw " +
-      "IOException here");
-  }
-
-  /**
-   * Mocks FSNamesystem instance, adds an empty file with 0 blocks
-   * and invokes lease recovery method. 
-   */
-  @Test
-  public void testInternalReleaseLease_0blocks () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-
-    mockFileBlocks(0, null, null, file, dnd, ps, false);
-
-    assertTrue("True has to be returned in this case",
-        releaseLease(fsn, lm, file));
-  }
-  
-  /**
-   * Mocks FSNamesystem instance, adds an empty file with 1 block
-   * and invokes lease recovery method. 
-   * AlreadyBeingCreatedException is expected.
-   * @throws AlreadyBeingCreatedException as the result
-   */
-  @Test(expected=AlreadyBeingCreatedException.class)
-  public void testInternalReleaseLease_1blocks () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-
-    mockFileBlocks(1, null, HdfsServerConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
-
-    releaseLease(fsn, lm, file);
-    fail("FSNamesystem.internalReleaseLease suppose to throw " +
-      "IOException here");
-  }
-
-  /**
-   * Mocks FSNamesystem instance, adds an empty file, sets status of last two
-   * blocks to COMMITTED and UNDER_CONSTRUCTION and invokes lease recovery
-   * method. <code>false</code> is expected as the result
-   * @throws IOException in case of an error
-   */
-  @Test
-  public void testInternalReleaseLease_COMM_CONSTRUCTION () throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
-        
-    assertFalse("False is expected in return in this case",
-        releaseLease(fsn, lm, file));
-  }
-
-  @Test
-  public void testCommitBlockSynchronization_BlockNotFound () 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    long recoveryId = 2002;
-    long newSize = 273487234;
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
-    
-    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); 
-    try {
-      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
-        recoveryId, newSize, true, false, new DatanodeID[1]);
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().startsWith("Block (="));
-    }
-  }
-  
-  @Test
-  public void testCommitBlockSynchronization_notUR () 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    long recoveryId = 2002;
-    long newSize = 273487234;
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.COMPLETE, file, dnd, ps, true);
-    
-    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
-    when(lastBlock.isComplete()).thenReturn(true);
-    
-    try {
-      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
-        recoveryId, newSize, true, false, new DatanodeID[1]);
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().startsWith("Unexpected block (="));
-    }
-  }
-  
-  @Test
-  public void testCommitBlockSynchronization_WrongGreaterRecoveryID() 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    long recoveryId = 2002;
-    long newSize = 273487234;
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
-    
-    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
-    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100);
-    
-    try {
-      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
-        recoveryId, newSize, true, false, new DatanodeID[1]);
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId-100)));
-    }
-  }  
-  
-  @Test
-  public void testCommitBlockSynchronization_WrongLesserRecoveryID() 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    long recoveryId = 2002;
-    long newSize = 273487234;
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
-    
-    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
-    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100);
-    
-    try {           
-      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
-        recoveryId, newSize, true, false, new DatanodeID[1]);
-    } catch (IOException ioe) {
-      assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId+100)));
-    }
-  }
-
-  @Test
-  public void testCommitBlockSynchronization_EqualRecoveryID() 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Running " + GenericTestUtils.getMethodName());
-    }
-    long recoveryId = 2002;
-    long newSize = 273487234;
-    Path file = 
-      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
-    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
-    PermissionStatus ps =
-      new PermissionStatus("test", "test", new FsPermission((short)0777));
-    
-    mockFileBlocks(2, HdfsServerConstants.BlockUCState.COMMITTED, 
-      HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
-    
-    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
-    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId);
-    
-    boolean recoveryChecked = false;
-    try {
-      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
-        recoveryId, newSize, true, false, new DatanodeID[1]);
-    } catch (NullPointerException ioe) {
-      // It is fine to get NPE here because the datanodes array is empty
-      LOG.info("Exception ", ioe);
-      recoveryChecked = true;
-    }
-    assertTrue("commitBlockSynchronization had to throw NPE here", recoveryChecked);
-  }
-
-  private void mockFileBlocks(int fileBlocksNumber,
-                              HdfsServerConstants.BlockUCState penUltState,
-                              HdfsServerConstants.BlockUCState lastState,
-                              Path file, DatanodeDescriptor dnd,
-                              PermissionStatus ps,
-                              boolean setStoredBlock) throws IOException {
-    BlockInfo b = mock(BlockInfo.class);
-    BlockInfoUnderConstruction b1 = mock(BlockInfoUnderConstruction.class);
-    when(b.getBlockUCState()).thenReturn(penUltState);
-    when(b1.getBlockUCState()).thenReturn(lastState);
-    BlockInfo[] blocks;
-
-    FSDirectory fsDir = mock(FSDirectory.class);
-    INodeFileUnderConstruction iNFmock = mock(INodeFileUnderConstruction.class);
-
-    fsn.dir.close();
-    fsn.dir = fsDir;
-    FSImage fsImage = mock(FSImage.class);
-    FSEditLog editLog = mock(FSEditLog.class);
-                            
-    when(fsn.getFSImage()).thenReturn(fsImage);
-    when(fsn.getFSImage().getEditLog()).thenReturn(editLog);
-    
-    switch (fileBlocksNumber) {
-      case 0:
-        blocks = new BlockInfo[0];
-        break;
-      case 1:
-        blocks = new BlockInfo[]{b1};
-        when(iNFmock.getLastBlock()).thenReturn(b1);
-        break;
-      default:
-        when(iNFmock.getPenultimateBlock()).thenReturn(b);
-        when(iNFmock.getLastBlock()).thenReturn(b1);
-        blocks = new BlockInfo[]{b, b1};
-    }
-    
-    when(iNFmock.getBlocks()).thenReturn(blocks);
-    when(iNFmock.numBlocks()).thenReturn(blocks.length);
-    when(iNFmock.isUnderConstruction()).thenReturn(true);
-    when(iNFmock.convertToInodeFile()).thenReturn(iNFmock);    
-    fsDir.addFile(file.toString(), ps, (short)3, 1l, "test", 
-      "test-machine", dnd, 1001l);
-
-    fsn.leaseManager = mock(LeaseManager.class);
-    fsn.leaseManager.addLease("mock-lease", file.toString());
-    if (setStoredBlock) {
-      when(b1.getINode()).thenReturn(iNFmock);
-      fsn.getBlockManager().addINode(b1, iNFmock);
-    }
-
-    when(fsDir.getFileINode(anyString())).thenReturn(iNFmock);
-  }
-}
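
Note: the TestNNLeaseRecovery removal above drops a suite that stubbed FSNamesystem internals with Mockito spies, which made it fragile under namesystem refactoring. A minimal sketch of the idiom it relied on, assuming fsn is a Mockito spy as in the deleted startUp(); this is illustrative, not the removed test itself:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class LeaseRecoveryMockSketch {
  // Redirect the spied namesystem's image accessor to mocks so the
  // lease-recovery paths can run without touching real on-disk state.
  // FSNamesystem, FSImage and FSEditLog are the HDFS classes the
  // deleted test referenced.
  static void wireMocks(FSNamesystem fsn) {
    FSImage fsImage = mock(FSImage.class);
    FSEditLog editLog = mock(FSEditLog.class);
    when(fsn.getFSImage()).thenReturn(fsImage);
    when(fsImage.getEditLog()).thenReturn(editLog);
  }
}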
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c4698a7..893096d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -61,6 +61,8 @@
     MAPREDUCE-4371. Check for cyclic dependencies in Jobcontrol job DAG
     (madhukara phatak via bobby)
 
+    MAPREDUCE-4686. hadoop-mapreduce-client-core fails compilation in Eclipse due to missing Avro-generated classes (Chris Nauroth via harsh)
+
   BUG FIXES
 
     MAPREDUCE-4356. [Rumen] Provide access to the method
@@ -126,14 +128,33 @@
 
     MAPREDUCE-3868. Make Raid Compile. (Weiyan Wang via schen)
 
+    MAPREDUCE-4685. DBCount should not use ACCESS. (Viji via harsh)
+
+    MAPREDUCE-3223. Remove MR1 configs from mapred-default.xml (tlipcon via harsh)
+
+    MAPREDUCE-4678. Running the Pentomino example with defaults throws
+    java.lang.NegativeArraySizeException (Chris McConnell via harsh)
+
+    MAPREDUCE-4695. Fix LocalRunner on trunk after MAPREDUCE-3223 broke it
+    (harsh)
+
+    MAPREDUCE-4574. Fix TotalOrderPartitioner to work with
+    non-WritableComparable key types. (harsh)
+
 Release 2.0.3-alpha - Unreleased 
 
   INCOMPATIBLE CHANGES
 
+    MAPREDUCE-4123. Remove the 'mapred groups' command, which is no longer
+    supported. (Devaraj K via sseth)
+
   NEW FEATURES
 
   IMPROVEMENTS
 
+    MAPREDUCE-3678. The Map task logs should have the value of the
+    input split it processed. (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -147,6 +168,13 @@
     MAPREDUCE-4674. Hadoop examples secondarysort has a typo
     "secondarysrot" in the usage. (Robert Justice via eli)
 
+    MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. (acmurthy) 
+
+    MAPREDUCE-4712. mr-jobhistory-daemon.sh doesn't accept --config
+    (Vinod Kumar Vavilapalli via tgraves)
+
+    MAPREDUCE-4654. TestDistCp is ignored. (Sandy Ryza via tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
@@ -187,9 +215,6 @@
     MAPREDUCE-3921. MR AM should act on node health status changes. 
     (Bikas Saha via sseth)
 
-    MAPREDUCE-4253. Tests for mapreduce-client-core are lying under
-    mapreduce-client-jobclient (Tsuyoshi Ozawa via harsh)
-
     MAPREDUCE-2220. Fix new API FileOutputFormat-related typos in
     mapred-default.xml (Rui Kubo via harsh)
 
@@ -339,6 +364,10 @@
     MAPREDUCE-4380. Empty Userlogs directory is getting created under logs
     directory (Devaraj K via bobby)
 
+    MAPREDUCE-4649. Ensure MapReduce JobHistory Daemon doesn't assume
+    HADOOP_YARN_HOME and HADOOP_MAPRED_HOME are the same. (vinodkv via
+    acmurthy)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
@@ -522,6 +551,25 @@
     MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is
     bad (Jason Lowe via bobby)
 
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    MAPREDUCE-4554. Job Credentials are not transmitted if security is turned 
+    off (Benoy Antony via bobby)
+
+    MAPREDUCE-4705. Fix a bug in job history lookup, which makes older jobs
+    inaccessible despite the presence of a valid history file. (Jason Lowe
+    via sseth)
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -542,7 +590,16 @@
 
   BUG FIXES
 
-Release 0.23.3 - UNRELEASED
+    MAPREDUCE-4647. We should only unjar jobjar if there is a lib directory 
+    in it. (Robert Evans via tgraves)
+
+    MAPREDUCE-4691. Historyserver can report "Unknown job" after RM says job
+    has completed (Robert Joseph Evans via jlowe)
+
+    MAPREDUCE-4689. JobClient.getMapTaskReports on failed job results in NPE
+    (jlowe via bobby)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-mapreduce-project/INSTALL b/hadoop-mapreduce-project/INSTALL
index e35c8cab..fae2370 100644
--- a/hadoop-mapreduce-project/INSTALL
+++ b/hadoop-mapreduce-project/INSTALL
@@ -19,7 +19,7 @@
 You can omit -Pnative if you don't want to build native packages.
 
 Step 4) Untar the tarball from hadoop-dist/target/ into a clean and different
-directory, say YARN_HOME.
+directory, say HADOOP_YARN_HOME.
 
 Step 5)
 Start hdfs
@@ -32,7 +32,7 @@
 export HADOOP_MAPRED_HOME=<mapred loc>
 export HADOOP_COMMON_HOME=<common loc>
 export HADOOP_HDFS_HOME=<hdfs loc>
-export YARN_HOME=directory where you untarred yarn
+export HADOOP_YARN_HOME=directory where you untarred yarn
 export HADOOP_CONF_DIR=<conf loc>
 export YARN_CONF_DIR=$HADOOP_CONF_DIR
 
@@ -53,7 +53,7 @@
       <value>yarn</value>  
     </property>
 
-Step 9) cd $YARN_HOME
+Step 9) cd $HADOOP_YARN_HOME
 
 Step 10) sbin/yarn-daemon.sh start resourcemanager
 
@@ -64,7 +64,7 @@
 Step 13) You are all set, an example on how to run a mapreduce job is:
 cd $HADOOP_MAPRED_HOME
 ant examples -Dresolvers=internal 
-$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapreduce-examples-*.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $YARN_HOME/modules/hadoop-mapreduce-client-jobclient-*.jar output 
+$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapreduce-examples-*.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $HADOOP_YARN_HOME/modules/hadoop-mapreduce-client-jobclient-*.jar output 
 
 The output on the command line should be similar to what you see in the JT/TT setup (Hadoop 0.20/0.21)
 
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index deb9caf..c446414 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -17,7 +17,7 @@
 
 bin=`which $0`
 bin=`dirname ${bin}`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
@@ -35,7 +35,6 @@
   echo "  queue                get information regarding JobQueues"
   echo "  classpath            prints the class path needed for running"
   echo "                       mapreduce subcommands"
-  echo "  groups               get the groups which users belong to"
   echo "  historyserver        run job history servers as a standalone daemon"
   echo "  distcp <srcurl> <desturl> copy file or directories recursively"
   echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
@@ -63,9 +62,6 @@
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
 elif [ "$COMMAND" = "classpath" ] ; then
   echo -n 
-elif [ "$COMMAND" = "groups" ] ; then
-  CLASS=org.apache.hadoop.mapred.tools.GetGroups
-  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
 elif [ "$COMMAND" = "historyserver" ] ; then
   CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
   HADOOP_OPTS="$HADOOP_OPTS -Dmapred.jobsummary.logger=${HADOOP_JHS_LOGGER:-INFO,console} $HADOOP_JOB_HISTORYSERVER_OPTS"
@@ -74,7 +70,8 @@
   fi
 elif [ "$COMMAND" = "mradmin" ] \
     || [ "$COMMAND" = "jobtracker" ] \
-    || [ "$COMMAND" = "tasktracker" ] ; then
+    || [ "$COMMAND" = "tasktracker" ] \
+    || [ "$COMMAND" = "groups" ] ; then
   echo "Sorry, the $COMMAND command is no longer supported."
   echo "You may find similar functionality with the \"yarn\" shell command."
   print_usage
@@ -115,6 +112,11 @@
   CLASSPATH=${CLASSPATH}:$f;
 done
 
+# Need YARN jars also
+for f in $HADOOP_YARN_HOME/${YARN_DIR}/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
 # add libs to CLASSPATH
 for f in $HADOOP_MAPRED_HOME/${MAPRED_LIB_JARS_DIR}/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
diff --git a/hadoop-mapreduce-project/bin/mapred-config.sh b/hadoop-mapreduce-project/bin/mapred-config.sh
index d1eb627..254e0a0 100644
--- a/hadoop-mapreduce-project/bin/mapred-config.sh
+++ b/hadoop-mapreduce-project/bin/mapred-config.sh
@@ -38,3 +38,15 @@
   echo "Hadoop common not found."
   exit
 fi
+
+# Only set locally to use in HADOOP_OPTS. No need to export.
+# The following defaults are useful when somebody directly invokes bin/mapred.
+HADOOP_MAPRED_LOG_DIR=${HADOOP_MAPRED_LOG_DIR:-${HADOOP_MAPRED_HOME}/logs}
+HADOOP_MAPRED_LOGFILE=${HADOOP_MAPRED_LOGFILE:-hadoop.log}
+HADOOP_MAPRED_ROOT_LOGGER=${HADOOP_MAPRED_ROOT_LOGGER:-INFO,console}
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_MAPRED_LOG_DIR"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_MAPRED_LOGFILE"
+export HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_MAPRED_ROOT_LOGGER}"
+
+
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
index ed2eef0..9ef3d45 100644
--- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -16,22 +16,16 @@
 # limitations under the License.
 
 
-# Runs a yarn command as a daemon.
 #
 # Environment Variables
 #
-#   HADOOP_LOGFILE Hadoop log file.
-#   HADOOP_ROOT_LOGGER Hadoop root logger.
 #   HADOOP_JHS_LOGGER  Hadoop JobSummary logger.
-#   YARN_CONF_DIR  Alternate conf dir. Default is ${YARN_HOME}/conf.
-#   YARN_LOG_DIR   Where log files are stored.  PWD by default.
-#   YARN_MASTER    host:path where hadoop code should be rsync'd from
-#   YARN_PID_DIR   The pid files are stored. /tmp by default.
-#   YARN_IDENT_STRING   A string representing this instance of hadoop. $USER by default
-#   YARN_NICENESS The scheduling priority for daemons. Defaults to 0.
+#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_MAPRED_HOME}/conf.
+#   HADOOP_MAPRED_PID_DIR   Where the pid files are stored. /tmp by default.
+#   HADOOP_MAPRED_NICENESS The scheduling priority for daemons. Defaults to 0.
 ##
 
-usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <mapred-command> "
+usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] (start|stop) <mapred-command> "
 
 # if no args specified, show usage
 if [ $# -le 1 ]; then
@@ -44,7 +38,9 @@
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+if [ -e ${HADOOP_LIBEXEC_DIR}/mapred-config.sh ]; then
+  . $HADOOP_LIBEXEC_DIR/mapred-config.sh
+fi
 
 # get arguments
 startStop=$1
@@ -69,43 +65,43 @@
   fi
 }
 
-if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
-  . "${YARN_CONF_DIR}/yarn-env.sh"
+if [ "$HADOOP_MAPRED_IDENT_STRING" = "" ]; then
+  export HADOOP_MAPRED_IDENT_STRING="$USER"
 fi
 
-if [ "$YARN_IDENT_STRING" = "" ]; then
-  export YARN_IDENT_STRING="$USER"
-fi
-
-# get log directory
-if [ "$YARN_LOG_DIR" = "" ]; then
-  export YARN_LOG_DIR="$YARN_HOME/logs"
-fi
-mkdir -p "$YARN_LOG_DIR"
-chown $YARN_IDENT_STRING $YARN_LOG_DIR
-
-if [ "$YARN_PID_DIR" = "" ]; then
-  YARN_PID_DIR=/tmp
-fi
-
-# some variables
-export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,RFA}
+export HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-${HADOOP_PREFIX}}
+export HADOOP_MAPRED_LOGFILE=mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_MAPRED_ROOT_LOGGER=${HADOOP_MAPRED_ROOT_LOGGER:-INFO,RFA}
 export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
-log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
-pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
-YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
+
+if [ -f "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then
+  . "${HADOOP_CONF_DIR}/mapred-env.sh"
+fi
+
+mkdir -p "$HADOOP_MAPRED_LOG_DIR"
+chown $HADOOP_MAPRED_IDENT_STRING $HADOOP_MAPRED_LOG_DIR
+
+if [ "$HADOOP_MAPRED_PID_DIR" = "" ]; then
+  HADOOP_MAPRED_PID_DIR=/tmp
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_MAPRED_IDENT_STRING"
+
+log=$HADOOP_MAPRED_LOG_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_MAPRED_PID_DIR/mapred-$HADOOP_MAPRED_IDENT_STRING-$command.pid
+
+HADOOP_MAPRED_STOP_TIMEOUT=${HADOOP_MAPRED_STOP_TIMEOUT:-5}
 
 # Set default scheduling priority
-if [ "$YARN_NICENESS" = "" ]; then
-  export YARN_NICENESS=0
+if [ "$HADOOP_MAPRED_NICENESS" = "" ]; then
+  export HADOOP_MAPRED_NICENESS=0
 fi
 
 case $startStop in
 
   (start)
 
-    mkdir -p "$YARN_PID_DIR"
+    mkdir -p "$HADOOP_MAPRED_PID_DIR"
 
     if [ -f $pid ]; then
       if kill -0 `cat $pid` > /dev/null 2>&1; then
@@ -114,15 +110,10 @@
       fi
     fi
 
-    if [ "$YARN_MASTER" != "" ]; then
-      echo rsync from $YARN_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$YARN_HOME"
-    fi
-
     hadoop_rotate_log $log
     echo starting $command, logging to $log
-    cd "$YARN_HOME"
-    nohup nice -n $YARN_NICENESS "$YARN_HOME"/bin/mapred --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+    cd "$HADOOP_MAPRED_HOME"
+    nohup nice -n $HADOOP_MAPRED_NICENESS "$HADOOP_MAPRED_HOME"/bin/mapred --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
     echo $! > $pid
     sleep 1; head "$log"
     ;;
@@ -134,9 +125,9 @@
       if kill -0 $TARGET_PID > /dev/null 2>&1; then
         echo stopping $command
         kill $TARGET_PID
-        sleep $YARN_STOP_TIMEOUT
+        sleep $HADOOP_MAPRED_STOP_TIMEOUT
         if kill -0 $TARGET_PID > /dev/null 2>&1; then
-          echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+          echo "$command did not stop gracefully after $HADOOP_MAPRED_STOP_TIMEOUT seconds: killing with kill -9"
           kill -9 $TARGET_PID
         fi
       else
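
Note: this rewrite sources mapred-config.sh instead of yarn-config.sh and renames every YARN_* knob to its HADOOP_MAPRED_* equivalent, so the history server runs against the MapReduce environment rather than YARN's; it is also what makes --config work again (see MAPREDUCE-4712 above). Expected invocation, as a usage sketch (the script's install path depends on the packaged layout):

  mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver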
diff --git a/hadoop-mapreduce-project/conf/mapred-env.sh b/hadoop-mapreduce-project/conf/mapred-env.sh
new file mode 100644
index 0000000..6be1e27
--- /dev/null
+++ b/hadoop-mapreduce-project/conf/mapred-env.sh
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # Where the pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= # A string representing this instance of hadoop. $USER by default.
+#export HADOOP_MAPRED_NICENESS= # The scheduling priority for daemons. Defaults to 0.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 64d8bb8..23f436f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -490,19 +490,17 @@
     try {
       this.currentUser = UserGroupInformation.getCurrentUser();
 
-      if (UserGroupInformation.isSecurityEnabled()) {
-        // Read the file-system tokens from the localized tokens-file.
-        Path jobSubmitDir = 
-            FileContext.getLocalFSFileContext().makeQualified(
-                new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
-                    .getAbsolutePath()));
-        Path jobTokenFile = 
-            new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
-        fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
-        LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
-            + jobTokenFile);
-        currentUser.addCredentials(fsTokens); // For use by AppMaster itself.
-      }
+      // Read the file-system tokens from the localized tokens-file.
+      Path jobSubmitDir = 
+          FileContext.getLocalFSFileContext().makeQualified(
+              new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
+                  .getAbsolutePath()));
+      Path jobTokenFile = 
+          new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
+      fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
+      LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
+          + jobTokenFile);
+      currentUser.addCredentials(fsTokens); // For use by AppMaster itself.
     } catch (IOException e) {
       throw new YarnException(e);
     }
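
Note: this hunk removes the isSecurityEnabled() guard so the AM reads the localized tokens file unconditionally, matching the MAPREDUCE-4554 entry above (job credentials were previously dropped when security was off). A minimal sketch of the same pattern using the Hadoop calls the hunk itself makes; the token-file path is a caller-supplied stand-in for the localized MRJobConfig.APPLICATION_TOKENS_FILE:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

class TokenLoadSketch {
  // Read serialized credentials from the tokens file and attach them to
  // the current UGI, regardless of whether Kerberos is enabled.
  static void loadJobTokens(Configuration conf, Path jobTokenFile)
      throws IOException {
    Credentials fsTokens = Credentials.readTokenStorageFile(jobTokenFile, conf);
    UserGroupInformation.getCurrentUser().addCredentials(fsTokens);
  }
}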
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
index 341e721..d756480 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -82,8 +82,7 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
-import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
@@ -115,16 +114,15 @@
     YarnRPC rpc = YarnRPC.create(conf);
     InetSocketAddress address = new InetSocketAddress(0);
 
-    ClientToAMSecretManager secretManager = null;
+    ClientToAMTokenSecretManager secretManager = null;
     if (UserGroupInformation.isSecurityEnabled()) {
-      secretManager = new ClientToAMSecretManager();
       String secretKeyStr =
           System
               .getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
       byte[] bytes = Base64.decodeBase64(secretKeyStr);
-      ClientTokenIdentifier identifier = new ClientTokenIdentifier(
-          this.appContext.getApplicationID());
-      secretManager.setMasterKey(identifier, bytes);
+      secretManager =
+          new ClientToAMTokenSecretManager(this.appContext.getApplicationID(),
+            bytes);
     }
     server =
         rpc.getServer(MRClientProtocol.class, protocolHandler, address,
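
Note: the old two-step ClientToAMSecretManager/setMasterKey wiring becomes a single constructor call. A sketch of the replacement wiring as the hunk uses it; envName here stands in for ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME:

import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;

class SecretManagerSketch {
  // Decode the base64 secret the RM placed in the AM's environment and
  // seed the per-application token secret manager with it directly.
  static ClientToAMTokenSecretManager build(String envName, ApplicationId appId) {
    byte[] key = Base64.decodeBase64(System.getenv(envName));
    return new ClientToAMTokenSecretManager(appId, key);
  }
}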
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 255d393..6b4709c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapred.MapReduceChildJVM;
 import org.apache.hadoop.mapred.ShuffleHandler;
 import org.apache.hadoop.mapred.Task;
@@ -610,10 +611,12 @@
       if (jobJar != null) {
         Path remoteJobJar = (new Path(jobJar)).makeQualified(remoteFS
             .getUri(), remoteFS.getWorkingDirectory());
-        localResources.put(
-            MRJobConfig.JOB_JAR,
-            createLocalResource(remoteFS, remoteJobJar,
-                LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION));
+        LocalResource rc = createLocalResource(remoteFS, remoteJobJar,
+            LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
+        String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
+            JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
+        rc.setPattern(pattern);
+        localResources.put(MRJobConfig.JOB_JAR, rc);
         LOG.info("The job-jar file on the remote FS is "
             + remoteJobJar.toUri().toASCIIString());
       } else {
@@ -644,14 +647,10 @@
       MRApps.setupDistributedCache(conf, localResources);
 
       // Setup up task credentials buffer
-      Credentials taskCredentials = new Credentials();
-
-      if (UserGroupInformation.isSecurityEnabled()) {
-        LOG.info("Adding #" + credentials.numberOfTokens()
-            + " tokens and #" + credentials.numberOfSecretKeys()
-            + " secret keys for NM use for launching container");
-        taskCredentials.addAll(credentials);
-      }
+      LOG.info("Adding #" + credentials.numberOfTokens()
+          + " tokens and #" + credentials.numberOfSecretKeys()
+          + " secret keys for NM use for launching container");
+      Credentials taskCredentials = new Credentials(credentials);
 
       // LocalStorageToken is needed irrespective of whether security is enabled
       // or not.
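
Note: the job jar is now localized as LocalResourceType.PATTERN rather than ARCHIVE, so the NodeManager keeps the jar itself and unpacks only entries matching the configured pattern (this pairs with the MAPREDUCE-4647 "only unjar if there is a lib directory" entry above). A minimal sketch of resolving that pattern with the same configuration calls as the hunk:

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;

class UnpackPatternSketch {
  // Configured jar-unpack pattern if set, else JobConf's default; the
  // resulting string is what the hunk passes to LocalResource.setPattern().
  static String unpackPattern(Configuration conf) {
    Pattern p = conf.getPattern(JobContext.JAR_UNPACK_PATTERN,
        JobConf.UNPACK_JAR_PATTERN_DEFAULT);
    return p.pattern();
  }
}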
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
index 67c9cf5..3dd6c33 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -179,6 +179,10 @@
      public Configuration getConfig() {
        return conf;
      }
+
+     @Override
+     protected void downloadTokensAndSetupUGI(Configuration conf) {
+     }
    }
 
   private final class MRAppTestCleanup extends MRApp {
@@ -266,4 +270,4 @@
     Assert.assertTrue("Staging directory not cleaned before notifying RM",
         app.cleanedBeforeContainerAllocatorStopped);
   }
- }
\ No newline at end of file
+ }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index fb26245..3368d51 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -152,6 +152,10 @@
           localArchives.add(pathString);
         } else if (resource.getType() == LocalResourceType.FILE) {
           localFiles.add(pathString);
+        } else if (resource.getType() == LocalResourceType.PATTERN) {
+          // PATTERN is not currently used in local mode
+          throw new IllegalArgumentException("Resource type PATTERN is not " +
+              "implemented yet. " + resource.getResource());
         }
         Path resourcePath;
         try {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 4235e72..055b079 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -210,7 +210,7 @@
     Apps.addToEnvironment(
         environment,
         Environment.CLASSPATH.name(),
-        MRJobConfig.JOB_JAR + Path.SEPARATOR);
+        MRJobConfig.JOB_JAR + Path.SEPARATOR + MRJobConfig.JOB_JAR);
     Apps.addToEnvironment(
         environment,
         Environment.CLASSPATH.name(),
@@ -281,7 +281,7 @@
   }
 
   private static String getResourceDescription(LocalResourceType type) {
-    if(type == LocalResourceType.ARCHIVE) {
+    if(type == LocalResourceType.ARCHIVE || type == LocalResourceType.PATTERN) {
       return "cache archive (" + MRJobConfig.CACHE_ARCHIVES + ") ";
     }
     return "cache file (" + MRJobConfig.CACHE_FILES + ") ";
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 345539d..dbd3538 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -166,7 +166,7 @@
     }
     String env_str = env.get("CLASSPATH");
     assertSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
-      env_str.indexOf("$PWD:job.jar/:job.jar/classes/:job.jar/lib/*:$PWD/*"), 0);
+      env_str.indexOf("$PWD:job.jar/job.jar:job.jar/classes/:job.jar/lib/*:$PWD/*"), 0);
   }
 
   @Test public void testSetClasspathWithNoUserPrecendence() {
@@ -180,7 +180,7 @@
     }
     String env_str = env.get("CLASSPATH");
     int index = 
-         env_str.indexOf("job.jar/:job.jar/classes/:job.jar/lib/*:$PWD/*");
+         env_str.indexOf("job.jar/job.jar:job.jar/classes/:job.jar/lib/*:$PWD/*");
     assertNotSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not"
             + " in the classpath!", index, -1);
     assertNotSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c4177c9..dbe2737 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -68,6 +68,24 @@
         </executions>
       </plugin>
       <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>target/generated-sources/avro</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 1232645..b10be36 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -424,6 +424,7 @@
       job.setLong(JobContext.MAP_INPUT_START, fileSplit.getStart());
       job.setLong(JobContext.MAP_INPUT_PATH, fileSplit.getLength());
     }
+    LOG.info("Processing split: " + inputSplit);
   }
 
   static class NewTrackingRecordReader<K,V> 
@@ -694,6 +695,7 @@
     org.apache.hadoop.mapreduce.InputSplit split = null;
     split = getSplitDetails(new Path(splitIndex.getSplitLocation()),
         splitIndex.getStartOffset());
+    LOG.info("Processing split: " + split);
 
     org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input =
       new NewTrackingRecordReader<INKEY,INVALUE>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
index 14e0962..f393876 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java
@@ -31,7 +31,7 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class TotalOrderPartitioner<K extends WritableComparable<?>,V>
+public class TotalOrderPartitioner<K,V>
     extends org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner<K, V>
     implements Partitioner<K,V> {
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
index fa12976..632abdf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
@@ -47,7 +47,7 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class TotalOrderPartitioner<K extends WritableComparable<?>,V>
+public class TotalOrderPartitioner<K,V>
     extends Partitioner<K,V> implements Configurable {
 
   private Node partitions;
@@ -298,12 +298,13 @@
   @SuppressWarnings("unchecked") // map output key class
   private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
       Configuration conf) throws IOException {
-    SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
+    SequenceFile.Reader reader = new SequenceFile.Reader(
+        conf,
+        SequenceFile.Reader.file(p));
     ArrayList<K> parts = new ArrayList<K>();
     K key = ReflectionUtils.newInstance(keyClass, conf);
-    NullWritable value = NullWritable.get();
     try {
-      while (reader.next(key, value)) {
+      while ((key = (K) reader.next(key)) != null) {
         parts.add(key);
         key = ReflectionUtils.newInstance(keyClass, conf);
       }
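
Note: dropping the WritableComparable bound on K in both TotalOrderPartitioner classes (MAPREDUCE-4574 above) works because readPartitions now goes through the serialization-framework SequenceFile.Reader API instead of the Writable-only next(key, value) loop. A minimal self-contained sketch of reading keys that way; the file path and key class are caller-supplied assumptions:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;

class ReadKeysSketch {
  @SuppressWarnings("unchecked")
  static <K> List<K> readKeys(Configuration conf, Path p, Class<K> keyClass)
      throws IOException {
    SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(p));
    List<K> keys = new ArrayList<K>();
    try {
      K key = ReflectionUtils.newInstance(keyClass, conf);
      // next(Object) deserializes through the configured serialization
      // and returns null at end of file, so non-Writable keys work too.
      while ((key = (K) reader.next(key)) != null) {
        keys.add(key);
        key = ReflectionUtils.newInstance(keyClass, conf);
      }
    } finally {
      reader.close();
    }
    return keys;
  }
}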
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index a45c9e2..92aeba6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -24,47 +24,6 @@
 <configuration>
 
 <property>
-  <name>mapreduce.jobtracker.jobhistory.location</name>
-  <value></value>
-  <description> If job tracker is static the history files are stored 
-  in this single well known place. If No value is set here, by default,
-  it is in the local file system at ${hadoop.log.dir}/history.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name>
-  <value>12</value>
-  <description> Every task attempt progresses from 0.0 to 1.0 [unless
-  it fails or is killed].  We record, for each task attempt, certain 
-  statistics over each twelfth of the progress range.  You can change
-  the number of intervals we divide the entire range of progress into
-  by setting this property.  Higher values give more precision to the
-  recorded data, but costs more memory in the job tracker at runtime.
-  Each increment in this attribute costs 16 bytes per running task.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.userhistorylocation</name>
-  <value></value>
-  <description> User can specify a location to store the history files of 
-  a particular job. If nothing is specified, the logs are stored in 
-  output directory. The files are stored in "_logs/history/" in the directory.
-  User can stop logging by giving the value "none". 
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.jobhistory.completed.location</name>
-  <value></value>
-  <description> The completed job history files are stored at this single well 
-  known location. If nothing is specified, the files are stored at 
-  ${mapreduce.jobtracker.jobhistory.location}/done.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.job.committer.setup.cleanup.needed</name>
   <value>true</value>
   <description> true, if job needs job-setup and job-cleanup.
@@ -99,15 +58,6 @@
 </property>
 
 <property>
-  <name>mapreduce.jobtracker.address</name>
-  <value>local</value>
-  <description>The host and port that the MapReduce job tracker runs
-  at.  If "local", then jobs are run in-process as a single map
-  and reduce task.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.local.clientfactory.class.name</name>
   <value>org.apache.hadoop.mapred.LocalClientFactory</value>
   <description>This the client factory that is responsible for 
@@ -115,138 +65,10 @@
 </property>
 
 <property>
-  <name>mapreduce.jobtracker.http.address</name>
-  <value>0.0.0.0:50030</value>
-  <description>
-    The job tracker http server address and port the server will listen on.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.handler.count</name>
-  <value>10</value>
-  <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.report.address</name>
-  <value>127.0.0.1:0</value>
-  <description>The interface and port that task tracker server listens on. 
-  Since it is only connected to by the tasks, it uses the local interface.
-  EXPERT ONLY. Should only be changed if your host does not have the loopback 
-  interface.</description>
-</property>
-
-<property>
-  <name>mapreduce.cluster.local.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/local</value>
-  <description>The local directory where MapReduce stores intermediate
-  data files.  May be a comma-separated list of
-  directories on different devices in order to spread disk i/o.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.system.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/system</value>
-  <description>The directory where MapReduce stores control files.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.staging.root.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/staging</value>
-  <description>The root of the staging area for users' job files
-  In practice, this should be the directory where users' home 
-  directories are located (usually /user)
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.cluster.temp.dir</name>
-  <value>${hadoop.tmp.dir}/mapred/temp</value>
-  <description>A shared directory for temporary files.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.local.dir.minspacestart</name>
-  <value>0</value>
-  <description>If the space in mapreduce.cluster.local.dir drops under this, 
-  do not ask for more tasks.
-  Value in bytes.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.local.dir.minspacekill</name>
-  <value>0</value>
-  <description>If the space in mapreduce.cluster.local.dir drops under this, 
-    do not ask more tasks until all the current ones have finished and 
-    cleaned up. Also, to save the rest of the tasks we have running, 
-    kill one of them, to clean up some space. Start with the reduce tasks,
-    then go with the ones that have finished the least.
-    Value in bytes.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.expire.trackers.interval</name>
-  <value>600000</value>
-  <description>Expert: The time-interval, in miliseconds, after which
-  a tasktracker is declared 'lost' if it doesn't send heartbeats.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.instrumentation</name>
-  <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value>
-  <description>Expert: The instrumentation class to associate with each TaskTracker.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.resourcecalculatorplugin</name>
-  <value></value>
-  <description>
-   Name of the class whose instance will be used to query resource information
-   on the tasktracker.
-   
-   The class must be an instance of 
-   org.apache.hadoop.util.ResourceCalculatorPlugin. If the value is null, the
-   tasktracker attempts to use a class appropriate to the platform. 
-   Currently, the only platform supported is Linux.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name>
-  <value>5000</value>
-  <description>The interval, in milliseconds, for which the tasktracker waits
-   between two cycles of monitoring its tasks' memory usage. Used only if
-   tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
-   </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name>
-  <value>5000</value>
-  <description>The time, in milliseconds, the tasktracker waits for sending a
-  SIGKILL to a task, after it has been sent a SIGTERM. This is currently
-  not used on WINDOWS where tasks are just sent a SIGTERM.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.job.maps</name>
   <value>2</value>
   <description>The default number of map tasks per job.
-  Ignored when mapreduce.jobtracker.address is "local".  
+  Ignored when mapreduce.framework.name is "local".
   </description>
 </property>
 
@@ -256,54 +78,21 @@
   <description>The default number of reduce tasks per job. Typically set to 99%
   of the cluster's reduce capacity, so that if a node fails the reduces can 
   still be executed in a single wave.
-  Ignored when mapreduce.jobtracker.address is "local".
+  Ignored when mapreduce.framework.name is "local".
   </description>
 </property>
 
 <property>
-  <name>mapreduce.jobtracker.restart.recover</name>
-  <value>false</value>
-  <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.jobhistory.block.size</name>
-  <value>3145728</value>
-  <description>The block size of the job history file. Since the job recovery
-               uses job history, its important to dump job history to disk as 
-               soon as possible. Note that this is an expert level parameter.
-               The default value is set to 3 MB.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.taskscheduler</name>
-  <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
-  <description>The class responsible for scheduling the tasks.</description>
-</property>
-
-
-<property>
   <name>mapreduce.job.split.metainfo.maxsize</name>
   <value>10000000</value>
   <description>The maximum permissible size of the split metainfo file. 
-  The JobTracker won't attempt to read split metainfo files bigger than
-  the configured value.
+  The MapReduce ApplicationMaster won't attempt to read submitted split metainfo
+  files bigger than this configured value.
   No limits if set to -1.
   </description>
 </property>
 
 <property>
-  <name>mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob</name>
-  <value></value>
-  <description>The maximum number of running tasks for a job before
-  it gets preempted. No limits if undefined.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.map.maxattempts</name>
   <value>4</value>
   <description>Expert: The maximum number of attempts per map task.
@@ -333,7 +122,7 @@
   <name>mapreduce.reduce.shuffle.connect.timeout</name>
   <value>180000</value>
   <description>Expert: The maximum amount of time (in milliseconds) a reduce
-  task spends in trying to connect to a tasktracker for getting map output.
+  task spends trying to connect to a remote node to fetch map output.
   </description>
 </property>
 
@@ -355,51 +144,6 @@
   </description>
 </property>
 
-<property>
-  <name>mapreduce.tasktracker.map.tasks.maximum</name>
-  <value>2</value>
-  <description>The maximum number of map tasks that will be run
-  simultaneously by a task tracker.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
-  <value>2</value>
-  <description>The maximum number of reduce tasks that will be run
-  simultaneously by a task tracker.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.retiredjobs.cache.size</name>
-  <value>1000</value>
-  <description>The number of retired job status to keep in the cache.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.outofband.heartbeat</name>
-  <value>false</value>
-  <description>Expert: Set this to true to let the tasktracker send an 
-  out-of-band heartbeat on task-completion for better latency.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.jobhistory.lru.cache.size</name>
-  <value>5</value>
-  <description>The number of job history files loaded in memory. The jobs are 
-  loaded when they are first accessed. The cache is cleared based on LRU.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.instrumentation</name>
-  <value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value>
-  <description>Expert: The instrumentation class to associate with each JobTracker.
-  </description>
-</property>
 
 <property>
   <name>mapred.child.java.opts</name>
@@ -424,7 +168,7 @@
   <description>User added environment variables for the task tracker child 
   processes. Example :
   1) A=foo  This will set the env variable A to foo
-  2) B=$B:c This is inherit tasktracker's B env variable.  
+  2) B=$B:c This inherits the nodemanager's B env variable.
   </description>
 </property>
 
@@ -568,17 +312,9 @@
   <name>mapreduce.job.speculative.slownodethreshold</name>
   <value>1.0</value>
   <description>The number of standard deviations by which a Task 
-  Tracker's ave map and reduce progress-rates (finishTime-dispatchTime)
+  Tracker's average map and reduce progress-rates (finishTime-dispatchTime)
  must be lower than the average of all successful map/reduce tasks' for
-  the TT to be considered too slow to give a speculative task to.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.job.jvm.numtasks</name>
-  <value>1</value>
-  <description>How many tasks to run per jvm. If set to -1, there is
-  no limit. 
+  the NodeManager to be considered too slow to give a speculative task to.
   </description>
 </property>
 
@@ -630,12 +366,6 @@
   take priority over this setting.</description>
 </property>
 
-<property>
-  <name>mapreduce.jobtracker.maxtasks.perjob</name>
-  <value>-1</value>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
 
 <property>
   <name>mapreduce.client.submit.file.replication</name>
@@ -645,41 +375,6 @@
   </description>
 </property>
 
-
-<property>
-  <name>mapreduce.tasktracker.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a task
-  tracker should report its IP address.
-  </description>
- </property>
- 
-<property>
-  <name>mapreduce.tasktracker.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a TaskTracker should use to determine the host name used by
-  the JobTracker for communication and display purposes.
-  </description>
- </property>
- 
-<property>
-  <name>mapreduce.tasktracker.http.threads</name>
-  <value>40</value>
-  <description>The number of worker threads that for the http server. This is
-               used for map output fetching
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.http.address</name>
-  <value>0.0.0.0:50060</value>
-  <description>
-    The task tracker http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
 <property>
   <name>mapreduce.task.files.preserve.failedtasks</name>
   <value>false</value>
@@ -752,53 +447,9 @@
 </property>
 
 <property>
-  <name>mapreduce.job.userlog.retain.hours</name>
-  <value>24</value>
-  <description>The maximum time, in hours, for which the user-logs are to be 
-               retained after the job completion.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.hosts.filename</name>
-  <value></value>
-  <description>Names a file that contains the list of nodes that may
-  connect to the jobtracker.  If the value is empty, all hosts are
-  permitted.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.hosts.exclude.filename</name>
-  <value></value>
-  <description>Names a file that contains the list of hosts that
-  should be excluded by the jobtracker.  If the value is empty, no
-  hosts are excluded.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.heartbeats.in.second</name>
-  <value>100</value>
-  <description>Expert: Approximate number of heart-beats that could arrive 
-               at JobTracker in a second. Assuming each RPC can be processed 
-               in 10msec, the default value is made 100 RPCs in a second.
-  </description>
-</property> 
-
-<property>
-  <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
-  <value>4</value>
-  <description>The number of blacklists for a taskTracker by various jobs
-               after which the task tracker could be blacklisted across
-               all jobs. The tracker will be given a tasks later
-               (after a day). The tracker will become a healthy
-               tracker after a restart.
-  </description>
-</property> 
-
-<property>
   <name>mapreduce.job.maxtaskfailures.per.tracker</name>
   <value>3</value>
-  <description>The number of task-failures on a tasktracker of a given job 
+  <description>The number of task-failures on a node manager of a given job 
                after which new tasks of that job aren't assigned to it. It
                MUST be less than mapreduce.map.maxattempts and
                mapreduce.reduce.maxattempts otherwise the failed task will
@@ -820,8 +471,8 @@
     <name>mapreduce.client.completion.pollinterval</name>
     <value>5000</value>
     <description>The interval (in milliseconds) between which the JobClient
-    polls the JobTracker for updates about job status. You may want to set this
-    to a lower value to make tests run faster on a single node system. Adjusting
+    polls the MapReduce ApplicationMaster for updates about job status. You may want to
+    set this to a lower value to make tests run faster on a single node system. Adjusting
     this value in production may lead to unwanted client-server traffic.
     </description>
   </property>
@@ -836,32 +487,6 @@
     </description>
   </property>
 
-  <property>
-    <name>mapreduce.jobtracker.persist.jobstatus.active</name>
-    <value>true</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-  <name>mapreduce.jobtracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-  <property>
-    <name>mapreduce.jobtracker.persist.jobstatus.dir</name>
-    <value>/jobtracker/jobsInfo</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
 
   <property>
     <name>mapreduce.task.profile</name>
@@ -894,8 +519,8 @@
     <description> The number of Task attempts AFTER which skip mode 
     will be kicked off. When skip mode is kicked off, the 
    task reports the range of records which it will process 
-    next, to the TaskTracker. So that on failures, TT knows which 
-    ones are possibly the bad records. On further executions, 
+    next to the MR ApplicationMaster, so that on failures the MR AM
+    knows which ones are possibly the bad records. On further executions,
     those are skipped.
     </description>
   </property>
@@ -1006,15 +631,6 @@
                 calls</description>
 </property>
   
-<!-- Proxy Configuration -->
-<property>
-  <name>mapreduce.jobtracker.taskcache.levels</name>
-  <value>2</value>
-  <description> This is the max level of the task cache. For example, if
-    the level is 2, the tasks cached are at the host level and at the rack
-    level.
-  </description>
-</property>
 
 <property>
   <name>mapreduce.job.queuename</name>
@@ -1028,18 +644,29 @@
 </property>
 
 <property>
+  <name>mapreduce.cluster.local.dir</name>
+  <value>${hadoop.tmp.dir}/mapred/local</value>
+  <description>
+      The local directory where MapReduce stores intermediate
+      data files.  May be a comma-separated list of
+      directories on different devices in order to spread disk i/o.
+      Directories that do not exist are ignored.
+  </description>
+</property>
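+<!-- A hypothetical multi-disk setting for the property above, spreading
+     intermediate data across two devices, would be:
+     <value>/grid/0/mapred/local,/grid/1/mapred/local</value> -->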
+
+<property>
   <name>mapreduce.cluster.acls.enabled</name>
   <value>false</value>
   <description> Specifies whether ACLs should be checked
     for authorization of users for doing various queue and job level operations.
     ACLs are disabled by default. If enabled, access control checks are made by
-    JobTracker and TaskTracker when requests are made by users for queue
+    the MapReduce ApplicationMaster when requests are made by users for queue
     operations like submit job to a queue and kill a job in the queue and job
     operations like viewing the job-details (See mapreduce.job.acl-view-job)
     or for modifying the job (See mapreduce.job.acl-modify-job) using
     Map/Reduce APIs, RPCs or via the console and web user interfaces.
-    For enabling this flag(mapreduce.cluster.acls.enabled), this is to be set
-    to true in mapred-site.xml on JobTracker node and on all TaskTracker nodes.
+    To enable this flag, set it to true in the mapred-site.xml file of all
+    MapReduce clients (MR job submitting nodes).
   </description>
 </property>
 
@@ -1092,8 +719,8 @@
       o job-level counters
       o task-level counters
       o tasks' diagnostic information
-      o task-logs displayed on the TaskTracker web-UI and
-      o job.xml showed by the JobTracker's web-UI
+      o task-logs displayed on the HistoryServer's web-UI and
+      o job.xml shown by the HistoryServer's web-UI
     Every other piece of information of jobs is still accessible by any other
     user, for e.g., JobStatus, JobProfile, list of jobs in the queue, etc.
 
@@ -1111,18 +738,10 @@
 </property>
 
 <property>
-  <name>mapreduce.tasktracker.indexcache.mb</name>
-  <value>10</value>
-  <description> The maximum memory that a task tracker allows for the 
-    index cache that is used when serving map outputs to reducers.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.task.merge.progress.records</name>
   <value>10000</value>
   <description> The number of records to process during merge before
-   sending a progress notification to the TaskTracker.
+   sending a progress notification to the MR ApplicationMaster.
   </description>
 </property>
 
@@ -1143,22 +762,6 @@
 </property>
 
 <property>
-  <name>mapreduce.tasktracker.taskcontroller</name>
-  <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-  <description>TaskController which is used to launch and manage task execution 
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.group</name>
-  <value></value>
-  <description>Expert: Group to which TaskTracker belongs. If 
-   LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller,
-   the group owner of the task-controller binary should be same as this group.
-  </description>
-</property>
-
-<property>
   <name>mapreduce.shuffle.port</name>
   <value>8080</value>
   <description>Default port that the ShuffleHandler will run on. ShuffleHandler 
@@ -1167,42 +770,6 @@
   </description>
 </property>
 
-<!--  Node health script variables -->
-
-<property>
-  <name>mapreduce.tasktracker.healthchecker.script.path</name>
-  <value></value>
-  <description>Absolute path to the script which is
-  periodicallyrun by the node health monitoring service to determine if
-  the node is healthy or not. If the value of this key is empty or the
-  file does not exist in the location configured here, the node health
-  monitoring service is not started.</description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.healthchecker.interval</name>
-  <value>60000</value>
-  <description>Frequency of the node health script to be run,
-  in milliseconds</description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
-  <value>600000</value>
-  <description>Time after node health script should be killed if 
-  unresponsive and considered that the script has failed.</description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.healthchecker.script.args</name>
-  <value></value>
-  <description>List of arguments which are to be passed to 
-  node health script when it is being launched comma seperated.
-  </description>
-</property>
-
-<!--  end of node health script variables -->
-
 <!-- MR YARN Application properties -->
 
 <property>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
index 669eaa4..830b64f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
@@ -42,6 +42,8 @@
 
 public class CompletedTask implements Task {
 
+  private static final Counters EMPTY_COUNTERS = new Counters();
+
   private final TaskId taskId;
   private final TaskInfo taskInfo;
   private TaskReport report;
@@ -124,7 +126,11 @@
     report.setFinishTime(taskInfo.getFinishTime());
     report.setTaskState(getState());
     report.setProgress(getProgress());
-    report.setCounters(TypeConverter.toYarn(getCounters()));
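+    // getCounters() can return null for tasks that failed before any
+    // counters were recorded; fall back to a shared empty Counters so
+    // TypeConverter.toYarn() is never handed null.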
+    Counters counters = getCounters();
+    if (counters == null) {
+      counters = EMPTY_COUNTERS;
+    }
+    report.setCounters(TypeConverter.toYarn(counters));
     if (successfulAttempt != null) {
       report.setSuccessfulAttempt(successfulAttempt);
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 6779588..f7bc506 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -23,14 +23,14 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
@@ -62,6 +62,7 @@
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.service.AbstractService;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
@@ -77,7 +78,7 @@
   private static enum HistoryInfoState {
     IN_INTERMEDIATE, IN_DONE, DELETED, MOVE_FAILED
   };
-
+  
   private static String DONE_BEFORE_SERIAL_TAIL = JobHistoryUtils
       .doneSubdirsBeforeSerialTail();
 
@@ -130,7 +131,7 @@
     }
   }
 
-  private static class JobListCache {
+  static class JobListCache {
     private ConcurrentSkipListMap<JobId, HistoryFileInfo> cache;
     private int maxSize;
     private long maxAge;
@@ -199,6 +200,29 @@
     }
   }
 
+  /**
+   * This class represents a user dir in the intermediate done directory.  This
+   * is mostly for locking purposes. 
+   */
+  private class UserLogDir {
+    long modTime = 0;
+    
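+    // One UserLogDir per user directory; synchronizing on it serializes
+    // concurrent scans of the same user while letting scans of different
+    // users proceed in parallel.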
+    public synchronized void scanIfNeeded(FileStatus fs) {
+      long newModTime = fs.getModificationTime();
+      if (modTime != newModTime) {
+        Path p = fs.getPath();
+        try {
+          scanIntermediateDirectory(p);
+          // Only record the new modification time after a successful scan;
+          // if scanning fails, we assume the failure is temporary and will
+          // scan this directory again on the next pass.
+          modTime = newModTime;
+        } catch (IOException e) {
+          LOG.error("Error while trying to scan the directory " + p, e);
+        }
+      }
+    }
+  }
+  
   public class HistoryFileInfo {
     private Path historyFile;
     private Path confFile;
@@ -216,12 +240,14 @@
           : HistoryInfoState.IN_INTERMEDIATE;
     }
 
-    private synchronized boolean isMovePending() {
+    @VisibleForTesting
+    synchronized boolean isMovePending() {
       return state == HistoryInfoState.IN_INTERMEDIATE
           || state == HistoryInfoState.MOVE_FAILED;
     }
 
-    private synchronized boolean didMoveFail() {
+    @VisibleForTesting
+    synchronized boolean didMoveFail() {
       return state == HistoryInfoState.MOVE_FAILED;
     }
     
@@ -342,7 +368,7 @@
   }
 
   private SerialNumberIndex serialNumberIndex = null;
-  private JobListCache jobListCache = null;
+  protected JobListCache jobListCache = null;
 
   // Maintains a list of known done subdirectories.
   private final Set<Path> existingDoneSubdirs = Collections
@@ -352,7 +378,8 @@
    * Maintains a mapping between intermediate user directories and the last
    * known modification time.
    */
-  private Map<String, Long> userDirModificationTimeMap = new HashMap<String, Long>();
+  private ConcurrentMap<String, UserLogDir> userDirModificationTimeMap = 
+    new ConcurrentHashMap<String, UserLogDir>();
 
   private JobACLsManager aclsMgr;
 
@@ -584,23 +611,15 @@
 
     for (FileStatus userDir : userDirList) {
       String name = userDir.getPath().getName();
-      long newModificationTime = userDir.getModificationTime();
-      boolean shouldScan = false;
-      synchronized (userDirModificationTimeMap) {
-        if (!userDirModificationTimeMap.containsKey(name)
-            || newModificationTime > userDirModificationTimeMap.get(name)) {
-          shouldScan = true;
-          userDirModificationTimeMap.put(name, newModificationTime);
+      UserLogDir dir = userDirModificationTimeMap.get(name);
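+      // Typical ConcurrentMap idiom: create an entry optimistically and, if
+      // another thread won the putIfAbsent race, adopt the winner's instance.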
+      if (dir == null) {
+        dir = new UserLogDir();
+        UserLogDir old = userDirModificationTimeMap.putIfAbsent(name, dir);
+        if (old != null) {
+          dir = old;
         }
       }
-      if (shouldScan) {
-        try {
-          scanIntermediateDirectory(userDir.getPath());
-        } catch (IOException e) {
-          LOG.error("Error while trying to scan the directory " 
-              + userDir.getPath(), e);
-        }
-      }
+      dir.scanIfNeeded(userDir);
     }
   }
 
@@ -691,8 +710,8 @@
    * @throws IOException
    */
   private HistoryFileInfo scanOldDirsForJob(JobId jobId) throws IOException {
-    int jobSerialNumber = JobHistoryUtils.jobSerialNumber(jobId);
-    String boxedSerialNumber = String.valueOf(jobSerialNumber);
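+    // Build the serial-number path component with the same formatter used
+    // when history files were moved to done, so the lookup matches the
+    // zero-padded directory names on disk.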
+    String boxedSerialNumber = JobHistoryUtils.serialNumberDirectoryComponent(
+        jobId, serialNumberFormat);
     Set<String> dateStringSet = serialNumberIndex.get(boxedSerialNumber);
     if (dateStringSet == null) {
       return null;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
index b596a21..e24cf05 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.app.MRApp;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
@@ -59,6 +60,7 @@
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
 import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
 import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -402,6 +404,108 @@
     }
   }
   
+  @Test
+  public void testCountersForFailedTask() throws Exception {
+    LOG.info("STARTING testCountersForFailedTask");
+    try {
+    Configuration conf = new Configuration();
+    conf
+        .setClass(
+            CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+            MyResolver.class, DNSToSwitchMapping.class);
+    RackResolver.init(conf);
+    MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true,
+        this.getClass().getName(), true);
+    app.submit(conf);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    JobId jobId = job.getID();
+    app.waitForState(job, JobState.FAILED);
+
+    // make sure all events are flushed
+    app.waitForState(Service.STATE.STOPPED);
+
+    String jobhistoryDir = JobHistoryUtils
+        .getHistoryIntermediateDoneDirForUser(conf);
+    JobHistory jobHistory = new JobHistory();
+    jobHistory.init(conf);
+
+    JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId)
+        .getJobIndexInfo();
+    String jobhistoryFileName = FileNameIndexUtils
+        .getDoneFileName(jobIndexInfo);
+
+    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
+    FSDataInputStream in = null;
+    FileContext fc = null;
+    try {
+      fc = FileContext.getFileContext(conf);
+      in = fc.open(fc.makeQualified(historyFilePath));
+    } catch (IOException ioe) {
+      LOG.info("Can not open history file: " + historyFilePath, ioe);
+      throw new Exception("Can not open History File");
+    }
+
+    JobHistoryParser parser = new JobHistoryParser(in);
+    JobInfo jobInfo = parser.parse();
+    Exception parseException = parser.getParseException();
+    Assert.assertNull("Caught an expected exception " + parseException,
+        parseException);
+    for (Map.Entry<TaskID,TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
+      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
+      CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
+      Assert.assertNotNull("completed task report has null counters",
+          ct.getReport().getCounters());
+    }
+    } finally {
+      LOG.info("FINISHED testCountersForFailedTask");
+    }
+  }
+
+  @Test
+  public void testScanningOldDirs() throws Exception {
+    LOG.info("STARTING testScanningOldDirs");
+    try {
+    Configuration conf = new Configuration();
+    conf
+        .setClass(
+            CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+            MyResolver.class, DNSToSwitchMapping.class);
+    RackResolver.init(conf);
+    MRApp app =
+        new MRAppWithHistory(1, 1, true,
+            this.getClass().getName(), true);
+    app.submit(conf);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    JobId jobId = job.getID();
+    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+    app.waitForState(job, JobState.SUCCEEDED);
+
+    // make sure all events are flushed
+    app.waitForState(Service.STATE.STOPPED);
+
+    HistoryFileManagerForTest hfm = new HistoryFileManagerForTest();
+    hfm.init(conf);
+    HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
+    Assert.assertNotNull("Unable to locate job history", fileInfo);
+
+    // force the manager to "forget" the job
+    hfm.deleteJobFromJobListCache(fileInfo);
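+    // Poll for up to ten seconds for the background move to the done
+    // directory to complete, failing immediately if the move itself failed.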
+    final int msecPerSleep = 10;
+    int msecToSleep = 10 * 1000;
+    while (fileInfo.isMovePending() && msecToSleep > 0) {
+      Assert.assertTrue(!fileInfo.didMoveFail());
+      msecToSleep -= msecPerSleep;
+      Thread.sleep(msecPerSleep);
+    }
+    Assert.assertTrue("Timeout waiting for history move", msecToSleep > 0);
+
+    fileInfo = hfm.getFileInfo(jobId);
+    Assert.assertNotNull("Unable to locate old job history", fileInfo);
+    } finally {
+      LOG.info("FINISHED testScanningOldDirs");
+    }
+  }
+
   static class MRAppWithHistoryWithFailedAttempt extends MRAppWithHistory {
 
     public MRAppWithHistoryWithFailedAttempt(int maps, int reduces, boolean autoComplete,
@@ -422,6 +526,32 @@
     }
   }
 
+  static class MRAppWithHistoryWithFailedTask extends MRAppWithHistory {
+
+    public MRAppWithHistoryWithFailedTask(int maps, int reduces, boolean autoComplete,
+        String testName, boolean cleanOnStart) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    protected void attemptLaunched(TaskAttemptId attemptID) {
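+      // Fail every attempt of the first task so the job ends up FAILED with
+      // a task that has no recorded counters; complete all other attempts.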
+      if (attemptID.getTaskId().getId() == 0) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+      }
+    }
+  }
+
+  static class HistoryFileManagerForTest extends HistoryFileManager {
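+    // Exposes JobListCache eviction so the test can make the manager
+    // "forget" a job and fall back to scanning the done directories.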
+    void deleteJobFromJobListCache(HistoryFileInfo fileInfo) {
+      jobListCache.delete(fileInfo);
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     TestJobHistoryParsing t = new TestJobHistoryParsing();
     t.testHistoryParsing();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index 10a6bea..f3ec383 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -102,7 +102,7 @@
 <property><!--Loaded from job.xml--><name>dfs.permissions.enabled</name><value>true</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.tasktracker.taskcontroller</name><value>org.apache.hadoop.mapred.DefaultTaskController</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.parallelcopies</name><value>5</value></property>
-<property><!--Loaded from job.xml--><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.jobtracker.heartbeats.in.second</name><value>100</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>4</value></property>
 <property><!--Loaded from job.xml--><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
@@ -317,8 +317,8 @@
         $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
         $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
         $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
-        $YARN_HOME/share/hadoop/mapreduce/*,
-        $YARN_HOME/share/hadoop/mapreduce/lib/*
+        $HADOOP_YARN_HOME/share/hadoop/mapreduce/*,
+        $HADOOP_YARN_HOME/share/hadoop/mapreduce/lib/*
      </value></property>
 <property><!--Loaded from job.xml--><name>yarn.nodemanager.log-aggregation.compression-type</name><value>gz</value></property>
 <property><!--Loaded from job.xml--><name>dfs.image.compress</name><value>false</value></property>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index f327176..b5008ee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -346,9 +346,13 @@
             jobConfPath, LocalResourceType.FILE));
     if (jobConf.get(MRJobConfig.JAR) != null) {
       Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR));
-      localResources.put(MRJobConfig.JOB_JAR,
-          createApplicationResource(defaultFileContext,
-              jobJarPath, LocalResourceType.ARCHIVE));
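+      // Localize the job jar as a PATTERN resource so that YARN unpacks only
+      // the jar entries matching the job's configured unpack pattern.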
+      LocalResource rc = createApplicationResource(defaultFileContext,
+          jobJarPath, 
+          LocalResourceType.PATTERN);
+      String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
+          JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
+      rc.setPattern(pattern);
+      localResources.put(MRJobConfig.JOB_JAR, rc);
     } else {
       // Job jar may be null. For e.g, for pipes, the job jar is the hadoop
       // mapreduce jar itself which is already on the classpath.
@@ -368,12 +372,9 @@
     }
 
     // Setup security tokens
-    ByteBuffer securityTokens = null;
-    if (UserGroupInformation.isSecurityEnabled()) {
-      DataOutputBuffer dob = new DataOutputBuffer();
-      ts.writeTokenStorageToStream(dob);
-      securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-    }
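+    // Serialize the credentials unconditionally; the AM expects this buffer
+    // whether or not Kerberos security is enabled.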
+    DataOutputBuffer dob = new DataOutputBuffer();
+    ts.writeTokenStorageToStream(dob);
+    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
 
     // Setup the command to run the AM
     List<String> vargs = new ArrayList<String>(8);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestAuditLogger.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIFile.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
new file mode 100644
index 0000000..b6a2df0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
@@ -0,0 +1,324 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Random;
+import java.util.zip.CRC32;
+import java.util.zip.CheckedOutputStream;
+
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+
+import junit.framework.TestCase;
+
+public class TestIndexCache extends TestCase {
+  private JobConf conf;
+  private FileSystem fs;
+  private Path p;
+
+  @Override
+  public void setUp() throws IOException {
+    conf = new JobConf();
+    fs = FileSystem.getLocal(conf).getRaw();
+    p =  new Path(System.getProperty("test.build.data", "/tmp"),
+        "cache").makeQualified(fs.getUri(), fs.getWorkingDirectory());
+  }
+
+  public void testLRCPolicy() throws Exception {
+    Random r = new Random();
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println("seed: " + seed);
+    fs.delete(p, true);
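+    // Cap the index cache at 1 MB (TT_INDEX_CACHE is in megabytes) so that
+    // filling it below forces eviction of the oldest entries.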
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
+    final int partsPerMap = 1000;
+    final int bytesPerFile = partsPerMap * 24;
+    IndexCache cache = new IndexCache(conf);
+
+    // fill cache
+    int totalsize = bytesPerFile;
+    for (; totalsize < 1024 * 1024; totalsize += bytesPerFile) {
+      Path f = new Path(p, Integer.toString(totalsize, 36));
+      writeFile(fs, f, totalsize, partsPerMap);
+      IndexRecord rec = cache.getIndexInformation(
+        Integer.toString(totalsize, 36), r.nextInt(partsPerMap), f,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+      checkRecord(rec, totalsize);
+    }
+
+    // delete files, ensure cache retains all elem
+    for (FileStatus stat : fs.listStatus(p)) {
+      fs.delete(stat.getPath(),true);
+    }
+    for (int i = bytesPerFile; i < 1024 * 1024; i += bytesPerFile) {
+      Path f = new Path(p, Integer.toString(i, 36));
+      IndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36),
+        r.nextInt(partsPerMap), f,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+      checkRecord(rec, i);
+    }
+
+    // push oldest (bytesPerFile) out of cache
+    Path f = new Path(p, Integer.toString(totalsize, 36));
+    writeFile(fs, f, totalsize, partsPerMap);
+    cache.getIndexInformation(Integer.toString(totalsize, 36),
+        r.nextInt(partsPerMap), f,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+    fs.delete(f, false);
+
+    // oldest fails to read, or error
+    boolean fnf = false;
+    try {
+      cache.getIndexInformation(Integer.toString(bytesPerFile, 36),
+        r.nextInt(partsPerMap), new Path(p, Integer.toString(bytesPerFile)),
+        UserGroupInformation.getCurrentUser().getShortUserName());
+    } catch (IOException e) {
+      if (e.getCause() == null ||
+          !(e.getCause()  instanceof FileNotFoundException)) {
+        throw e;
+      }
+      else {
+        fnf = true;
+      }
+    }
+    if (!fnf) {
+      fail("Failed to push out last entry");
+    }
+    // should find all the other entries
+    for (int i = bytesPerFile << 1; i < 1024 * 1024; i += bytesPerFile) {
+      IndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36),
+          r.nextInt(partsPerMap), new Path(p, Integer.toString(i, 36)),
+          UserGroupInformation.getCurrentUser().getShortUserName());
+      checkRecord(rec, i);
+    }
+    IndexRecord rec = cache.getIndexInformation(Integer.toString(totalsize, 36),
+      r.nextInt(partsPerMap), f,
+      UserGroupInformation.getCurrentUser().getShortUserName());
+
+    checkRecord(rec, totalsize);
+  }
+
+  public void testBadIndex() throws Exception {
+    final int parts = 30;
+    fs.delete(p, true);
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
+    IndexCache cache = new IndexCache(conf);
+
+    Path f = new Path(p, "badindex");
+    FSDataOutputStream out = fs.create(f, false);
+    CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
+    DataOutputStream dout = new DataOutputStream(iout);
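+    // Records written via 'out' bypass the CRC accumulator in 'iout', so the
+    // trailing checksum cannot match the file contents.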
+    for (int i = 0; i < parts; ++i) {
+      for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
+        if (0 == (i % 3)) {
+          dout.writeLong(i);
+        } else {
+          out.writeLong(i);
+        }
+      }
+    }
+    out.writeLong(iout.getChecksum().getValue());
+    dout.close();
+    try {
+      cache.getIndexInformation("badindex", 7, f,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+      fail("Did not detect bad checksum");
+    } catch (IOException e) {
+      if (!(e.getCause() instanceof ChecksumException)) {
+        throw e;
+      }
+    }
+  }
+
+  public void testInvalidReduceNumberOrLength() throws Exception {
+    fs.delete(p, true);
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
+    final int partsPerMap = 1000;
+    final int bytesPerFile = partsPerMap * 24;
+    IndexCache cache = new IndexCache(conf);
+
+    // fill cache
+    Path feq = new Path(p, "invalidReduceOrPartsPerMap");
+    writeFile(fs, feq, bytesPerFile, partsPerMap);
+
+    // Number of reducers should always be less than partsPerMap as reducer
+    // numbers start from 0 and there cannot be more reducers than parts.
+
+    try {
+      // Number of reducers equal to partsPerMap
+      cache.getIndexInformation("reduceEqualPartsPerMap", 
+               partsPerMap, // reduce number == partsPerMap
+               feq, UserGroupInformation.getCurrentUser().getShortUserName());
+      fail("Number of reducers equal to partsPerMap did not fail");
+    } catch (Exception e) {
+      if (!(e instanceof IOException)) {
+        throw e;
+      }
+    }
+
+    try {
+      // Number of reducers more than partsPerMap
+      cache.getIndexInformation(
+      "reduceMorePartsPerMap", 
+      partsPerMap + 1, // reduce number > partsPerMap
+      feq, UserGroupInformation.getCurrentUser().getShortUserName());
+      fail("Number of reducers more than partsPerMap did not fail");
+    } catch (Exception e) {
+      if (!(e instanceof IOException)) {
+        throw e;
+      }
+    }
+  }
+
+  public void testRemoveMap() throws Exception {
+    // This test case uses two threads to call getIndexInformation and
+    // removeMap concurrently, in order to construct a race condition.
+    // The test is not guaranteed to be repeatable, but on code before
+    // MAPREDUCE-2541 it failed essentially every run, so it is
+    // repeatable in practice.
+    fs.delete(p, true);
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 10);
+    // Make a big file so removeMapThread almost surely runs faster than 
+    // getInfoThread 
+    final int partsPerMap = 100000;
+    final int bytesPerFile = partsPerMap * 24;
+    final IndexCache cache = new IndexCache(conf);
+
+    final Path big = new Path(p, "bigIndex");
+    final String user = 
+      UserGroupInformation.getCurrentUser().getShortUserName();
+    writeFile(fs, big, bytesPerFile, partsPerMap);
+    
+    // run multiple times
+    for (int i = 0; i < 20; ++i) {
+      Thread getInfoThread = new Thread() {
+        @Override
+        public void run() {
+          try {
+            cache.getIndexInformation("bigIndex", partsPerMap, big, user);
+          } catch (Exception e) {
+            // should not be here
+          }
+        }
+      };
+      Thread removeMapThread = new Thread() {
+        @Override
+        public void run() {
+          cache.removeMap("bigIndex");
+        }
+      };
+      if (i % 2 == 0) {
+        getInfoThread.start();
+        removeMapThread.start();        
+      } else {
+        removeMapThread.start();        
+        getInfoThread.start();
+      }
+      getInfoThread.join();
+      removeMapThread.join();
+      assertEquals(true, cache.checkTotalMemoryUsed());
+    }      
+  }
+  
+  public void testCreateRace() throws Exception {
+    fs.delete(p, true);
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
+    final int partsPerMap = 1000;
+    final int bytesPerFile = partsPerMap * 24;
+    final IndexCache cache = new IndexCache(conf);
+    
+    final Path racy = new Path(p, "racyIndex");
+    final String user =  
+      UserGroupInformation.getCurrentUser().getShortUserName();
+    writeFile(fs, racy, bytesPerFile, partsPerMap);
+
+    // run multiple instances
+    Thread[] getInfoThreads = new Thread[50];
+    for (int i = 0; i < 50; i++) {
+      getInfoThreads[i] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            cache.getIndexInformation("racyIndex", partsPerMap, racy, user);
+            cache.removeMap("racyIndex");
+          } catch (Exception e) {
+            // should not be here
+          }
+        }
+      };
+    }
+
+    for (int i = 0; i < 50; i++) {
+      getInfoThreads[i].start();
+    }
+
+    final Thread mainTestThread = Thread.currentThread();
+
+    Thread timeoutThread = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(15000);
+          mainTestThread.interrupt();
+        } catch (InterruptedException ie) {
+          // we are done;
+        }
+      }
+    };
+
+    for (int i = 0; i < 50; i++) {
+      try {
+        getInfoThreads[i].join();
+      } catch (InterruptedException ie) {
+        // we haven't finished in time. Potential deadlock/race.
+        fail("Unexpectedly long delay during concurrent cache entry creations");
+      }
+    }
+    // stop the timeoutThread. If we get interrupted before stopping, there
+    // must be something wrong, although it wasn't a deadlock. No need to
+    // catch and swallow.
+    timeoutThread.interrupt();
+  }
+
+  private static void checkRecord(IndexRecord rec, long fill) {
+    assertEquals(fill, rec.startOffset);
+    assertEquals(fill, rec.rawLength);
+    assertEquals(fill, rec.partLength);
+  }
+
+  private static void writeFile(FileSystem fs, Path f, long fill, int parts)
+      throws IOException {
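+    // Writes 'parts' index records, each a run of identical 'fill' longs,
+    // followed by the CRC32 of the checksummed bytes.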
+    FSDataOutputStream out = fs.create(f, false);
+    CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32());
+    DataOutputStream dout = new DataOutputStream(iout);
+    for (int i = 0; i < parts; ++i) {
+      for (int j = 0; j < MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) {
+        dout.writeLong(fill);
+      }
+    }
+    out.writeLong(iout.getChecksum().getValue());
+    dout.close();
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestKeyValueTextInputFormat.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileInputFormat.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMultiFileSplit.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileSplit.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMultiFileSplit.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMultiFileSplit.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryInputFormat.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsBinaryOutputFormat.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileAsTextInputFormat.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSequenceFileInputFilter.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSortedRanges.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSortedRanges.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestSortedRanges.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestSortedRanges.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestStatisticsCollector.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestStatisticsCollector.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestStatisticsCollector.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestStatisticsCollector.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTaskStatus.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskStatus.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTaskStatus.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestUtils.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestUtils.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestUtils.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestControlledJob.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
index a3cd18c..a844737 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/partition/TestTotalOrderPartitioner.java
@@ -21,19 +21,25 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Comparator;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.serializer.JavaSerialization;
+import org.apache.hadoop.io.serializer.JavaSerializationComparator;
+import org.apache.hadoop.io.serializer.Serialization;
+import org.apache.hadoop.io.serializer.WritableSerialization;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 
 public class TestTotalOrderPartitioner extends TestCase {
@@ -51,6 +57,19 @@
     new Text("yak"),   // 9
   };
 
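+  // Split points for the plain-String (Java serialization) variant of the test.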
+  private static final String[] splitJavaStrings = new String[] {
+    // -inf        // 0
+    "aabbb",       // 1
+    "babbb",       // 2
+    "daddd",       // 3
+    "dddee",       // 4
+    "ddhee",       // 5
+    "dingo",       // 6
+    "hijjj",       // 7
+    "n",           // 8
+    "yak",         // 9
+  };
+
   static class Check<T> {
     T data;
     int part;
@@ -76,19 +95,41 @@
     testStrings.add(new Check<Text>(new Text("hi"), 6));
   };
 
-  private static <T extends WritableComparable<?>> Path writePartitionFile(
+  private static final ArrayList<Check<String>> testJavaStrings =
+      new ArrayList<Check<String>>();
+  static {
+    testJavaStrings.add(new Check<String>("aaaaa", 0));
+    testJavaStrings.add(new Check<String>("aaabb", 0));
+    testJavaStrings.add(new Check<String>("aabbb", 1));
+    testJavaStrings.add(new Check<String>("aaaaa", 0));
+    testJavaStrings.add(new Check<String>("babbb", 2));
+    testJavaStrings.add(new Check<String>("baabb", 1));
+    testJavaStrings.add(new Check<String>("yai", 8));
+    testJavaStrings.add(new Check<String>("yak", 9));
+    testJavaStrings.add(new Check<String>("z", 9));
+    testJavaStrings.add(new Check<String>("ddngo", 5));
+    testJavaStrings.add(new Check<String>("hi", 6));
+  };
+
+  private static <T> Path writePartitionFile(
       String testname, Configuration conf, T[] splits) throws IOException {
     final FileSystem fs = FileSystem.getLocal(conf);
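+    // Path.makeQualified(FileSystem) is deprecated; qualify against the
+    // FileSystem's URI and working directory instead.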
     final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")
-                                 ).makeQualified(fs);
+                                 ).makeQualified(
+                                     fs.getUri(),
+                                     fs.getWorkingDirectory());
     Path p = new Path(testdir, testname + "/_partition.lst");
     TotalOrderPartitioner.setPartitionFile(conf, p);
     conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
     SequenceFile.Writer w = null;
     try {
-      w = SequenceFile.createWriter(fs, conf, p,
-          splits[0].getClass(), NullWritable.class,
-          SequenceFile.CompressionType.NONE);
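+      // the FileSystem-based createWriter overloads are deprecated in favor
+      // of the Writer.Option factory used here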
+      w = SequenceFile.createWriter(
+          conf,
+          SequenceFile.Writer.file(p),
+          SequenceFile.Writer.keyClass(splits[0].getClass()),
+          SequenceFile.Writer.valueClass(NullWritable.class),
+          SequenceFile.Writer.compression(CompressionType.NONE));
       for (int i = 0; i < splits.length; ++i) {
         w.append(splits[i], NullWritable.get());
       }
@@ -99,6 +140,31 @@
     return p;
   }
 
+  public void testTotalOrderWithCustomSerialization() throws Exception {
+    TotalOrderPartitioner<String, NullWritable> partitioner =
+        new TotalOrderPartitioner<String, NullWritable>();
+    Configuration conf = new Configuration();
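+    // Register JavaSerialization (ahead of WritableSerialization) so plain
+    // String keys can be serialized, and set an explicit comparator since
+    // String is not a WritableComparable.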
+    conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
+        JavaSerialization.class.getName(),
+        WritableSerialization.class.getName());
+    conf.setClass(MRJobConfig.KEY_COMPARATOR,
+        JavaSerializationComparator.class,
+        Comparator.class);
+    Path p = TestTotalOrderPartitioner.<String>writePartitionFile(
+        "totalordercustomserialization", conf, splitJavaStrings);
+    conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, String.class, Object.class);
+    try {
+      partitioner.setConf(conf);
+      NullWritable nw = NullWritable.get();
+      for (Check<String> chk : testJavaStrings) {
+        assertEquals(chk.data.toString(), chk.part,
+            partitioner.getPartition(chk.data, nw, splitJavaStrings.length + 1));
+      }
+    } finally {
+      p.getFileSystem(conf).delete(p, true);
+    }
+  }
+
   public void testTotalOrderMemCmp() throws Exception {
     TotalOrderPartitioner<Text,NullWritable> partitioner =
       new TotalOrderPartitioner<Text,NullWritable>();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
new file mode 100644
index 0000000..2141080
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/CredentialsTestJob.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security;
+
+import java.io.IOException;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.SleepJob;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Class for testing transport of keys via Credentials.
+ * The client passes a list of keys in the Credentials object.
+ * The mapper and reducer check whether they can access the keys
+ * from Credentials.
+ */
+public class CredentialsTestJob extends Configured implements Tool {
+
+  private static final int NUM_OF_KEYS = 10;
+
+  private static void checkSecrets(Credentials ts) {
+    if (ts == null) {
+      // fail the test
+      throw new RuntimeException("The credentials are not available");
+    }
+
+    for (int i = 0; i < NUM_OF_KEYS; i++) {
+      String secretName = "alias" + i;
+      // get token storage and a key
+      byte[] secretValue = ts.getSecretKey(new Text(secretName));
+
+      if (secretValue == null) {
+        // fail the test
+        throw new RuntimeException("The key " + secretName + " is not available.");
+      }
+
+      String secretValueStr = new String(secretValue);
+      System.out.println(secretValueStr);
+
+      if (!("password" + i).equals(secretValueStr)) {
+        // fail the test
+        throw new RuntimeException("The key " + secretName +
+            " is not correct. Expected value is " + ("password" + i) +
+            ". Actual value is " + secretValueStr);
+      }
+    }
+  }
+
+  public static class CredentialsTestMapper
+      extends Mapper<IntWritable, IntWritable, IntWritable, NullWritable> {
+    Credentials ts;
+
+    @Override
+    protected void setup(Context context)
+        throws IOException, InterruptedException {
+      ts = context.getCredentials();
+    }
+
+    @Override
+    public void map(IntWritable key, IntWritable value, Context context)
+        throws IOException, InterruptedException {
+      checkSecrets(ts);
+    }
+  }
+
+  public static class CredentialsTestReducer
+      extends Reducer<IntWritable, NullWritable, NullWritable, NullWritable> {
+    Credentials ts;
+
+    @Override
+    protected void setup(Context context)
+        throws IOException, InterruptedException {
+      ts = context.getCredentials();
+    }
+
+    @Override
+    public void reduce(IntWritable key, Iterable<NullWritable> values,
+        Context context) throws IOException {
+      checkSecrets(ts);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(), new CredentialsTestJob(), args);
+    System.exit(res);
+  }
+
+  public Job createJob()
+      throws IOException {
+    Configuration conf = getConf();
+    conf.setInt(MRJobConfig.NUM_MAPS, 1);
+    Job job = Job.getInstance(conf, "test");
+    job.setNumReduceTasks(1);
+    job.setJarByClass(CredentialsTestJob.class);
+    job.setMapperClass(CredentialsTestJob.CredentialsTestMapper.class);
+    job.setMapOutputKeyClass(IntWritable.class);
+    job.setMapOutputValueClass(NullWritable.class);
+    job.setReducerClass(CredentialsTestJob.CredentialsTestReducer.class);
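+    // SleepJob's input format fabricates its splits in memory, so the
+    // "ignored" input path added below is never actually read.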
+    job.setInputFormatClass(SleepJob.SleepInputFormat.class);
+    job.setPartitionerClass(SleepJob.SleepJobPartitioner.class);
+    job.setOutputFormatClass(NullOutputFormat.class);
+    job.setSpeculativeExecution(false);
+    job.setJobName("test job");
+    FileInputFormat.addInputPath(job, new Path("ignored"));
+    return job;
+  }
+
+  public int run(String[] args) throws Exception {
+
+    Job job = createJob();
+    return job.waitForCompletion(true) ? 0 : 1;
+  }
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestMRCredentials.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestMRCredentials.java
new file mode 100644
index 0000000..a496997
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestMRCredentials.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRClientCluster;
+import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests whether a protected secret passed from JobClient is
+ * available to the child task
+ */
+public class TestMRCredentials {
+
+  static final int NUM_OF_KEYS = 10;
+  private static MiniMRClientCluster mrCluster;
+  private static MiniDFSCluster dfsCluster;
+  private static int numSlaves = 1;
+  private static JobConf jConf;
+
+  @SuppressWarnings("deprecation")
+  @BeforeClass
+  public static void setUp() throws Exception {
+    System.setProperty("hadoop.log.dir", "logs");
+    Configuration conf = new Configuration();
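+    // bring up a one-node DFS mini cluster and a one-node MR mini cluster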
+    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);  
+    jConf = new JobConf(conf);
+    FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
+    mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
+    createKeysAsJson("keys.json");
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (mrCluster != null) {
+      mrCluster.stop();
+    }
+    mrCluster = null;
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    dfsCluster = null;
+
+    new File("keys.json").delete();
+  }
+
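+  // The keys file is hand-built JSON; the alias/password values used here
+  // contain nothing that would need escaping.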
+  public static void createKeysAsJson(String fileName)
+      throws FileNotFoundException, IOException {
+    StringBuilder jsonString = new StringBuilder();
+    jsonString.append("{");
+    for (int i = 0; i < NUM_OF_KEYS; i++) {
+      String keyName = "alias" + i;
+      String password = "password" + i;
+      jsonString.append("\"" + keyName + "\":" + "\"" + password + "\"");
+      if (i < (NUM_OF_KEYS - 1)) {
+        jsonString.append(",");
+      }
+    }
+    jsonString.append("}");
+
+    FileOutputStream fos = new FileOutputStream(fileName);
+    try {
+      fos.write(jsonString.toString().getBytes());
+    } finally {
+      fos.close();
+    }
+  }
+
+  /**
+   * run a distributed job and verify that TokenCache is available
+   * @throws IOException
+   */
+  @Test
+  public void test() throws IOException {
+
+    // make sure JT starts
+    Configuration jobConf =  new JobConf(mrCluster.getConfig());
+
+    // provide namenode names for the job to get the delegation tokens for
+    NameNode nn = dfsCluster.getNameNode();
+    URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
+    jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri);
+
+    jobConf.set("mapreduce.job.credentials.json", "keys.json");
+
+    // sleep job parameters: one map, one reduce, minimal task runtimes
+    String[] args = {
+        "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
+    };
+
+    int res = -1;
+    try {
+      res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
+    } catch (Exception e) {
+      System.out.println("Job failed with" + e.getLocalizedMessage());
+      e.printStackTrace(System.out);
+      fail("Job failed");
+    }
+    assertEquals("dist job res is not 0", res, 0);
+
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
similarity index 100%
rename from hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/TestProcfsBasedProcessTree.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 5850242..270ddc9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -27,7 +27,6 @@
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -82,6 +81,7 @@
   
   private Connection connection;
   private boolean initialized = false;
+  private boolean isOracle = false;
 
   private static final String[] AccessFieldNames = {"url", "referrer", "time"};
   private static final String[] PageviewFieldNames = {"url", "pageview"};
@@ -102,7 +102,9 @@
   
   private void createConnection(String driverClassName
       , String url) throws Exception {
-    
+    if (driverClassName.toLowerCase().contains("oracle")) {
+      isOracle = true;
+    }
     Class.forName(driverClassName);
     connection = DriverManager.getConnection(url);
     connection.setAutoCommit(false);
@@ -142,7 +144,7 @@
   }
   
   private void dropTables() {
-    String dropAccess = "DROP TABLE Access";
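+    // ACCESS is a reserved word in some databases (notably Oracle), so the
+    // table is named HAccess instead.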
+    String dropAccess = "DROP TABLE HAccess";
     String dropPageview = "DROP TABLE Pageview";
     Statement st = null;
     try {
@@ -157,18 +159,21 @@
   }
   
   private void createTables() throws SQLException {
-
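+    // Oracle has no BIGINT type; NUMBER(19) covers the same 64-bit range.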
+    String dataType = "BIGINT NOT NULL";
+    if (isOracle) {
+      dataType = "NUMBER(19) NOT NULL";
+    }
     String createAccess = 
       "CREATE TABLE " +
-      "Access(url      VARCHAR(100) NOT NULL," +
+      "HAccess(url      VARCHAR(100) NOT NULL," +
             " referrer VARCHAR(100)," +
-            " time     BIGINT NOT NULL, " +
+            " time     " + dataType + ", " +
             " PRIMARY KEY (url, time))";
 
     String createPageview = 
       "CREATE TABLE " +
       "Pageview(url      VARCHAR(100) NOT NULL," +
-              " pageview     BIGINT NOT NULL, " +
+              " pageview     " + dataType + ", " +
                " PRIMARY KEY (url))";
     
     Statement st = connection.createStatement();
@@ -189,7 +194,7 @@
     PreparedStatement statement = null ;
     try {
       statement = connection.prepareStatement(
-          "INSERT INTO Access(url, referrer, time)" +
+          "INSERT INTO HAccess(url, referrer, time)" +
           " VALUES (?, ?, ?)");
 
       Random random = new Random();
@@ -248,7 +253,7 @@
   /**Verifies the results are correct */
   private boolean verify() throws SQLException {
     //check total num pageview
-    String countAccessQuery = "SELECT COUNT(*) FROM Access";
+    String countAccessQuery = "SELECT COUNT(*) FROM HAccess";
     String sumPageviewQuery = "SELECT SUM(pageview) FROM Pageview";
     Statement st = null;
     ResultSet rs = null;
@@ -396,7 +401,7 @@
 
     DBConfiguration.configureDB(conf, driverClassName, url);
 
-    Job job = new Job(conf);
+    Job job = Job.getInstance(conf);
         
     job.setJobName("Count Pageviews of URLs");
     job.setJarByClass(DBCountPageView.class);
@@ -404,7 +409,7 @@
     job.setCombinerClass(LongSumReducer.class);
     job.setReducerClass(PageviewReducer.class);
 
-    DBInputFormat.setInput(job, AccessRecord.class, "Access"
+    DBInputFormat.setInput(job, AccessRecord.class, "HAccess"
         , null, "url", AccessFieldNames);
 
     DBOutputFormat.setOutput(job, "Pageview", PageviewFieldNames);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
index 09d65b0..5f76d87 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
@@ -165,16 +165,30 @@
   }
 
   public int run(String[] args) throws Exception {
+    Configuration conf = getConf();
     if (args.length == 0) {
-      System.out.println("pentomino <output>");
+      System.out.println("Usage: pentomino <output> [-depth #] [-height #] [-width #]");
       ToolRunner.printGenericCommandUsage(System.out);
       return 2;
     }
-
-    Configuration conf = getConf();
-    int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
-    int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
-    int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
+    // check for passed parameters, otherwise use defaults
+    int width = PENT_WIDTH;
+    int height = PENT_HEIGHT;
+    int depth = PENT_DEPTH;
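+    // each flag consumes the argument that follows it as its value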
+    for (int i = 0; i < args.length; i++) {
+      if (args[i].equalsIgnoreCase("-depth")) {
+        depth = Integer.parseInt(args[++i].trim());
+      } else if (args[i].equalsIgnoreCase("-height")) {
+        height = Integer.parseInt(args[++i].trim());
+      } else if (args[i].equalsIgnoreCase("-width")) {
+        width = Integer.parseInt(args[++i].trim());
+      }
+    }
+    // now set the values in conf so the M/R tasks can read them; this
+    // guarantees they are always set, preventing MAPREDUCE-4678
+    conf.setInt(Pentomino.WIDTH, width);
+    conf.setInt(Pentomino.HEIGHT, height);
+    conf.setInt(Pentomino.DEPTH, depth);
     Class<? extends Pentomino> pentClass = conf.getClass(Pentomino.CLASS, 
       OneSidedPentomino.class, Pentomino.class);
     int numMaps = conf.getInt(MRJobConfig.NUM_MAPS, DEFAULT_MAPS);
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index eb2dc22..294441d4 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -149,7 +149,6 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.jboss.netty</groupId>
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
index b18a527..ea2980c 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java
@@ -98,7 +98,7 @@
     dfsCluster.shutdown();
   }
 
-  public void testJobQueues() throws IOException {
+  public void testJobQueues() throws Exception {
     JobClient jc = new JobClient(mrCluster.createJobConf());
     String expectedQueueInfo = "Maximum Tasks Per Job :: 10";
     JobQueueInfo[] queueInfos = jc.getQueues();
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
index 07706b1..2c4d999 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
@@ -149,7 +149,7 @@
   private void testSetupAndCleanupKill(MiniMRCluster mr, 
                                        MiniDFSCluster dfs, 
                                        boolean commandLineKill) 
-  throws IOException {
+  throws Exception {
     // launch job with waiting setup/cleanup
     RunningJob job = launchJobWithWaitingSetupAndCleanup(mr);
     
@@ -223,7 +223,7 @@
   // Also Tests the command-line kill for setup/cleanup attempts. 
   // tests the setup/cleanup attempts getting killed if 
   // they were running on a lost tracker
-  public void testWithDFS() throws IOException {
+  public void testWithDFS() throws Exception {
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     FileSystem fileSys = null;
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
index fc3c617..1c7e70c 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
+++ b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
@@ -449,7 +449,7 @@
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           String mapSignalFile, 
                           String reduceSignalFile, int replication) 
-  throws IOException {
+  throws Exception {
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
               (short)replication);
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), 
@@ -462,7 +462,7 @@
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           boolean isMap, String mapSignalFile, 
                           String reduceSignalFile)
-  throws IOException {
+  throws Exception {
     //  signal the maps to complete
     writeFile(dfs.getNameNode(), fileSys.getConf(),
               isMap 
@@ -483,7 +483,7 @@
   }
   
   static void writeFile(NameNode namenode, Configuration conf, Path name, 
-      short replication) throws IOException {
+      short replication) throws Exception {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer = 
       SequenceFile.createWriter(fileSys, conf, name, 
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f23adc7..372a5ed 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -698,7 +698,7 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
-          <version>2.12</version>
+          <version>2.12.3</version>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
@@ -834,9 +834,9 @@
         <configuration>
           <forkMode>always</forkMode>
           <forkedProcessTimeoutInSeconds>900</forkedProcessTimeoutInSeconds>
-          <argLine>-Xmx1024m</argLine>
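+          <!-- -XX:+HeapDumpOnOutOfMemoryError leaves a heap dump behind when a forked test JVM dies of an OOM, aiding post-mortem diagnosis. -->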
+          <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError</argLine>
           <environmentVariables>
-            <LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib</LD_LIBRARY_PATH>
+            <LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${basedir}/../../hadoop-common-project/hadoop-common/target/native/target/usr/local/lib/</LD_LIBRARY_PATH>
             <MALLOC_ARENA_MAX>4</MALLOC_ARENA_MAX>
           </environmentVariables>
           <systemPropertyVariables>
@@ -854,6 +854,7 @@
             <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
             <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
             <java.security.egd>file:///dev/urandom</java.security.egd>
+            <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
           </systemPropertyVariables>
           <includes>
             <include>**/Test*.java</include>
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCp.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCp.java
deleted file mode 100644
index 64018cf..0000000
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCp.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.tools;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobSubmissionFiles;
-import org.apache.hadoop.mapreduce.Cluster;
-import org.apache.hadoop.tools.mapred.CopyOutputFormat;
-import org.junit.*;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.io.*;
-
-@Ignore
-public class TestDistCp {
-  private static final Log LOG = LogFactory.getLog(TestDistCp.class);
-  private static List<Path> pathList = new ArrayList<Path>();
-  private static final int FILE_SIZE = 1024;
-
-  private static Configuration configuration;
-  private static MiniDFSCluster cluster;
-  private static MiniMRCluster mrCluster;
-
-  private static final String SOURCE_PATH = "/tmp/source";
-  private static final String TARGET_PATH = "/tmp/target";
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    configuration = getConfigurationForCluster();
-    cluster = new MiniDFSCluster.Builder(configuration).numDataNodes(1)
-                    .format(true).build();
-    System.setProperty("org.apache.hadoop.mapred.TaskTracker", "target/tmp");
-    configuration.set("org.apache.hadoop.mapred.TaskTracker", "target/tmp");
-    System.setProperty("hadoop.log.dir", "target/tmp");
-    configuration.set("hadoop.log.dir", "target/tmp");
-    mrCluster = new MiniMRCluster(1, cluster.getFileSystem().getUri().toString(), 1);
-    Configuration mrConf = mrCluster.createJobConf();
-    final String mrJobTracker = mrConf.get("mapred.job.tracker");
-    configuration.set("mapred.job.tracker", mrJobTracker);
-    final String mrJobTrackerAddress
-            = mrConf.get("mapred.job.tracker.http.address");
-    configuration.set("mapred.job.tracker.http.address", mrJobTrackerAddress);
-  }
-
-  @AfterClass
-  public static void cleanup() {
-    if (mrCluster != null) mrCluster.shutdown();
-    if (cluster != null) cluster.shutdown();
-  }
-
-  private static Configuration getConfigurationForCluster() throws IOException {
-    Configuration configuration = new Configuration();
-    System.setProperty("test.build.data", "target/build/TEST_DISTCP/data");
-    configuration.set("hadoop.log.dir", "target/tmp");
-
-    LOG.debug("fs.default.name  == " + configuration.get("fs.default.name"));
-    LOG.debug("dfs.http.address == " + configuration.get("dfs.http.address"));
-    return configuration;
-  }
-
-  private static void createSourceData() throws Exception {
-    mkdirs(SOURCE_PATH + "/1");
-    mkdirs(SOURCE_PATH + "/2");
-    mkdirs(SOURCE_PATH + "/2/3/4");
-    mkdirs(SOURCE_PATH + "/2/3");
-    mkdirs(SOURCE_PATH + "/5");
-    touchFile(SOURCE_PATH + "/5/6");
-    mkdirs(SOURCE_PATH + "/7");
-    mkdirs(SOURCE_PATH + "/7/8");
-    touchFile(SOURCE_PATH + "/7/8/9");
-  }
-
-  private static void mkdirs(String path) throws Exception {
-    FileSystem fileSystem = cluster.getFileSystem();
-    final Path qualifiedPath = new Path(path).makeQualified(fileSystem.getUri(),
-                                  fileSystem.getWorkingDirectory());
-    pathList.add(qualifiedPath);
-    fileSystem.mkdirs(qualifiedPath);
-  }
-
-  private static void touchFile(String path) throws Exception {
-    FileSystem fs;
-    DataOutputStream outputStream = null;
-    try {
-      fs = cluster.getFileSystem();
-      final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
-                                            fs.getWorkingDirectory());
-      final long blockSize = fs.getDefaultBlockSize(new Path(path)) * 2;
-      outputStream = fs.create(qualifiedPath, true, 0,
-              (short)(fs.getDefaultReplication(new Path(path))*2),
-              blockSize);
-      outputStream.write(new byte[FILE_SIZE]);
-      pathList.add(qualifiedPath);
-    }
-    finally {
-      IOUtils.cleanup(null, outputStream);
-    }
-  }
-
-  private static void clearState() throws Exception {
-    pathList.clear();
-    cluster.getFileSystem().delete(new Path(TARGET_PATH), true);
-    createSourceData();
-  }
-
-//  @Test
-  public void testUniformSizeDistCp() throws Exception {
-    try {
-      clearState();
-      final FileSystem fileSystem = cluster.getFileSystem();
-      Path sourcePath = new Path(SOURCE_PATH)
-              .makeQualified(fileSystem.getUri(),
-                             fileSystem.getWorkingDirectory());
-      List<Path> sources = new ArrayList<Path>();
-      sources.add(sourcePath);
-
-      Path targetPath = new Path(TARGET_PATH)
-              .makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
-      DistCpOptions options = new DistCpOptions(sources, targetPath);
-      options.setAtomicCommit(true);
-      options.setBlocking(false);
-      Job job = new DistCp(configuration, options).execute();
-      Path workDir = CopyOutputFormat.getWorkingDirectory(job);
-      Path finalDir = CopyOutputFormat.getCommitDirectory(job);
-
-      while (!job.isComplete()) {
-        if (cluster.getFileSystem().exists(workDir)) {
-          break;
-        }
-      }
-      job.waitForCompletion(true);
-      Assert.assertFalse(cluster.getFileSystem().exists(workDir));
-      Assert.assertTrue(cluster.getFileSystem().exists(finalDir));
-      Assert.assertFalse(cluster.getFileSystem().exists(
-          new Path(job.getConfiguration().get(DistCpConstants.CONF_LABEL_META_FOLDER))));
-      verifyResults();
-    }
-    catch (Exception e) {
-      LOG.error("Exception encountered", e);
-      Assert.fail("Unexpected exception: " + e.getMessage());
-    }
-  }
-
-//  @Test
-  public void testCleanup() {
-    try {
-      clearState();
-      Path sourcePath = new Path("noscheme:///file");
-      List<Path> sources = new ArrayList<Path>();
-      sources.add(sourcePath);
-
-      final FileSystem fs = cluster.getFileSystem();
-      Path targetPath = new Path(TARGET_PATH)
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory());
-      DistCpOptions options = new DistCpOptions(sources, targetPath);
-
-      Path stagingDir = JobSubmissionFiles.getStagingDir(
-              new Cluster(configuration), configuration);
-      stagingDir.getFileSystem(configuration).mkdirs(stagingDir);
-
-      try {
-        new DistCp(configuration, options).execute();
-      } catch (Throwable t) {
-        Assert.assertEquals(stagingDir.getFileSystem(configuration).
-            listStatus(stagingDir).length, 0);
-      }
-    } catch (Exception e) {
-      LOG.error("Exception encountered ", e);
-      Assert.fail("testCleanup failed " + e.getMessage());
-    }
-  }
-
-  @Test
-  public void testRootPath() throws Exception {
-    try {
-      clearState();
-      List<Path> sources = new ArrayList<Path>();
-      final FileSystem fs = cluster.getFileSystem();
-      sources.add(new Path("/a")
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory()));
-      sources.add(new Path("/b")
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory()));
-      touchFile("/a/a.txt");
-      touchFile("/b/b.txt");
-
-      Path targetPath = new Path("/c")
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory());
-      DistCpOptions options = new DistCpOptions(sources, targetPath);
-      new DistCp(configuration, options).execute();
-      Assert.assertTrue(fs.exists(new Path("/c/a/a.txt")));
-      Assert.assertTrue(fs.exists(new Path("/c/b/b.txt")));
-    }
-    catch (Exception e) {
-      LOG.error("Exception encountered", e);
-      Assert.fail("Unexpected exception: " + e.getMessage());
-    }
-  }
-
-  @Test
-  public void testDynamicDistCp() throws Exception {
-    try {
-      clearState();
-      final FileSystem fs = cluster.getFileSystem();
-      Path sourcePath = new Path(SOURCE_PATH)
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory());
-      List<Path> sources = new ArrayList<Path>();
-      sources.add(sourcePath);
-
-      Path targetPath = new Path(TARGET_PATH)
-              .makeQualified(fs.getUri(), fs.getWorkingDirectory());
-      DistCpOptions options = new DistCpOptions(sources, targetPath);
-      options.setCopyStrategy("dynamic");
-
-      options.setAtomicCommit(true);
-      options.setAtomicWorkPath(new Path("/work"));
-      options.setBlocking(false);
-      Job job = new DistCp(configuration, options).execute();
-      Path workDir = CopyOutputFormat.getWorkingDirectory(job);
-      Path finalDir = CopyOutputFormat.getCommitDirectory(job);
-
-      while (!job.isComplete()) {
-        if (fs.exists(workDir)) {
-          break;
-        }
-      }
-      job.waitForCompletion(true);
-      Assert.assertFalse(fs.exists(workDir));
-      Assert.assertTrue(fs.exists(finalDir));
-
-      verifyResults();
-    }
-    catch (Exception e) {
-      LOG.error("Exception encountered", e);
-      Assert.fail("Unexpected exception: " + e.getMessage());
-    }
-  }
-
-  private static void verifyResults() throws Exception {
-    for (Path path : pathList) {
-      FileSystem fs = cluster.getFileSystem();
-
-      Path sourcePath = path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
-      Path targetPath
-              = new Path(sourcePath.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
-
-      Assert.assertTrue(fs.exists(targetPath));
-      Assert.assertEquals(fs.isFile(sourcePath), fs.isFile(targetPath));
-    }
-  }
-}
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
index 303a4e0..ca08e25 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestIntegration.java
@@ -21,8 +21,11 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Cluster;
+import org.apache.hadoop.mapreduce.JobSubmissionFiles;
 import org.apache.hadoop.tools.util.TestDistCpUtils;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -30,6 +33,8 @@
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
 
 public class TestIntegration {
   private static final Log LOG = LogFactory.getLog(TestIntegration.class);
@@ -317,6 +322,58 @@
       TestDistCpUtils.delete(fs, root);
     }
   }
+  
+  @Test
+  public void testDeleteMissingInDestination() {
+    
+    try {
+      addEntries(listFile, "srcdir");
+      createFiles("srcdir/file1", "dstdir/file1", "dstdir/file2");
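+      // with delete enabled, dstdir/file2 (absent from the source) must be removed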
+      
+      Path target = new Path(root + "/dstdir");
+      runTest(listFile, target, true, true, false);
+      
+      checkResult(target, 1, "file1");
+    } catch (IOException e) {
+      LOG.error("Exception encountered while running distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
+  
+  @Test
+  public void testOverwrite() {
+    byte[] contents1 = "contents1".getBytes();
+    byte[] contents2 = "contents2".getBytes();
+    Assert.assertEquals(contents1.length, contents2.length);
+    
+    try {
+      addEntries(listFile, "srcdir");
+      createWithContents("srcdir/file1", contents1);
+      createWithContents("dstdir/file1", contents2);
+      
+      Path target = new Path(root + "/dstdir");
+      runTest(listFile, target, false, false, true);
+      
+      checkResult(target, 1, "file1");
+      
+      // make sure dstdir/file1 has been overwritten with the contents
+      // of srcdir/file1
+      FSDataInputStream is = fs.open(new Path(root + "/dstdir/file1"));
+      byte[] dstContents = new byte[contents1.length];
+      is.readFully(dstContents);
+      is.close();
+      Assert.assertArrayEquals(contents1, dstContents);
+    } catch (IOException e) {
+      LOG.error("Exception encountered while running distcp", e);
+      Assert.fail("distcp failure");
+    } finally {
+      TestDistCpUtils.delete(fs, root);
+      TestDistCpUtils.delete(fs, "target/tmp1");
+    }
+  }
 
   @Test
   public void testGlobTargetMissingSingleLevel() {
@@ -410,7 +467,33 @@
       TestDistCpUtils.delete(fs, "target/tmp1");
     }
   }
+  
+  @Test
+  public void testCleanup() {
+    try {
+      Path sourcePath = new Path("noscheme:///file");
+      List<Path> sources = new ArrayList<Path>();
+      sources.add(sourcePath);
 
+      DistCpOptions options = new DistCpOptions(sources, target);
+
+      Configuration conf = getConf();
+      Path stagingDir = JobSubmissionFiles.getStagingDir(
+              new Cluster(conf), conf);
+      stagingDir.getFileSystem(conf).mkdirs(stagingDir);
+
+      try {
+        new DistCp(conf, options).execute();
+      } catch (Throwable t) {
+        Assert.assertEquals(0, stagingDir.getFileSystem(conf)
+            .listStatus(stagingDir).length);
+      }
+    } catch (Exception e) {
+      LOG.error("Exception encountered ", e);
+      Assert.fail("testCleanup failed " + e.getMessage());
+    }
+  }
+  
   private void addEntries(Path listFile, String... entries) throws IOException {
     OutputStream out = fs.create(listFile);
     try {
@@ -434,16 +517,32 @@
       }
     }
   }
+  
+  private void createWithContents(String entry, byte[] contents) throws IOException {
+    OutputStream out = fs.create(new Path(root + "/" + entry));
+    try {
+      out.write(contents);
+    } finally {
+      out.close();
+    }
+  }
 
   private void mkdirs(String... entries) throws IOException {
     for (String entry : entries){
       fs.mkdirs(new Path(entry));
     }
   }
-
+
   private void runTest(Path listFile, Path target, boolean sync) throws IOException {
+    runTest(listFile, target, sync, false, false);
+  }
+  
+  private void runTest(Path listFile, Path target, boolean sync, boolean delete,
+      boolean overwrite) throws IOException {
     DistCpOptions options = new DistCpOptions(listFile, target);
     options.setSyncFolder(sync);
+    options.setDeleteMissing(delete);
+    options.setOverwrite(overwrite);
     try {
       new DistCp(getConf(), options).execute();
     } catch (Exception e) {
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 71a85e1..71d65da 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,4 +1,4 @@
-Hadoop MapReduce Change Log
+Hadoop YARN Change Log
 
 Trunk - Unreleased 
 
@@ -40,14 +40,49 @@
     YARN-53. Added the missing getGroups API to ResourceManager. (Bo Wang via
     vinodkv)
 
+    YARN-116. Add the ability to change the RM include/exclude file without
+    a restart. (xieguiming and Harsh J via sseth)
+
+    YARN-23. FairScheduler: FSQueueSchedulable#updateDemand() - potential 
+    redundant aggregation. (kkambatl via tucu)
+
+    YARN-127. Move RMAdmin tool to its correct location - the client module.
+    (vinodkv)
+
+    YARN-40. Provided support for missing YARN commands (Devaraj K and Vinod
+    Kumar Vavilapalli via vinodkv)
+
+    YARN-33. Change LocalDirsHandlerService to validate the configured local and
+    log dirs. (Mayank Bansal via sseth)
+
+    YARN-94. Modify DistributedShell to point to main-class by default, clean up
+    the help message, and hard-code the AM class. (Hitesh Shah via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
+    
+    YARN-131. Fix incorrect ACL properties in capacity scheduler documentation.
+    (Ahmed Radwan via sseth)
+
+    YARN-102. Move the apache header to the top of the file in MemStore.java.
+    (Devaraj K via sseth)
+    
+    YARN-134. ClientToAMSecretManager creates keys without checking for
+    validity of the appID. (Vinod Kumar Vavilapalli via sseth)
+
+    YARN-30. Fixed tests verifying web-services to work on JDK7. (Thomas Graves
+    via vinodkv)
+
+    YARN-150. Fixes AppRejectedTransition does not unregister a rejected
+    app-attempt from the ApplicationMasterService (Bikas Saha via sseth)
 
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES
 
+    YARN-9. Rename YARN_HOME to HADOOP_YARN_HOME. (vinodkv via acmurthy)
+
   NEW FEATURES
 
     YARN-1. Promote YARN to be a sub-project of Apache Hadoop. (acmurthy)
@@ -62,6 +97,9 @@
     YARN-80. Add support for delaying rack-local containers in
     CapacityScheduler. (acmurthy) 
 
+    YARN-137. Change the default YARN scheduler to be the CapacityScheduler. 
+    (sseth via acmurthy) 
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -89,6 +127,21 @@
     MAPREDUCE-2374. "Text File Busy" errors launching MR tasks. (Andy Isaacson
     via atm)
 
+    YARN-138. Ensure default values for minimum/maximum container sizes is
+    sane. (harsh & sseth via acmurthy)
+
+Release 0.23.5 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.4 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -109,7 +162,10 @@
     YARN-88. DefaultContainerExecutor can fail to set proper permissions.
     (Jason Lowe via sseth)
 
-Release 0.23.3 - Unreleased 
+    YARN-106. Nodemanager needs to set permissions of local directories (jlowe
+    via bobby)
+
+Release 0.23.3
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/slaves.sh b/hadoop-yarn-project/hadoop-yarn/bin/slaves.sh
index ee25460..9b783b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/slaves.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/slaves.sh
@@ -22,7 +22,7 @@
 #
 #   YARN_SLAVES    File naming remote hosts.
 #     Default is ${YARN_CONF_DIR}/slaves.
-#   YARN_CONF_DIR  Alternate conf dir. Default is ${YARN_HOME}/conf.
+#   YARN_CONF_DIR  Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
 #   YARN_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
 #   YARN_SSH_OPTS Options passed to ssh when running remote commands.
 ##
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 01687b0..323f959 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -41,13 +41,13 @@
 #                              more than one command (fs, dfs, fsck, 
 #                              dfsadmin etc)  
 #
-#   YARN_CONF_DIR  Alternate conf dir. Default is ${YARN_HOME}/conf.
+#   YARN_CONF_DIR  Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
 #
 #   YARN_ROOT_LOGGER The root appender. Default is INFO,console
 #
 
 bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+bin=`cd "$bin" > /dev/null; pwd`
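+# (with CDPATH set, cd echoes the target directory; the redirect keeps that out of $bin)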
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
@@ -67,6 +67,8 @@
   echo "  rmadmin              admin tools" 
   echo "  version              print the version"
   echo "  jar <jar>            run a jar file"
+  echo "  application          prints application(s) report/kill application"
+  echo "  node                 prints node report(s)"
   echo "  logs                 dump container logs"
   echo "  classpath            prints the class path needed to get the"
   echo "                       Hadoop jar and the required libraries"
@@ -116,43 +118,43 @@
 CLASSPATH="${HADOOP_CONF_DIR}:${YARN_CONF_DIR}:${CLASSPATH}"
 
 # for developers, add Hadoop classes to CLASSPATH
-if [ -d "$YARN_HOME/yarn-api/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-api/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-api/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-api/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-common/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-common/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-common/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-common/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-mapreduce/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-mapreduce/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-mapreduce/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-mapreduce/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-master-worker/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-master-worker/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-master-worker/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-master-worker/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-nodemanager/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-common/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-common/target/classes
 fi
-if [ -d "$YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
+if [ -d "$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/yarn-server/yarn-server-resourcemanager/target/classes
 fi
-if [ -d "$YARN_HOME/build/test/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/target/test/classes
+if [ -d "$HADOOP_YARN_HOME/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/target/test/classes
 fi
-if [ -d "$YARN_HOME/build/tools" ]; then
-  CLASSPATH=${CLASSPATH}:$YARN_HOME/build/tools
+if [ -d "$HADOOP_YARN_HOME/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/build/tools
 fi
 
-CLASSPATH=${CLASSPATH}:$YARN_HOME/${YARN_DIR}/*
-CLASSPATH=${CLASSPATH}:$YARN_HOME/${YARN_LIB_JARS_DIR}/*
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_DIR}/*
+CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/${YARN_LIB_JARS_DIR}/*
 
 # so that filenames w/ spaces are handled correctly in loops below
 IFS=
 
 # default log directory & file
 if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$YARN_HOME/logs"
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
 fi
 if [ "$YARN_LOGFILE" = "" ]; then
   YARN_LOGFILE='yarn.log'
@@ -169,7 +171,13 @@
   echo $CLASSPATH
   exit
 elif [ "$COMMAND" = "rmadmin" ] ; then
-  CLASS='org.apache.hadoop.yarn.server.resourcemanager.tools.RMAdmin'
+  CLASS='org.apache.hadoop.yarn.client.RMAdmin'
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "application" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+  YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
+elif [ "$COMMAND" = "node" ] ; then
+  CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
   YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"
 elif [ "$COMMAND" = "resourcemanager" ] ; then
   CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/rm-config/log4j.properties
@@ -210,7 +218,7 @@
 # cygwin path translation
 if $cygwin; then
   CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-  YARN_HOME=`cygpath -w "$YARN_HOME"`
+  HADOOP_YARN_HOME=`cygpath -w "$HADOOP_YARN_HOME"`
   YARN_LOG_DIR=`cygpath -w "$YARN_LOG_DIR"`
   TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
 fi
@@ -224,8 +232,8 @@
 YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
 YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
 YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_HOME"
-YARN_OPTS="$YARN_OPTS -Dhadoop.home.dir=$YARN_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+YARN_OPTS="$YARN_OPTS -Dhadoop.home.dir=$HADOOP_YARN_HOME"
 YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
 YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
 if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
index 275869f..3d67801 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
@@ -49,7 +49,7 @@
 fi
  
 # Allow alternate conf dir location.
-export YARN_CONF_DIR="${HADOOP_CONF_DIR:-$YARN_HOME/conf}"
+export YARN_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
 
 # Check whether it is specified to use the slaves or the
 # masters file
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
index 07326a1..2df10446 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
@@ -20,7 +20,7 @@
 #
 # Environment Variables
 #
-#   YARN_CONF_DIR  Alternate conf dir. Default is ${YARN_HOME}/conf.
+#   YARN_CONF_DIR  Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
 #   YARN_LOG_DIR   Where log files are stored.  PWD by default.
 #   YARN_MASTER    host:path where hadoop code should be rsync'd from
 #   YARN_PID_DIR   The pid files are stored. /tmp by default.
@@ -76,7 +76,7 @@
 
 # get log directory
 if [ "$YARN_LOG_DIR" = "" ]; then
-  export YARN_LOG_DIR="$YARN_HOME/logs"
+  export YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
 fi
 
 if [ ! -w "$YARN_LOG_DIR" ] ; then
@@ -115,13 +115,13 @@
 
     if [ "$YARN_MASTER" != "" ]; then
       echo rsync from $YARN_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$YARN_HOME"
+      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$HADOOP_YARN_HOME"
     fi
 
     hadoop_rotate_log $log
     echo starting $command, logging to $log
-    cd "$YARN_HOME"
-    nohup nice -n $YARN_NICENESS "$YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+    cd "$HADOOP_YARN_HOME"
+    nohup nice -n $YARN_NICENESS "$HADOOP_YARN_HOME"/bin/yarn --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
     echo $! > $pid
     sleep 1; head "$log"
     ;;
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
index aafb42b..a7858e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemons.sh
@@ -34,5 +34,5 @@
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 . $HADOOP_LIBEXEC_DIR/yarn-config.sh
 
-exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
+exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$HADOOP_YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
 
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index 2ccb38e..2a40f10 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -17,7 +17,7 @@
 export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 
 # resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$YARN_HOME/conf}"
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
 
 # some Java parameters
 # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
@@ -47,7 +47,7 @@
 
 # default log directory & file
 if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$YARN_HOME/logs"
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
 fi
 if [ "$YARN_LOGFILE" = "" ]; then
   YARN_LOGFILE='yarn.log'
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index ff8a548..2c00ae9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -69,6 +69,8 @@
                 <argument>src/main/proto/AM_RM_protocol.proto</argument>
                 <argument>src/main/proto/client_RM_protocol.proto</argument>
                 <argument>src/main/proto/container_manager.proto</argument>
+                <argument>src/main/proto/yarn_server_resourcemanager_service_protos.proto</argument>
+                <argument>src/main/proto/RMAdminProtocol.proto</argument>
               </arguments>
             </configuration>
             <goals>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index ec27983..7431067 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -34,7 +34,7 @@
 
   // TODO: They say tokens via env isn't good.
   public static final String APPLICATION_CLIENT_SECRET_ENV_NAME =
-    "AppClientTokenEnv";
+    "AppClientSecretEnv";
   
   /**
    * The environment variable for CONTAINER_ID. Set in AppMaster environment
@@ -169,9 +169,9 @@
     MALLOC_ARENA_MAX("MALLOC_ARENA_MAX"),
     
     /**
-     * $YARN_HOME
+     * $HADOOP_YARN_HOME
      */
-    YARN_HOME("YARN_HOME");
+    HADOOP_YARN_HOME("HADOOP_YARN_HOME");
 
     private final String variable;
     private Environment(String variable) {
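
The Environment enum rename above is source-incompatible: any code that referenced Environment.YARN_HOME must move to Environment.HADOOP_YARN_HOME. A minimal caller-side sketch, assuming the enum's key() accessor that ApplicationConstants already provides elsewhere in this file (not shown in this hunk):

    // Caller-side sketch only; Environment.key() is assumed from the
    // surrounding ApplicationConstants code, and the path is invented.
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

    public class EnvRenameSketch {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();
        // Was: env.put(Environment.YARN_HOME.key(), yarnHome);
        env.put(Environment.HADOOP_YARN_HOME.key(), "/opt/hadoop-yarn");
        System.out.println(env);
      }
    }
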
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocol.java
new file mode 100644
index 0000000..46a8d1b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocol.java
@@ -0,0 +1,59 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+
+public interface RMAdminProtocol extends GetUserMappingsProtocol {
+  public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
+      throws YarnRemoteException;
+
+  public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
+      throws YarnRemoteException;
+
+  public RefreshSuperUserGroupsConfigurationResponse
+      refreshSuperUserGroupsConfiguration(
+          RefreshSuperUserGroupsConfigurationRequest request)
+      throws YarnRemoteException;
+
+  public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
+      RefreshUserToGroupsMappingsRequest request)
+      throws YarnRemoteException;
+
+  public RefreshAdminAclsResponse refreshAdminAcls(
+      RefreshAdminAclsRequest request)
+      throws YarnRemoteException;
+
+  public RefreshServiceAclsResponse refreshServiceAcls(
+      RefreshServiceAclsRequest request)
+      throws YarnRemoteException;
+}
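
With RMAdminProtocol promoted from the resourcemanager module into hadoop-yarn-api, admin clients can build against the public API artifact alone. A hedged sketch of a caller, assuming this branch's YarnRPC proxy factory, Records helper, and the YarnConfiguration.RM_ADMIN_ADDRESS key (none of which appear in this hunk):

    // Sketch, not a verbatim excerpt of the RMAdmin CLI; the RPC and
    // Records helpers used here are assumed from the rest of the branch.
    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.yarn.api.RMAdminProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.ipc.YarnRPC;
    import org.apache.hadoop.yarn.util.Records;

    public class RefreshQueuesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        InetSocketAddress addr = NetUtils.createSocketAddr(
            conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
        RMAdminProtocol admin = (RMAdminProtocol) YarnRPC.create(conf)
            .getProxy(RMAdminProtocol.class, addr, conf);
        // All six refresh operations follow the same empty-request shape.
        admin.refreshQueues(Records.newRecord(RefreshQueuesRequest.class));
      }
    }

In the shipped code this wiring sits behind the rmadmin command's new entry point, org.apache.hadoop.yarn.client.RMAdmin (see the bin/yarn hunk above).
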
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocolPB.java
similarity index 94%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocolPB.java
index 5511894..890bfb09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/RMAdminProtocolPB.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.yarn.server.resourcemanager.api;
+package org.apache.hadoop.yarn.api;
 
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsRequest.java
index 135f942..74692df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshAdminAclsRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsResponse.java
index 12cdd03..1a470c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshAdminAclsResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshAdminAclsResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshAdminAclsResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesRequest.java
index c0f86e0..0fbb9a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshNodesRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesResponse.java
index f265439..86b260a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshNodesResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshNodesResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshNodesResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesRequest.java
index 5c52536..4fd7e16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshQueuesRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesResponse.java
index ee3c1e9..91d3903 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshQueuesResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshQueuesResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshQueuesResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsRequest.java
index b016a71..cb1ca27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshServiceAclsRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsResponse.java
index dd6ef33..5c44032 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshServiceAclsResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshServiceAclsResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshServiceAclsResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
index 0779c71..1762689 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshSuperUserGroupsConfigurationRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
index edbbfdd..e83fb63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshSuperUserGroupsConfigurationResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
index cc11a22..7a8f4f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshUserToGroupsMappingsRequest {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
similarity index 91%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
index 231bac9..c53d062 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords;
+package org.apache.hadoop.yarn.api.protocolrecords;
 
 public interface RefreshUserToGroupsMappingsResponse {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
index c0926aa..82436af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
 
 public class RefreshAdminAclsRequestPBImpl 
 extends ProtoBase<RefreshAdminAclsRequestProto>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
index 752d688..e152006 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
 
 public class RefreshAdminAclsResponsePBImpl extends ProtoBase<RefreshAdminAclsResponseProto>
 implements RefreshAdminAclsResponse {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
index e1dc4d9..67e9e23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
 
 public class RefreshNodesRequestPBImpl extends ProtoBase<RefreshNodesRequestProto>
 implements RefreshNodesRequest {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
index 83978ec..3eb6e60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
 
 public class RefreshNodesResponsePBImpl extends ProtoBase<RefreshNodesResponseProto>
 implements RefreshNodesResponse {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
index b190e54..9a71fad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
 
 public class RefreshQueuesRequestPBImpl extends ProtoBase<RefreshQueuesRequestProto>
 implements RefreshQueuesRequest {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
index c2997ca..dfccb68 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
 
 public class RefreshQueuesResponsePBImpl extends ProtoBase<RefreshQueuesResponseProto>
 implements RefreshQueuesResponse {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java
index 0d558fe..6b6250a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
 
 public class RefreshServiceAclsRequestPBImpl 
 extends ProtoBase<RefreshServiceAclsRequestProto>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java
index f09bc5c..ad04430 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
 
 public class RefreshServiceAclsResponsePBImpl 
 extends ProtoBase<RefreshServiceAclsResponseProto>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
similarity index 90%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
index 24c33f1..94a2ff4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
 
 public class RefreshSuperUserGroupsConfigurationRequestPBImpl 
 extends ProtoBase<RefreshSuperUserGroupsConfigurationRequestProto>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
similarity index 90%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
index 5f9194b..b6f1076 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 
 public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends ProtoBase<RefreshSuperUserGroupsConfigurationResponseProto>
 implements RefreshSuperUserGroupsConfigurationResponse {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
index 611dc0c..8dfbdc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 
 public class RefreshUserToGroupsMappingsRequestPBImpl 
 extends ProtoBase<RefreshUserToGroupsMappingsRequestProto>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
similarity index 89%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
index 8a09e82..791664e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java
@@ -16,11 +16,11 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb;
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
 import org.apache.hadoop.yarn.api.records.ProtoBase;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
 
 public class RefreshUserToGroupsMappingsResponsePBImpl extends ProtoBase<RefreshUserToGroupsMappingsResponseProto>
 implements RefreshUserToGroupsMappingsResponse {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
index 10dd23a..1a8790a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
@@ -106,4 +106,20 @@
    *                   localized
    */
   public void setVisibility(LocalResourceVisibility visibility);
+  
+  /**
+   * Get the <em>pattern</em> that should be used to extract entries from the
+   * archive (only used when type is <code>PATTERN</code>).
+   * @return <em>pattern</em> that should be used to extract entries from the 
+   * archive. 
+   */
+  public String getPattern();
+  
+  /**
+   * Set the <em>pattern</em> that should be used to extract entries from the
+   * archive (only used when type is <code>PATTERN</code>).
+   * @param pattern <em>pattern</em> that should be used to extract entries 
+   * from the archive.
+   */
+  public void setPattern(String pattern);
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
index 0cfed1c..2529f12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
@@ -55,5 +55,14 @@
   /**
    * Regular file i.e. uninterpreted bytes.
    */
-  FILE
+  FILE,
+  
+  /**
+   * A hybrid between archive and file.  Only part of the file is unarchived,
+   * and the original file is left in place, but in the same directory as the
+   * unarchived part.  The part that is unarchived is determined by the
+   * pattern in {@link LocalResource}.  Currently only JARs support a
+   * pattern; all others are treated like an {@link LocalResourceType#ARCHIVE}.
+   */
+  PATTERN
 }
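
Client-side, the new type is requested by pairing LocalResourceType.PATTERN with the pattern accessor added to LocalResource above. An illustrative sketch, assuming the Records factory; the regex and values are invented, and a real resource would also carry its URL, size, and timestamp:

    // Illustrative values throughout; "classes/.*" is a made-up pattern,
    // not the MapReduce default.
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
    import org.apache.hadoop.yarn.util.Records;

    public class PatternResourceSketch {
      public static LocalResource jarWithPattern() {
        LocalResource rsrc = Records.newRecord(LocalResource.class);
        rsrc.setType(LocalResourceType.PATTERN);
        rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        // Matching entries are unpacked beside the original jar; for
        // non-jar resources the type degrades to ARCHIVE semantics.
        rsrc.setPattern("classes/.*");
        return rsrc;
      }
    }
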
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
index fea1f48..5c27f38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
@@ -64,4 +64,8 @@
     return this.getPriority() - other.getPriority();
   }
 
+  @Override
+  public String toString() {
+    return "{Priority: " + getPriority() + "}";
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java
index dd57f30..60ea0da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java
@@ -151,6 +151,25 @@
     }
     builder.setVisibility(convertToProtoFormat(visibility));
   }
+  
+  @Override
+  public synchronized String getPattern() {
+    LocalResourceProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasPattern()) {
+      return null;
+    }
+    return p.getPattern();
+  }
+
+  @Override
+  public synchronized void setPattern(String pattern) {
+    maybeInitBuilder();
+    if (pattern == null) {
+      builder.clearPattern();
+      return;
+    }
+    builder.setPattern(pattern);
+  }
 
   private LocalResourceTypeProto convertToProtoFormat(LocalResourceType e) {
     return ProtoUtils.convertToProtoFormat(e);
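
The accessor pair above follows YARN's usual PBImpl copy-on-write convention: reads consult the immutable proto when viaProto is set, writes fork a builder via maybeInitBuilder(), and null clears the optional field. A small check of that null behaviour, assuming LocalResourcePBImpl's standard public no-arg constructor:

    // Exercises the optional-field semantics of the new accessors.
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;

    public class PatternAccessorCheck {
      public static void main(String[] args) {
        LocalResource r = new LocalResourcePBImpl();
        System.out.println(r.getPattern()); // null: field unset
        r.setPattern(".*\\.class");
        System.out.println(r.getPattern()); // ".*\.class"
        r.setPattern(null);                 // takes the clearPattern() path
        System.out.println(r.getPattern()); // null again
      }
    }
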
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index f3b8ffa..f3834a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -162,5 +162,10 @@
   private ResourceProto convertToProtoFormat(Resource t) {
     return ((ResourcePBImpl)t).getProto();
   }
-
-}  
+  
+  @Override
+  public String toString() {
+    return "{Priority: " + getPriority() + ", Capability: " + getCapability()
+        + "}";
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/RMAdminProtocol.proto
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/RMAdminProtocol.proto
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/RMAdminProtocol.proto
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 8daeddd..55c0d78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -135,6 +135,7 @@
 enum LocalResourceTypeProto {
   ARCHIVE = 1;
   FILE = 2;
+  PATTERN = 3;
 }
 
 message LocalResourceProto {
@@ -142,7 +143,8 @@
   optional int64 size = 2;
   optional int64 timestamp = 3;
   optional LocalResourceTypeProto type = 4;
-  optional LocalResourceVisibilityProto visibility= 5;
+  optional LocalResourceVisibilityProto visibility = 5;
+  optional string pattern = 6;
 }
 
 message ApplicationResourceUsageReportProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_server_resourcemanager_service_protos.proto
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_service_protos.proto
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_server_resourcemanager_service_protos.proto
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index c630050..e6ea7b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -82,6 +82,13 @@
             <phase>test-compile</phase>
           </execution>
         </executions>
+        <configuration>
+           <archive>
+             <manifest>
+               <mainClass>org.apache.hadoop.yarn.applications.distributedshell.Client</mainClass>
+             </manifest>
+           </archive>
+        </configuration>
       </plugin>
       <plugin>
         <artifactId>maven-dependency-plugin</artifactId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 3a4a515..4a11cd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -119,7 +119,8 @@
   // Application master jar file
   private String appMasterJar = ""; 
   // Main class to invoke application master
-  private String appMasterMainClass = "";
+  private final String appMasterMainClass =
+      "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster";
 
   // Shell command to be executed 
   private String shellCommand = ""; 
@@ -149,6 +150,9 @@
   // Debug flag
   boolean debugFlag = false;	
 
+  // Command line options
+  private Options opts;
+
   /**
    * @param args Command line arguments 
    */
@@ -157,9 +161,15 @@
     try {
       Client client = new Client();
       LOG.info("Initializing Client");
-      boolean doRun = client.init(args);
-      if (!doRun) {
-        System.exit(0);
+      try {
+        boolean doRun = client.init(args);
+        if (!doRun) {
+          System.exit(0);
+        }
+      } catch (IllegalArgumentException e) {
+        System.err.println(e.getLocalizedMessage());
+        client.printUsage();
+        System.exit(-1);
       }
       result = client.run();
     } catch (Throwable t) {
@@ -180,6 +190,23 @@
     super();
     this.conf = conf;
     init(conf);
+    opts = new Options();
+    opts.addOption("appname", true, "Application Name. Default value - DistributedShell");
+    opts.addOption("priority", true, "Application Priority. Default 0");
+    opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
+    opts.addOption("timeout", true, "Application timeout in milliseconds");
+    opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
+    opts.addOption("jar", true, "Jar file containing the application master");
+    opts.addOption("shell_command", true, "Shell command to be executed by the Application Master");
+    opts.addOption("shell_script", true, "Location of the shell script to be executed");
+    opts.addOption("shell_args", true, "Command line args for the shell script");
+    opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs");
+    opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers");
+    opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command");
+    opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed");
+    opts.addOption("log_properties", true, "log4j.properties file");
+    opts.addOption("debug", false, "Dump out debug information");
+    opts.addOption("help", false, "Print usage");
   }
 
   /**
@@ -192,7 +219,7 @@
    * Helper function to print out usage
    * @param opts Parsed command line options 
    */
-  private void printUsage(Options opts) {
+  private void printUsage() {
     new HelpFormatter().printHelp("Client", opts);
   }
 
@@ -204,33 +231,14 @@
    */
   public boolean init(String[] args) throws ParseException {
 
-    Options opts = new Options();
-    opts.addOption("appname", true, "Application Name. Default value - DistributedShell");
-    opts.addOption("priority", true, "Application Priority. Default 0");
-    opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
-    opts.addOption("timeout", true, "Application timeout in milliseconds");
-    opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
-    opts.addOption("jar", true, "Jar file containing the application master");
-    opts.addOption("class", true, "Main class to  be run for the Application Master.");
-    opts.addOption("shell_command", true, "Shell command to be executed by the Application Master");
-    opts.addOption("shell_script", true, "Location of the shell script to be executed");
-    opts.addOption("shell_args", true, "Command line args for the shell script");
-    opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs");
-    opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers");		
-    opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command");
-    opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed");
-    opts.addOption("log_properties", true, "log4j.properties file");
-    opts.addOption("debug", false, "Dump out debug information");
-    opts.addOption("help", false, "Print usage");
     CommandLine cliParser = new GnuParser().parse(opts, args);
 
     if (args.length == 0) {
-      printUsage(opts);
       throw new IllegalArgumentException("No args specified for client to initialize");
     }		
 
     if (cliParser.hasOption("help")) {
-      printUsage(opts);
+      printUsage();
       return false;
     }
 
@@ -254,8 +262,6 @@
     }		
 
     appMasterJar = cliParser.getOptionValue("jar");
-    appMasterMainClass = cliParser.getOptionValue("class",
-        "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster");		
 
     if (!cliParser.hasOption("shell_command")) {
       throw new IllegalArgumentException("No shell command specified to be executed by application master");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index e76e2db..0838178 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -41,7 +43,7 @@
       LogFactory.getLog(TestDistributedShell.class);
 
   protected static MiniYARNCluster yarnCluster = null;
-  protected static Configuration conf = new Configuration();
+  protected static Configuration conf = new YarnConfiguration();
 
   protected static String APPMASTER_JAR = JarFinder.getJar(ApplicationMaster.class);
 
@@ -49,6 +51,8 @@
   public static void setup() throws InterruptedException, IOException {
     LOG.info("Starting up YARN cluster");
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, 
+        FifoScheduler.class, ResourceScheduler.class);
     if (yarnCluster == null) {
       yarnCluster = new MiniYARNCluster(TestDistributedShell.class.getName(),
           1, 1, 1);
@@ -106,6 +110,22 @@
 
   }
 
+  @Test
+  public void testDSShellWithNoArgs() throws Exception {
+
+    String[] args = {};
+
+    LOG.info("Initializing DS Client with no args");
+    Client client = new Client(new Configuration(yarnCluster.getConfig()));
+    boolean exceptionThrown = false;
+    try {
+      client.init(args);
+    } catch (IllegalArgumentException e) {
+      exceptionThrown = true;
+    }
+    Assert.assertTrue(exceptionThrown);
+  }
 
 }
 
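The new no-args test asserts through a boolean flag, which leaves room to later assert on the exception message as well. An equivalent, slightly terser JUnit 4 formulation would be (sketch only, not part of the patch):

    @Test(expected = IllegalArgumentException.class)
    public void testDSShellWithNoArgs() throws Exception {
      new Client(new Configuration(yarnCluster.getConfig())).init(new String[] {});
    }
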
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/RMAdmin.java
similarity index 93%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/RMAdmin.java
index 43cbc36..fd30fb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/tools/RMAdmin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/RMAdmin.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.tools;
+package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -29,17 +29,17 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 
 public class RMAdmin extends Configured implements Tool {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
new file mode 100644
index 0000000..b8ecae7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.cli;
+
+import java.io.PrintWriter;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class ApplicationCLI extends YarnCLI {
+  private static final String APPLICATIONS_PATTERN = "%30s\t%20s\t%10s\t%10s\t%18s\t%18s\t%35s\n";
+
+  public static void main(String[] args) throws Exception {
+    ApplicationCLI cli = new ApplicationCLI();
+    cli.setSysOutPrintStream(System.out);
+    cli.setSysErrPrintStream(System.err);
+    int res = ToolRunner.run(cli, args);
+    cli.stop();
+    System.exit(res);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+
+    Options opts = new Options();
+    opts.addOption(STATUS_CMD, true, "Prints the status of the application.");
+    opts.addOption(LIST_CMD, false, "Lists all the applications from the RM.");
+    opts.addOption(KILL_CMD, true, "Kills the application.");
+    CommandLine cliParser = new GnuParser().parse(opts, args);
+
+    int exitCode = -1;
+    if (cliParser.hasOption(STATUS_CMD)) {
+      if (args.length != 2) {
+        printUsage(opts);
+        return exitCode;
+      }
+      printApplicationReport(cliParser.getOptionValue(STATUS_CMD));
+    } else if (cliParser.hasOption(LIST_CMD)) {
+      listAllApplications();
+    } else if (cliParser.hasOption(KILL_CMD)) {
+      if (args.length != 2) {
+        printUsage(opts);
+        return exitCode;
+      }
+      killApplication(cliParser.getOptionValue(KILL_CMD));
+    } else {
+      syserr.println("Invalid Command Usage : ");
+      printUsage(opts);
+    }
+    return 0;
+  }
+
+  /**
+   * Prints the usage of the command.
+   * 
+   * @param opts the command line options for which usage is printed
+   */
+  private void printUsage(Options opts) {
+    new HelpFormatter().printHelp("application", opts);
+  }
+
+  /**
+   * Lists all the applications present in the Resource Manager
+   * 
+   * @throws YarnRemoteException
+   */
+  private void listAllApplications() throws YarnRemoteException {
+    PrintWriter writer = new PrintWriter(sysout);
+    List<ApplicationReport> appsReport = client.getApplicationList();
+
+    writer.println("Total Applications:" + appsReport.size());
+    writer.printf(APPLICATIONS_PATTERN, "Application-Id",
+        "Application-Name", "User", "Queue", "State", "Final-State",
+        "Tracking-URL");
+    for (ApplicationReport appReport : appsReport) {
+      writer.printf(APPLICATIONS_PATTERN, appReport.getApplicationId(),
+          appReport.getName(), appReport.getUser(), appReport.getQueue(),
+          appReport.getYarnApplicationState(), appReport
+              .getFinalApplicationStatus(), appReport.getOriginalTrackingUrl());
+    }
+    writer.flush();
+  }
+
+  /**
+   * Kills the application with the given application id.
+   * 
+   * @param applicationId the id of the application to kill
+   * @throws YarnRemoteException
+   */
+  private void killApplication(String applicationId) throws YarnRemoteException {
+    ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
+    sysout.println("Killing application " + applicationId);
+    client.killApplication(appId);
+  }
+
+  /**
+   * Prints the application report for an application id.
+   * 
+   * @param applicationId the id of the application to report on
+   * @throws YarnRemoteException
+   */
+  private void printApplicationReport(String applicationId)
+      throws YarnRemoteException {
+    ApplicationReport appReport = client.getApplicationReport(ConverterUtils
+        .toApplicationId(applicationId));
+    StringBuffer appReportStr = new StringBuffer();
+    if (appReport != null) {
+      appReportStr.append("Application Report : ");
+      appReportStr.append("\n\tApplication-Id : ");
+      appReportStr.append(appReport.getApplicationId());
+      appReportStr.append("\n\tApplication-Name : ");
+      appReportStr.append(appReport.getName());
+      appReportStr.append("\n\tUser : ");
+      appReportStr.append(appReport.getUser());
+      appReportStr.append("\n\tQueue : ");
+      appReportStr.append(appReport.getQueue());
+      appReportStr.append("\n\tStart-Time : ");
+      appReportStr.append(appReport.getStartTime());
+      appReportStr.append("\n\tFinish-Time : ");
+      appReportStr.append(appReport.getFinishTime());
+      appReportStr.append("\n\tState : ");
+      appReportStr.append(appReport.getYarnApplicationState());
+      appReportStr.append("\n\tFinal-State : ");
+      appReportStr.append(appReport.getFinalApplicationStatus());
+      appReportStr.append("\n\tTracking-URL : ");
+      appReportStr.append(appReport.getOriginalTrackingUrl());
+      appReportStr.append("\n\tDiagnostics : ");
+      appReportStr.append(appReport.getDiagnostics());
+    } else {
+      appReportStr.append("Application with id '" + applicationId
+          + "' doesn't exist in RM.");
+    }
+    sysout.println(appReportStr.toString());
+  }
+
+}
\ No newline at end of file
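
Beyond the shell entry point, the CLI can be driven programmatically the same way main() does, which is what the tests later in this patch rely on; the application id here is hypothetical:

    ApplicationCLI cli = new ApplicationCLI();
    cli.setSysOutPrintStream(System.out);
    cli.setSysErrPrintStream(System.err);
    int rc = ToolRunner.run(cli, new String[] { "-status", "application_1234_0005" });
    cli.stop();   // releases the underlying YarnClient
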
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
new file mode 100644
index 0000000..cfde538
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.cli;
+
+import java.io.PrintWriter;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+public class NodeCLI extends YarnCLI {
+  private static final String NODES_PATTERN = "%16s\t%10s\t%17s\t%26s\t%18s\n";
+
+  public static void main(String[] args) throws Exception {
+    NodeCLI cli = new NodeCLI();
+    cli.setSysOutPrintStream(System.out);
+    cli.setSysErrPrintStream(System.err);
+    int res = ToolRunner.run(cli, args);
+    cli.stop();
+    System.exit(res);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+
+    Options opts = new Options();
+    opts.addOption(STATUS_CMD, true, "Prints the status report of the node.");
+    opts.addOption(LIST_CMD, false, "Lists all the nodes.");
+    CommandLine cliParser = new GnuParser().parse(opts, args);
+
+    int exitCode = -1;
+    if (cliParser.hasOption("status")) {
+      if (args.length != 2) {
+        printUsage(opts);
+        return exitCode;
+      }
+      printNodeStatus(cliParser.getOptionValue("status"));
+    } else if (cliParser.hasOption("list")) {
+      listClusterNodes();
+    } else {
+      syserr.println("Invalid Command Usage : ");
+      printUsage(opts);
+    }
+    return 0;
+  }
+
+  /**
+   * Prints the usage of the command.
+   * 
+   * @param opts the command line options for which usage is printed
+   */
+  private void printUsage(Options opts) {
+    new HelpFormatter().printHelp("node", opts);
+  }
+
+  /**
+   * Lists all the nodes present in the cluster
+   * 
+   * @throws YarnRemoteException
+   */
+  private void listClusterNodes() throws YarnRemoteException {
+    PrintWriter writer = new PrintWriter(sysout);
+    List<NodeReport> nodesReport = client.getNodeReports();
+    writer.println("Total Nodes:" + nodesReport.size());
+    writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
+        "Health-Status(isNodeHealthy)", "Running-Containers");
+    for (NodeReport nodeReport : nodesReport) {
+      writer.printf(NODES_PATTERN, nodeReport.getNodeId(), nodeReport
+          .getNodeState(), nodeReport.getHttpAddress(), nodeReport
+          .getNodeHealthStatus().getIsNodeHealthy(), nodeReport
+          .getNumContainers());
+    }
+    writer.flush();
+  }
+
+  /**
+   * Prints the node report for the given node id.
+   * 
+   * @param nodeIdStr the node id whose report is printed
+   * @throws YarnRemoteException
+   */
+  private void printNodeStatus(String nodeIdStr) throws YarnRemoteException {
+    NodeId nodeId = ConverterUtils.toNodeId(nodeIdStr);
+    List<NodeReport> nodesReport = client.getNodeReports();
+    StringBuffer nodeReportStr = new StringBuffer();
+    NodeReport nodeReport = null;
+    for (NodeReport report : nodesReport) {
+      if (!report.getNodeId().equals(nodeId)) {
+        continue;
+      }
+      nodeReport = report;
+      nodeReportStr.append("Node Report : ");
+      nodeReportStr.append("\n\tNode-Id : ");
+      nodeReportStr.append(nodeReport.getNodeId());
+      nodeReportStr.append("\n\tRack : ");
+      nodeReportStr.append(nodeReport.getRackName());
+      nodeReportStr.append("\n\tNode-State : ");
+      nodeReportStr.append(nodeReport.getNodeState());
+      nodeReportStr.append("\n\tNode-Http-Address : ");
+      nodeReportStr.append(nodeReport.getHttpAddress());
+      nodeReportStr.append("\n\tHealth-Status(isNodeHealthy) : ");
+      nodeReportStr.append(nodeReport.getNodeHealthStatus()
+          .getIsNodeHealthy());
+      nodeReportStr.append("\n\tLast-Last-Health-Update : ");
+      nodeReportStr.append(nodeReport.getNodeHealthStatus()
+          .getLastHealthReportTime());
+      nodeReportStr.append("\n\tHealth-Report : ");
+      nodeReportStr
+          .append(nodeReport.getNodeHealthStatus().getHealthReport());
+      nodeReportStr.append("\n\tContainers : ");
+      nodeReportStr.append(nodeReport.getNumContainers());
+      nodeReportStr.append("\n\tMemory-Used : ");
+      nodeReportStr.append((nodeReport.getUsed() == null) ? "0M"
+          : (nodeReport.getUsed().getMemory() + "M"));
+      nodeReportStr.append("\n\tMemory-Capacity : ");
+      nodeReportStr.append(nodeReport.getCapability().getMemory());
+    }
+
+    if (nodeReport == null) {
+      nodeReportStr.append("Could not find the node report for node id : "
+          + nodeIdStr);
+    }
+
+    sysout.println(nodeReportStr.toString());
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/YarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/YarnCLI.java
new file mode 100644
index 0000000..a36e671
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/YarnCLI.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.cli;
+
+import java.io.PrintStream;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.yarn.client.YarnClient;
+import org.apache.hadoop.yarn.client.YarnClientImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+public abstract class YarnCLI extends Configured implements Tool {
+
+  public static final String STATUS_CMD = "status";
+  public static final String LIST_CMD = "list";
+  public static final String KILL_CMD = "kill";
+  protected PrintStream sysout;
+  protected PrintStream syserr;
+  protected YarnClient client;
+
+  public YarnCLI() {
+    super(new YarnConfiguration());
+    client = new YarnClientImpl();
+    client.init(getConf());
+    client.start();
+  }
+
+  public void setSysOutPrintStream(PrintStream sysout) {
+    this.sysout = sysout;
+  }
+
+  public void setSysErrPrintStream(PrintStream syserr) {
+    this.syserr = syserr;
+  }
+
+  public YarnClient getClient() {
+    return client;
+  }
+
+  public void setClient(YarnClient client) {
+    this.client = client;
+  }
+
+  public void stop() {
+    this.client.stop();
+  }
+}
\ No newline at end of file
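
YarnCLI is the template both CLIs follow: the constructor starts a YarnClientImpl against a YarnConfiguration, subclasses implement run(), and setClient()/set*PrintStream() exist so tests can inject a mock client and capture output. A hypothetical subclass illustrating the extension point (sysout and client must be wired as in the subclasses' main() before run() is called):

    // Sketch: QueueCLI is invented here purely to show the contract.
    public class QueueCLI extends YarnCLI {
      @Override
      public int run(String[] args) throws Exception {
        // parse args, then use the started (or test-injected) client
        sysout.println("Nodes visible to this client: "
            + client.getNodeReports().size());
        return 0;
      }
    }
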
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/GetGroupsForTesting.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/GetGroupsForTesting.java
similarity index 94%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/GetGroupsForTesting.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/GetGroupsForTesting.java
index 8a22d5c..8827e08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/GetGroupsForTesting.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/GetGroupsForTesting.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.tools;
+package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.io.PrintStream;
@@ -25,9 +25,9 @@
 import org.apache.hadoop.tools.GetGroupsBase;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.RMAdminProtocol;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
 
 public class GetGroupsForTesting extends GetGroupsBase {
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/TestGetGroups.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
similarity index 97%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/TestGetGroups.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
index 8947507..9f2e554 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/tools/TestGetGroups.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.tools;
+package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.io.PrintStream;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
new file mode 100644
index 0000000..146f938
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -0,0 +1,228 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.cli;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.YarnClient;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestYarnCLI {
+
+  private YarnClient client = mock(YarnClient.class);
+  ByteArrayOutputStream sysOutStream;
+  private PrintStream sysOut;
+  ByteArrayOutputStream sysErrStream;
+  private PrintStream sysErr;
+
+  @Before
+  public void setup() {
+    sysOutStream = new ByteArrayOutputStream();
+    sysOut = spy(new PrintStream(sysOutStream));
+    sysErrStream = new ByteArrayOutputStream();
+    sysErr = spy(new PrintStream(sysErrStream));
+  }
+  
+  @Test
+  public void testGetApplicationReport() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = BuilderUtils.newApplicationId(1234, 5);
+    ApplicationReport newApplicationReport = BuilderUtils.newApplicationReport(
+        applicationId, BuilderUtils.newApplicationAttemptId(applicationId, 1),
+        "user", "queue", "appname", "host", 124, null,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        FinalApplicationStatus.SUCCEEDED, null, "N/A");
+    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
+        newApplicationReport);
+    int result = cli.run(new String[] { "-status", applicationId.toString() });
+    assertEquals(0, result);
+    verify(client).getApplicationReport(applicationId);
+    String appReportStr = "Application Report : \n\t"
+        + "Application-Id : application_1234_0005\n\t"
+        + "Application-Name : appname\n\tUser : user\n\t"
+        + "Queue : queue\n\tStart-Time : 0\n\tFinish-Time : 0\n\t"
+        + "State : FINISHED\n\tFinal-State : SUCCEEDED\n\t"
+        + "Tracking-URL : N/A\n\tDiagnostics : diagnostics\n";
+    Assert.assertEquals(appReportStr, sysOutStream.toString());
+    verify(sysOut, times(1)).println(isA(String.class));
+  }
+
+  @Test
+  public void testGetAllApplications() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = BuilderUtils.newApplicationId(1234, 5);
+    ApplicationReport newApplicationReport = BuilderUtils.newApplicationReport(
+        applicationId, BuilderUtils.newApplicationAttemptId(applicationId, 1),
+        "user", "queue", "appname", "host", 124, null,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        FinalApplicationStatus.SUCCEEDED, null, "N/A");
+    List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
+    applicationReports.add(newApplicationReport);
+    when(client.getApplicationList()).thenReturn(applicationReports);
+    int result = cli.run(new String[] { "-list" });
+    assertEquals(0, result);
+    verify(client).getApplicationList();
+
+    StringBuffer appsReportStrBuf = new StringBuffer();
+    appsReportStrBuf.append("Total Applications:1\n");
+    appsReportStrBuf
+        .append("                Application-Id\t    Application-Name"
+            + "\t      User\t     Queue\t             State\t       "
+            + "Final-State\t                       Tracking-URL\n");
+    appsReportStrBuf.append("         application_1234_0005\t             "
+        + "appname\t      user\t     queue\t          FINISHED\t         "
+        + "SUCCEEDED\t                                N/A\n");
+    Assert.assertEquals(appsReportStrBuf.toString(), sysOutStream.toString());
+    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
+  }
+
+  @Test
+  public void testKillApplication() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = BuilderUtils.newApplicationId(1234, 5);
+    int result = cli.run(new String[] { "-kill", applicationId.toString() });
+    assertEquals(0, result);
+    verify(client).killApplication(any(ApplicationId.class));
+    verify(sysOut).println("Killing application application_1234_0005");
+  }
+
+  @Test
+  public void testListClusterNodes() throws Exception {
+    NodeCLI cli = new NodeCLI();
+    when(client.getNodeReports()).thenReturn(getNodeReports(3));
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    int result = cli.run(new String[] { "-list" });
+    assertEquals(0, result);
+    verify(client).getNodeReports();
+    StringBuffer nodesReportStr = new StringBuffer();
+    nodesReportStr.append("Total Nodes:3");
+    nodesReportStr
+        .append("\n         Node-Id\tNode-State\tNode-Http-Address\t"
+            + "Health-Status(isNodeHealthy)\tRunning-Containers");
+    nodesReportStr.append("\n         host0:0\t   RUNNING\t       host1:8888"
+        + "\t                     false\t                 0");
+    nodesReportStr.append("\n         host1:0\t   RUNNING\t       host1:8888"
+        + "\t                     false\t                 0");
+    nodesReportStr.append("\n         host2:0\t   RUNNING\t       host1:8888"
+        + "\t                     false\t                 0\n");
+    Assert.assertEquals(nodesReportStr.toString(), sysOutStream.toString());
+    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
+  }
+
+  @Test
+  public void testNodeStatus() throws Exception {
+    NodeId nodeId = BuilderUtils.newNodeId("host0", 0);
+    NodeCLI cli = new NodeCLI();
+    when(client.getNodeReports()).thenReturn(getNodeReports(3));
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+    int result = cli.run(new String[] { "-status", nodeId.toString() });
+    assertEquals(0, result);
+    verify(client).getNodeReports();
+    String nodeStatusStr = "Node Report : \n\tNode-Id : host0:0\n\t"
+        + "Rack : rack1\n\tNode-State : RUNNING\n\t"
+        + "Node-Http-Address : host1:8888\n\tHealth-Status(isNodeHealthy) "
+        + ": false\n\tLast-Last-Health-Update : 0\n\tHealth-Report : null"
+        + "\n\tContainers : 0\n\tMemory-Used : 0M\n\tMemory-Capacity : 0";
+    verify(sysOut, times(1)).println(isA(String.class));
+    verify(sysOut).println(nodeStatusStr);
+  }
+
+  @Test
+  public void testAbsentNodeStatus() throws Exception {
+    NodeId nodeId = BuilderUtils.newNodeId("Absenthost0", 0);
+    NodeCLI cli = new NodeCLI();
+    when(client.getNodeReports()).thenReturn(getNodeReports(0));
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    cli.setSysErrPrintStream(sysErr);
+    int result = cli.run(new String[] { "-status", nodeId.toString() });
+    assertEquals(0, result);
+    verify(client).getNodeReports();
+    verify(sysOut, times(1)).println(isA(String.class));
+    verify(sysOut).println(
+      "Could not find the node report for node id : " + nodeId.toString());
+  }
+
+  @Test
+  public void testAppCLIUsageInfo() throws Exception {
+    verifyUsageInfo(new ApplicationCLI());
+  }
+
+  @Test
+  public void testNodeCLIUsageInfo() throws Exception {
+    verifyUsageInfo(new NodeCLI());
+  }
+
+  private void verifyUsageInfo(YarnCLI cli) throws Exception {
+    cli.setSysErrPrintStream(sysErr);
+    cli.run(new String[0]);
+    verify(sysErr).println("Invalid Command Usage : ");
+  }
+
+  private List<NodeReport> getNodeReports(int noOfNodes) {
+    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
+
+    for (int i = 0; i < noOfNodes; i++) {
+      NodeReport nodeReport = BuilderUtils.newNodeReport(BuilderUtils
+          .newNodeId("host" + i, 0), NodeState.RUNNING, "host" + 1 + ":8888",
+          "rack1", Records.newRecord(Resource.class), Records
+              .newRecord(Resource.class), 0, Records
+              .newRecord(NodeHealthStatus.class));
+      nodeReports.add(nodeReport);
+    }
+    return nodeReports;
+  }
+
+  private ApplicationCLI createAndGetAppCLI() {
+    ApplicationCLI cli = new ApplicationCLI();
+    cli.setClient(client);
+    cli.setSysOutPrintStream(sysOut);
+    return cli;
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
similarity index 67%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
index 138d6d7..051df59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
@@ -16,7 +16,7 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.client;
+package org.apache.hadoop.yarn.api.impl.pb.client;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -25,6 +25,32 @@
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.api.RMAdminProtocolPB;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
@@ -35,32 +61,6 @@
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
 
 import com.google.protobuf.ServiceException;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
similarity index 69%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
index 5fdd5db..2124e8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java
@@ -16,32 +16,32 @@
 * limitations under the License.
 */
 
-package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.service;
+package org.apache.hadoop.yarn.api.impl.pb.service;
 
 import java.io.IOException;
 
+import org.apache.hadoop.yarn.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.api.RMAdminProtocolPB;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 16de804..cbe63fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -21,9 +21,12 @@
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
+import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Splitter;
@@ -111,18 +114,18 @@
  /** Minimum memory request grant-able by the RM scheduler. */
   public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB =
     YARN_PREFIX + "scheduler.minimum-allocation-mb";
-  public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 128;
+  public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024;
 
   /** Maximum memory request grant-able by the RM scheduler. */
   public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB =
     YARN_PREFIX + "scheduler.maximum-allocation-mb";
-  public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 10240;
+  public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 8192;
 
   /** Number of threads to handle scheduler interface.*/
   public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT =
     RM_PREFIX + "scheduler.client.thread-count";
   public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50;
-  
+
   /** The address of the RM web application.*/
   public static final String RM_WEBAPP_ADDRESS = 
     RM_PREFIX + "webapp.address";
@@ -205,6 +208,8 @@
   public static final String RM_SCHEDULER = 
     RM_PREFIX + "scheduler.class";
  
+  public static final String DEFAULT_RM_SCHEDULER = 
+      "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";
 
   //Delegation token related keys
   public static final String  DELEGATION_KEY_UPDATE_INTERVAL_KEY = 
@@ -281,7 +286,12 @@
 
   /** Environment variables that containers may override rather than use NodeManager's default.*/
   public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist";
-  public static final String DEFAULT_NM_ENV_WHITELIST = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME";
+  public static final String DEFAULT_NM_ENV_WHITELIST = StringUtils.join(",",
+    Arrays.asList(ApplicationConstants.Environment.JAVA_HOME.key(),
+      ApplicationConstants.Environment.HADOOP_COMMON_HOME.key(),
+      ApplicationConstants.Environment.HADOOP_HDFS_HOME.key(),
+      ApplicationConstants.Environment.HADOOP_CONF_DIR.key(),
+      ApplicationConstants.Environment.HADOOP_YARN_HOME.key()));
   
   /** address of node manager IPC.*/
   public static final String NM_ADDRESS = NM_PREFIX + "address";
@@ -570,12 +580,19 @@
    * CLASSPATH entries
    */
   public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = {
-      "$HADOOP_CONF_DIR", "$HADOOP_COMMON_HOME/share/hadoop/common/*",
-      "$HADOOP_COMMON_HOME/share/hadoop/common/lib/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/*",
-      "$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*",
-      "$YARN_HOME/share/hadoop/yarn/*",
-      "$YARN_HOME/share/hadoop/yarn/lib/*"};
+      ApplicationConstants.Environment.HADOOP_CONF_DIR.$(),
+      ApplicationConstants.Environment.HADOOP_COMMON_HOME.$()
+          + "/share/hadoop/common/*",
+      ApplicationConstants.Environment.HADOOP_COMMON_HOME.$()
+          + "/share/hadoop/common/lib/*",
+      ApplicationConstants.Environment.HADOOP_HDFS_HOME.$()
+          + "/share/hadoop/hdfs/*",
+      ApplicationConstants.Environment.HADOOP_HDFS_HOME.$()
+          + "/share/hadoop/hdfs/lib/*",
+      ApplicationConstants.Environment.HADOOP_YARN_HOME.$()
+          + "/share/hadoop/yarn/*",
+      ApplicationConstants.Environment.HADOOP_YARN_HOME.$()
+          + "/share/hadoop/yarn/lib/*" };
 
   /** Container temp directory */
   public static final String DEFAULT_CONTAINER_TEMP_DIR = "./tmp";
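
Two scheduler defaults move here (minimum allocation 128 -> 1024 MB, maximum 10240 -> 8192 MB) and a default scheduler class is introduced as a plain string, presumably because yarn-common cannot depend on the resourcemanager module at compile time. A sketch of how callers resolve these values (illustrative only; the real call sites live in the scheduler code):

    YarnConfiguration conf = new YarnConfiguration();
    int minMB = conf.getInt(
        YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);  // 1024 after this change
    String scheduler = conf.get(
        YarnConfiguration.RM_SCHEDULER, YarnConfiguration.DEFAULT_RM_SCHEDULER);
    // -> ...capacity.CapacityScheduler unless overridden in yarn-site.xml
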
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index e58f584..68f727d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -48,14 +48,16 @@
 
   private ContainerId containerId;
   private String nmHostAddr;
+  private String appSubmitter;
   private Resource resource;
   private long expiryTimeStamp;
   private int masterKeyId;
 
   public ContainerTokenIdentifier(ContainerId containerID, String hostName,
-      Resource r, long expiryTimeStamp, int masterKeyId) {
+      String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId) {
     this.containerId = containerID;
     this.nmHostAddr = hostName;
+    this.appSubmitter = appSubmitter;
     this.resource = r;
     this.expiryTimeStamp = expiryTimeStamp;
     this.masterKeyId = masterKeyId;
@@ -71,6 +73,10 @@
     return this.containerId;
   }
 
+  public String getApplicationSubmitter() {
+    return this.appSubmitter;
+  }
+
   public String getNmHostAddress() {
     return this.nmHostAddr;
   }
@@ -98,6 +104,7 @@
     out.writeInt(applicationAttemptId.getAttemptId());
     out.writeInt(this.containerId.getId());
     out.writeUTF(this.nmHostAddr);
+    out.writeUTF(this.appSubmitter);
     out.writeInt(this.resource.getMemory());
     out.writeLong(this.expiryTimeStamp);
     out.writeInt(this.masterKeyId);
@@ -112,6 +119,7 @@
     this.containerId = BuilderUtils.newContainerId(applicationAttemptId, in
         .readInt());
     this.nmHostAddr = in.readUTF();
+    this.appSubmitter = in.readUTF();
     this.resource = BuilderUtils.newResource(in.readInt());
     this.expiryTimeStamp = in.readLong();
     this.masterKeyId = in.readInt();
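
Since ContainerTokenIdentifier is a Writable, adding appSubmitter changes the wire format: readFields() must consume the new field at exactly the position write() emits it (between the NM address and the resource), and both RM and NM must agree on the layout. A minimal sketch of that round-trip invariant, using illustrative values:

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class TokenFieldOrderSketch {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeUTF("nm-host:45454"); // nmHostAddr
    out.writeUTF("alice");         // appSubmitter, the new field
    out.writeInt(1024);            // resource memory

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    // Reads must mirror the writes, field for field.
    System.out.println(in.readUTF()); // nm-host:45454
    System.out.println(in.readUTF()); // alice
    System.out.println(in.readInt()); // 1024
  }
}
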
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java
similarity index 92%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java
index 275da39..1e73882 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/admin/AdminSecurityInfo.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.security.admin;
+package org.apache.hadoop.yarn.security.admin;
 
 import java.lang.annotation.Annotation;
 
@@ -24,8 +24,8 @@
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.api.RMAdminProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
 
 public class AdminSecurityInfo extends SecurityInfo {
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java
new file mode 100644
index 0000000..04c192d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java
@@ -0,0 +1,53 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security.client;
+
+import javax.crypto.SecretKey;
+
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public abstract class BaseClientToAMTokenSecretManager extends
+    SecretManager<ClientTokenIdentifier> {
+
+  public abstract SecretKey getMasterKey(ApplicationId applicationId);
+
+  @Override
+  public synchronized byte[] createPassword(
+      ClientTokenIdentifier identifier) {
+    return createPassword(identifier.getBytes(),
+      getMasterKey(identifier.getApplicationID()));
+  }
+
+  @Override
+  public byte[] retrievePassword(ClientTokenIdentifier identifier)
+      throws SecretManager.InvalidToken {
+    SecretKey masterKey = getMasterKey(identifier.getApplicationID());
+    if (masterKey == null) {
+      throw new SecretManager.InvalidToken("Illegal client-token!");
+    }
+    return createPassword(identifier.getBytes(), masterKey);
+  }
+
+  @Override
+  public ClientTokenIdentifier createIdentifier() {
+    return new ClientTokenIdentifier();
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java
deleted file mode 100644
index 59252e7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMSecretManager.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.security.client;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.crypto.SecretKey;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.SecretManager;
-
-public class ClientToAMSecretManager extends
-    SecretManager<ClientTokenIdentifier> {
-
-  private static Log LOG = LogFactory.getLog(ClientToAMSecretManager.class);
-
-  // Per application masterkeys for managing client-tokens
-  private Map<Text, SecretKey> masterKeys = new HashMap<Text, SecretKey>();
-
-  public void setMasterKey(ClientTokenIdentifier identifier, byte[] key) {
-    SecretKey sk = SecretManager.createSecretKey(key);
-    Text applicationID = identifier.getApplicationID();
-    this.masterKeys.put(applicationID, sk);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting master key for "
-          + applicationID
-          + " as "
-          + new String(Base64.encodeBase64(this.masterKeys.get(applicationID)
-              .getEncoded())));
-    }
-  }
-
-  private void addMasterKey(ClientTokenIdentifier identifier) {
-    Text applicationID = identifier.getApplicationID();
-    this.masterKeys.put(applicationID, generateSecret());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Creating master key for "
-          + applicationID
-          + " as "
-          + new String(Base64.encodeBase64(this.masterKeys.get(applicationID)
-              .getEncoded())));}
-  }
-
-  // TODO: Handle the masterKey invalidation.
-  public synchronized SecretKey getMasterKey(
-      ClientTokenIdentifier identifier) {
-    Text applicationID = identifier.getApplicationID();
-    if (!this.masterKeys.containsKey(applicationID)) {
-      addMasterKey(identifier);
-    }
-    return this.masterKeys.get(applicationID);
-  }
-
-  @Override
-  public synchronized byte[] createPassword(
-      ClientTokenIdentifier identifier) {
-    byte[] password =
-        createPassword(identifier.getBytes(), getMasterKey(identifier));
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Password created is "
-          + new String(Base64.encodeBase64(password)));
-    }
-    return password;
-  }
-
-  @Override
-  public byte[] retrievePassword(ClientTokenIdentifier identifier)
-      throws SecretManager.InvalidToken {
-    byte[] password =
-        createPassword(identifier.getBytes(), getMasterKey(identifier));
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Password retrieved is "
-          + new String(Base64.encodeBase64(password)));
-    }
-    return password;
-  }
-
-  @Override
-  public ClientTokenIdentifier createIdentifier() {
-    return new ClientTokenIdentifier();
-  }
-
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java
new file mode 100644
index 0000000..43aeb392b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java
@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.security.client;
+
+import javax.crypto.SecretKey;
+
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+public class ClientToAMTokenSecretManager extends
+    BaseClientToAMTokenSecretManager {
+
+  // Only one client-token and one master-key for AM
+  private final SecretKey masterKey;
+
+  public ClientToAMTokenSecretManager(ApplicationId applicationID,
+      byte[] secretKeyBytes) {
+    super();
+    this.masterKey = SecretManager.createSecretKey(secretKeyBytes);
+  }
+
+  @Override
+  public SecretKey getMasterKey(ApplicationId applicationID) {
+    // Only one client-token and one master-key for AM, just return that.
+    return this.masterKey;
+  }
+
+}
\ No newline at end of file
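
Usage of the new single-key manager is straightforward; a minimal sketch, assuming the AM has already obtained the master-key bytes from the RM (the bytes below are illustrative):

import javax.crypto.SecretKey;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.util.BuilderUtils;

public class ClientToAMTokenSketch {
  public static void main(String[] args) {
    ApplicationId appId = BuilderUtils.newApplicationId(123456789L, 1);
    byte[] masterKeyBytes = { 0x01, 0x02, 0x03 }; // illustrative only
    ClientToAMTokenSecretManager sm =
        new ClientToAMTokenSecretManager(appId, masterKeyBytes);
    // Every lookup returns the same key: one master-key per AM.
    SecretKey key = sm.getMasterKey(appId);
  }
}
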
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java
index 77d97ce..dbd3a1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java
@@ -28,36 +28,39 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 
 public class ClientTokenIdentifier extends TokenIdentifier {
 
   public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN");
 
-  private Text appId;
+  private ApplicationId applicationId;
 
   // TODO: Add more information in the tokenID such that it is not
   // transferrable, more secure etc.
 
-  public ClientTokenIdentifier(ApplicationId id) {
-    this.appId = new Text(Integer.toString(id.getId()));
-  }
-
   public ClientTokenIdentifier() {
-    this.appId = new Text();
   }
 
-  public Text getApplicationID() {
-    return appId;
+  public ClientTokenIdentifier(ApplicationId id) {
+    this();
+    this.applicationId = id;
+  }
+
+  public ApplicationId getApplicationID() {
+    return this.applicationId;
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
-    appId.write(out);
+    out.writeLong(this.applicationId.getClusterTimestamp());
+    out.writeInt(this.applicationId.getId());
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    appId.readFields(in);
+    this.applicationId =
+        BuilderUtils.newApplicationId(in.readLong(), in.readInt());
   }
 
   @Override
@@ -67,10 +70,10 @@
 
   @Override
   public UserGroupInformation getUser() {
-    if (appId == null || "".equals(appId.toString())) {
+    if (this.applicationId == null) {
       return null;
     }
-    return UserGroupInformation.createRemoteUser(appId.toString());
+    return UserGroupInformation.createRemoteUser(this.applicationId.toString());
   }
 
   @InterfaceAudience.Private
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index a6c7b6e..4d69056 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -25,6 +25,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 import java.util.concurrent.Callable;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -103,9 +104,9 @@
     return dCopy;
   }
 
-  private long unpack(File localrsrc, File dst) throws IOException {
+  private long unpack(File localrsrc, File dst, Pattern pattern) throws IOException {
     switch (resource.getType()) {
-    case ARCHIVE:
+    case ARCHIVE: {
       String lowerDst = dst.getName().toLowerCase();
       if (lowerDst.endsWith(".jar")) {
         RunJar.unJar(localrsrc, dst);
@@ -122,7 +123,39 @@
               + "] to [" + dst + "]");
         }
       }
-      break;
+    }
+    break;
+    case PATTERN: {
+      String lowerDst = dst.getName().toLowerCase();
+      if (lowerDst.endsWith(".jar")) {
+        RunJar.unJar(localrsrc, dst, pattern);
+        File newDst = new File(dst, dst.getName());
+        if (!dst.exists() && !dst.mkdir()) {
+          throw new IOException("Unable to create directory: [" + dst + "]");
+        }
+        if (!localrsrc.renameTo(newDst)) {
+          throw new IOException("Unable to rename file: [" + localrsrc
+              + "] to [" + newDst + "]");
+        }
+      } else if (lowerDst.endsWith(".zip")) {
+        LOG.warn("Treating [" + localrsrc + "] as an archive even though it " +
+            "was specified as PATTERN");
+        FileUtil.unZip(localrsrc, dst);
+      } else if (lowerDst.endsWith(".tar.gz") ||
+                 lowerDst.endsWith(".tgz") ||
+                 lowerDst.endsWith(".tar")) {
+        LOG.warn("Treating [" + localrsrc + "] as an archive even though it " +
+        "was specified as PATTERN");
+        FileUtil.unTar(localrsrc, dst);
+      } else {
+        LOG.warn("Cannot unpack " + localrsrc);
+        if (!localrsrc.renameTo(dst)) {
+          throw new IOException("Unable to rename file: [" + localrsrc
+              + "] to [" + dst + "]");
+        }
+      }
+    }
+    break;
     case FILE:
     default:
       if (!localrsrc.renameTo(dst)) {
@@ -163,8 +196,13 @@
             public Path run() throws Exception {
               return files.makeQualified(copy(sCopy, dst_work));
             };
-      });
-      unpack(new File(dTmp.toUri()), new File(dFinal.toUri()));
+          });
+      Pattern pattern = null;
+      String p = resource.getPattern();
+      if (p != null) {
+        pattern = Pattern.compile(p);
+      }
+      unpack(new File(dTmp.toUri()), new File(dFinal.toUri()), pattern);
       changePermissions(dFinal.getFileSystem(conf), dFinal);
       files.rename(dst_work, destDirPath, Rename.OVERWRITE);
     } catch (Exception e) {
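
To make the PATTERN branch concrete: it delegates to RunJar.unJar with a compiled regex so only matching jar entries are unpacked, while the jar itself is kept alongside them. A minimal sketch of the selective unpack (paths illustrative; the pattern matches the one used in TestFSDownload below):

import java.io.File;
import java.util.regex.Pattern;
import org.apache.hadoop.util.RunJar;

public class PatternUnpackSketch {
  public static void main(String[] args) throws Exception {
    File jar = new File("/tmp/job.jar");    // illustrative path
    File dest = new File("/tmp/unpacked");  // illustrative path
    Pattern p = Pattern.compile("classes/.*");
    // Extracts only jar entries whose names match the pattern.
    RunJar.unJar(jar, dest, p);
  }
}
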
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
index 24224b6..c4c6eef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -1,4 +1,4 @@
 org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo
 org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo
 org.apache.hadoop.yarn.security.SchedulerSecurityInfo
-
+org.apache.hadoop.yarn.security.admin.AdminSecurityInfo
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 1d0873b..adcf8d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -185,6 +185,7 @@
   <property>
     <description>The class to use as the resource scheduler.</description>
     <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
   </property>
 
   <property>
@@ -192,7 +193,7 @@
     in MBs. Memory requests lower than this won't take effect,
     and the specified value will get allocated at minimum.</description>
     <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>128</value>
+    <value>1024</value>
   </property>
 
   <property>
@@ -200,7 +201,7 @@
     in MBs. Memory requests higher than this won't take effect,
     and will get capped to this value.</description>
     <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>10240</value>
+    <value>8192</value>
   </property>
 
   <property>
@@ -266,7 +267,7 @@
   <property>
     <description>Environment variables that containers may override rather than use NodeManager's default.</description>
     <name>yarn.nodemanager.env-whitelist</name>
-    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME</value>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value>
   </property>
 
   <property>
@@ -561,7 +562,7 @@
     <description>CLASSPATH for YARN applications. A comma-separated list
     of CLASSPATH entries</description>
      <name>yarn.application.classpath</name>
-     <value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$YARN_HOME/share/hadoop/yarn/*,$YARN_HOME/share/hadoop/yarn/lib/*</value>
+     <value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
   </property>
 
 </configuration>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
index e7da8ca..25adf31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java
@@ -107,8 +107,9 @@
     FileStatus status = files.getFileStatus(p);
     ret.setSize(status.getLen());
     ret.setTimestamp(status.getModificationTime());
-    ret.setType(LocalResourceType.ARCHIVE);
+    ret.setType(LocalResourceType.PATTERN);
     ret.setVisibility(vis);
+    ret.setPattern("classes/.*");
     return ret;
   }
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java
index abcca51..d82771b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/WebServicesTestUtils.java
@@ -79,4 +79,11 @@
         got.matches(expected));
   }
 
+  public static void checkStringContains(String print, String expected, String got) {
+    assertTrue(
+        print + " doesn't contain expected string, got: " + got + " expected: " + expected,
+        got.contains(expected));
+  }
+
+
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
index 16f4b6f..ade32b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
@@ -128,7 +128,8 @@
   public byte[] createPassword(ContainerTokenIdentifier identifier) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Creating password for " + identifier.getContainerID()
-          + " to be run on NM " + identifier.getNmHostAddress());
+          + " for user " + identifier.getUser() + " to be run on NM "
+          + identifier.getNmHostAddress());
     }
     this.readLock.lock();
     try {
@@ -155,7 +156,8 @@
       throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Retrieving password for " + identifier.getContainerID()
-          + " to be run on NM " + identifier.getNmHostAddress());
+          + " for user " + identifier.getUser() + " to be run on NM "
+          + identifier.getNmHostAddress());
     }
     return createPassword(identifier.getBytes(), masterKey.getSecretKey());
   }
@@ -173,11 +175,12 @@
    * 
    * @param containerId
    * @param nodeId
+   * @param appSubmitter
    * @param capability
    * @return the container-token
    */
   public ContainerToken createContainerToken(ContainerId containerId,
-      NodeId nodeId, Resource capability) {
+      NodeId nodeId, String appSubmitter, Resource capability) {
     byte[] password;
     ContainerTokenIdentifier tokenIdentifier;
     long expiryTimeStamp =
@@ -188,8 +191,8 @@
     try {
       tokenIdentifier =
           new ContainerTokenIdentifier(containerId, nodeId.toString(),
-            capability, expiryTimeStamp, this.currentMasterKey.getMasterKey()
-              .getKeyId());
+            appSubmitter, capability, expiryTimeStamp, this.currentMasterKey
+              .getMasterKey().getKeyId());
       password = this.createPassword(tokenIdentifier);
 
     } finally {
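
The signature change above threads the submitting user into the token itself. A minimal sketch of building the widened identifier (BuilderUtils.newApplicationAttemptId is assumed here for brevity; the other helpers appear elsewhere in this patch):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.util.BuilderUtils;

public class ContainerTokenSketch {
  public static void main(String[] args) {
    ApplicationId appId = BuilderUtils.newApplicationId(123456789L, 1);
    ApplicationAttemptId attempt =
        BuilderUtils.newApplicationAttemptId(appId, 1); // assumed helper
    ContainerId cid = BuilderUtils.newContainerId(attempt, 1);
    Resource capability = BuilderUtils.newResource(1024);
    // The NM later rejects a launch context whose user differs from "alice".
    ContainerTokenIdentifier id = new ContainerTokenIdentifier(cid,
        "nm-host:45454", "alice", capability,
        System.currentTimeMillis() + 600000L, 42 /* masterKeyId */);
  }
}
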
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index 9971567..10362d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -19,12 +19,17 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import java.io.File;
-import java.util.concurrent.CopyOnWriteArrayList;
+import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
@@ -66,6 +71,31 @@
   }
 
   /**
+   * Create any non-existent directories and parent directories, updating the
+   * list of valid directories if necessary.
+   * @param localFs local file system to use
+   * @param perm absolute permissions to use for any directories created
+   * @return true if there were no errors, false if at least one error occurred
+   */
+  synchronized boolean createNonExistentDirs(FileContext localFs,
+      FsPermission perm) {
+    boolean failed = false;
+    for (final String dir : localDirs) {
+      try {
+        createDir(localFs, new Path(dir), perm);
+      } catch (IOException e) {
+        LOG.warn("Unable to create directory " + dir + " error " +
+            e.getMessage() + ", removing from the list of valid directories.");
+        localDirs.remove(dir);
+        failedDirs.add(dir);
+        numFailures++;
+        failed = true;
+      }
+    }
+    return !failed;
+  }
+
+  /**
    * Check the health of current set of local directories, updating the list
    * of valid directories if necessary.
    * @return <em>true</em> if there is a new disk-failure identified in
@@ -86,4 +116,20 @@
     }
     return numFailures > oldNumFailures;
   }
+
+  private void createDir(FileContext localFs, Path dir, FsPermission perm)
+      throws IOException {
+    if (dir == null) {
+      return;
+    }
+    try {
+      localFs.getFileStatus(dir);
+    } catch (FileNotFoundException e) {
+      createDir(localFs, dir.getParent(), perm);
+      localFs.mkdir(dir, perm, false);
+      if (!perm.equals(perm.applyUMask(localFs.getUMask()))) {
+        localFs.setPermission(dir, perm);
+      }
+    }
+  }
 }
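
The umask test in createDir above exists because mkdir applies the process umask, so the requested mode may not be what lands on disk; when masking would alter it, an explicit setPermission follows. A minimal sketch of that check in isolation:

import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskCheckSketch {
  public static void main(String[] args) {
    FsPermission requested = new FsPermission((short) 0755);
    FsPermission umask = new FsPermission((short) 0077);
    // If masking would change the mode, createDir calls setPermission.
    boolean needsExplicitSet = !requested.equals(requested.applyUMask(umask));
    System.out.println(needsExplicitSet); // true: 0755 & ~0077 == 0700
  }
}
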
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index be51537..c18a0c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -92,7 +93,9 @@
   }
 
   protected String getContainerExecutorExecutablePath(Configuration conf) {
-    File hadoopBin = new File(System.getenv("YARN_HOME"), "bin");
+    String yarnHomeEnvVar =
+        System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
+    File hadoopBin = new File(yarnHomeEnvVar, "bin");
     String defaultPath =
       new File(hadoopBin, "container-executor").getAbsolutePath();
     return null == conf
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index 4e07b70..96a58dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -26,9 +29,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.service.AbstractService;
 
@@ -74,6 +80,8 @@
 
   /** when disk health checking code was last run */
   private long lastDisksCheckTime;
+  
+  private static final String FILE_SCHEME = "file";
 
   /**
    * Class which is used by the {@link Timer} class to periodically execute the
@@ -81,13 +89,13 @@
    */
   private final class MonitoringTimerTask extends TimerTask {
 
-    public MonitoringTimerTask(Configuration conf) {
+    public MonitoringTimerTask(Configuration conf) throws YarnException {
       localDirs = new DirectoryCollection(
-          conf.getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS));
+          validatePaths(conf.getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)));
       logDirs = new DirectoryCollection(
-          conf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS));
-      localDirsAllocator =
-          new LocalDirAllocator(YarnConfiguration.NM_LOCAL_DIRS);
+          validatePaths(conf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)));
+      localDirsAllocator = new LocalDirAllocator(
+          YarnConfiguration.NM_LOCAL_DIRS);
       logDirsAllocator = new LocalDirAllocator(YarnConfiguration.NM_LOG_DIRS);
     }
 
@@ -103,6 +111,7 @@
 
   /**
    * Method which initializes the timertask and its interval time.
+   * 
    */
   @Override
   public void init(Configuration config) {
@@ -120,6 +129,19 @@
     lastDisksCheckTime = System.currentTimeMillis();
     super.init(conf);
 
+    FileContext localFs;
+    try {
+      localFs = FileContext.getLocalFSFileContext(config);
+    } catch (IOException e) {
+      throw new YarnException("Unable to get the local filesystem", e);
+    }
+    FsPermission perm = new FsPermission((short)0755);
+    boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
+    createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
+    if (!createSucceeded) {
+      updateDirsAfterFailure();
+    }
+
     // Check the disk health immediately to weed out bad directories
     // before other init code attempts to use them.
     checkDirs();
@@ -229,7 +251,8 @@
    * Set good local dirs and good log dirs in the configuration so that the
    * LocalDirAllocator objects will use this updated configuration only.
    */
-  private void updateDirsInConfiguration() {
+  private void updateDirsAfterFailure() {
+    LOG.info("Disk(s) failed. " + getDisksHealthReport());
     Configuration conf = getConfig();
     List<String> localDirs = getLocalDirs();
     conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,
@@ -237,6 +260,10 @@
     List<String> logDirs = getLogDirs();
     conf.setStrings(YarnConfiguration.NM_LOG_DIRS,
                       logDirs.toArray(new String[logDirs.size()]));
+    if (!areDisksHealthy()) {
+      // Just log.
+      LOG.error("Most of the disks failed. " + getDisksHealthReport());
+    }
   }
 
   private void checkDirs() {
@@ -249,12 +276,7 @@
       }
 
       if (newFailure) {
-        LOG.info("Disk(s) failed. " + getDisksHealthReport());
-        updateDirsInConfiguration();
-        if (!areDisksHealthy()) {
-          // Just log.
-          LOG.error("Most of the disks failed. " + getDisksHealthReport());
-        }
+        updateDirsAfterFailure();
       }
       lastDisksCheckTime = System.currentTimeMillis();
   }
@@ -278,4 +300,31 @@
   public Path getLogPathToRead(String pathStr) throws IOException {
     return logDirsAllocator.getLocalPathToRead(pathStr, getConfig());
   }
+  
+  public static String[] validatePaths(String[] paths) {
+    ArrayList<String> validPaths = new ArrayList<String>();
+    for (int i = 0; i < paths.length; ++i) {
+      try {
+        URI uriPath = new URI(paths[i]);
+        if (uriPath.getScheme() == null
+            || uriPath.getScheme().equals(FILE_SCHEME)) {
+          validPaths.add(uriPath.getPath());
+        } else {
+          LOG.warn(paths[i] + " is not a valid path. Only the "
+              + FILE_SCHEME + " scheme or no scheme is allowed");
+          throw new YarnException(paths[i]
+              + " is not a valid path. Only the " + FILE_SCHEME
+              + " scheme or no scheme is allowed");
+        }
+      } catch (URISyntaxException e) {
+        LOG.warn(e.getMessage());
+        throw new YarnException(paths[i]
+            + " is not a valid path. Only the " + FILE_SCHEME
+            + " scheme or no scheme is allowed");
+      }
+    }
+    String[] arrValidPaths = new String[validPaths.size()];
+    validPaths.toArray(arrValidPaths);
+    return arrValidPaths;
+  }
 }
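
The new validatePaths rejects anything that is not a local path at NM init time, and strips the scheme from accepted entries. A minimal sketch of the accepted and rejected forms (directory names illustrative):

import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;

public class ValidatePathsSketch {
  public static void main(String[] args) {
    String[] ok = LocalDirsHandlerService.validatePaths(
        new String[] { "/grid/0/nm-local", "file:///grid/1/nm-local" });
    // ok == { "/grid/0/nm-local", "/grid/1/nm-local" } -- scheme stripped

    // This would throw YarnException (non-local scheme):
    // LocalDirsHandlerService.validatePaths(new String[] { "hdfs:///nm" });
  }
}
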
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f9650fb..7ca6a2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -329,7 +329,6 @@
             + remoteUgi.getTokenIdentifiers().size());
       }
 
-
       // Get the tokenId from the remote user ugi
       ContainerTokenIdentifier tokenId =
           selectContainerTokenIdentifier(remoteUgi);
@@ -341,8 +340,16 @@
                 + containerIDStr);
       } else {
 
+        // Is the container coming in with the correct user-name?
+        if (!tokenId.getApplicationSubmitter().equals(launchContext.getUser())) {
+          unauthorized = true;
+          messageBuilder.append("\n Expected user-name "
+              + tokenId.getApplicationSubmitter() + " but found "
+              + launchContext.getUser());
+        }
+
         // Is the container being relaunched? Or RPC layer let startCall with 
-    	//  tokens generated off old-secret through 
+        // tokens generated off the old secret through?
         if (!this.context.getContainerTokenSecretManager()
           .isValidStartContainerRequest(tokenId)) {
           unauthorized = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 65bcfbd..74d0227 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -209,6 +209,7 @@
     }
     switch (rsrc.getType()) {
       case ARCHIVE:
+      case PATTERN:
         return 5 * rsrc.getSize();
       case FILE:
       default:
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
index 7754baa..3d0e0fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourceRequest.java
@@ -34,6 +34,7 @@
   private final long timestamp;
   private final LocalResourceType type;
   private final LocalResourceVisibility visibility;
+  private final String pattern;
 
   /**
    * Wrap API resource to match against cache of localized resources.
@@ -45,22 +46,28 @@
     this(ConverterUtils.getPathFromYarnURL(resource.getResource()),
         resource.getTimestamp(),
         resource.getType(),
-        resource.getVisibility());
+        resource.getVisibility(),
+        resource.getPattern());
   }
 
   LocalResourceRequest(Path loc, long timestamp, LocalResourceType type,
-      LocalResourceVisibility visibility) {
+      LocalResourceVisibility visibility, String pattern) {
     this.loc = loc;
     this.timestamp = timestamp;
     this.type = type;
     this.visibility = visibility;
+    this.pattern = pattern;
   }
 
   @Override
   public int hashCode() {
-    return loc.hashCode() ^
+    int hash = loc.hashCode() ^
       (int)((timestamp >>> 32) ^ timestamp) *
       type.hashCode();
+    if (pattern != null) {
+      hash = hash ^ pattern.hashCode();
+    }
+    return hash;
   }
 
   @Override
@@ -72,9 +79,14 @@
       return false;
     }
     final LocalResourceRequest other = (LocalResourceRequest) o;
+    String pattern = getPattern();
+    String otherPattern = other.getPattern();
+    boolean patternEquals = (pattern == null && otherPattern == null) ||
+        (pattern != null && otherPattern != null && pattern.equals(otherPattern));
     return getPath().equals(other.getPath()) &&
            getTimestamp() == other.getTimestamp() &&
-           getType() == other.getType();
+           getType() == other.getType() &&
+           patternEquals;
   }
 
   @Override
@@ -87,6 +99,19 @@
       ret = (int)(getTimestamp() - other.getTimestamp());
       if (0 == ret) {
         ret = getType().ordinal() - other.getType().ordinal();
+        if (0 == ret) {
+          String pattern = getPattern();
+          String otherPattern = other.getPattern();
+          if (pattern == null && otherPattern == null) {
+            ret = 0;
+          } else if (pattern == null) {
+            ret = -1;
+          } else if (otherPattern == null) {
+            ret = 1;
+          } else {
+            ret = pattern.compareTo(otherPattern);
+          }
+        }
       }
     }
     return ret;
@@ -122,6 +147,11 @@
   }
 
   @Override
+  public String getPattern() {
+    return pattern;
+  }
+  
+  @Override
   public void setResource(URL resource) {
     throw new UnsupportedOperationException();
   }
@@ -145,14 +175,20 @@
   public void setVisibility(LocalResourceVisibility visibility) {
     throw new UnsupportedOperationException();
   }
-
+  
+  @Override
+  public void setPattern(String pattern) {
+    throw new UnsupportedOperationException();
+  }
+  
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("{ ");
     sb.append(getPath().toString()).append(", ");
     sb.append(getTimestamp()).append(", ");
-    sb.append(getType()).append(" }");
+    sb.append(getType()).append(", ");
+    sb.append(getPattern()).append(" }");
     return sb.toString();
   }
 }
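
The pattern field is folded into equals(), hashCode(), and compareTo() with null-safe semantics: two requests agree only when both patterns are null or both are equal strings. A minimal stand-alone sketch of that comparison:

public class PatternEqualsSketch {
  // Equivalent to the null-safe check in LocalResourceRequest.equals():
  // both null, or both non-null and equal.
  static boolean patternEquals(String a, String b) {
    return (a == null && b == null) || (a != null && a.equals(b));
  }

  public static void main(String[] args) {
    System.out.println(patternEquals(null, null));                // true
    System.out.println(patternEquals("classes/.*", null));        // false
    System.out.println(patternEquals("classes/.*", "classes/.*")); // true
  }
}
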
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index bee9c2d..00709fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -219,7 +219,8 @@
       ContainerId container = ctxt.getContainerId();
       rsrc.ref.add(container);
       rsrc.dispatcher.getEventHandler().handle(
-          new LocalizerResourceRequestEvent(rsrc, req.getVisibility(), ctxt));
+          new LocalizerResourceRequestEvent(rsrc, req.getVisibility(), ctxt, 
+              req.getLocalResourceRequest().getPattern()));
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 71ad968..c21ef51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -752,6 +752,7 @@
           next.setTimestamp(nextRsrc.getTimestamp());
           next.setType(nextRsrc.getType());
           next.setVisibility(evt.getVisibility());
+          next.setPattern(evt.getPattern());
           scheduled.put(nextRsrc, evt);
           return next;
         }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
index f316871..2e05dd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/LocalizerResourceRequestEvent.java
@@ -32,14 +32,16 @@
   private final LocalizerContext context;
   private final LocalizedResource resource;
   private final LocalResourceVisibility vis;
+  private final String pattern;
 
   public LocalizerResourceRequestEvent(LocalizedResource resource,
-      LocalResourceVisibility vis, LocalizerContext context) {
+      LocalResourceVisibility vis, LocalizerContext context, String pattern) {
     super(LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION,
         ConverterUtils.toString(context.getContainerId()));
     this.vis = vis;
     this.context = context;
     this.resource = resource;
+    this.pattern = pattern;
   }
 
   public LocalizedResource getResource() {
@@ -54,4 +56,8 @@
     return vis;
   }
 
+  public String getPattern() {
+    return pattern;
+  }
+
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
index 9f6fcf7..4ab61c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
@@ -23,7 +23,13 @@
 import java.util.List;
 import java.util.ListIterator;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -65,4 +71,37 @@
     // Verify no ConcurrentModification is thrown
     li.next();
   }
+
+  @Test
+  public void testCreateDirectories() throws IOException {
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
+    FileContext localFs = FileContext.getLocalFSFileContext(conf);
+
+    String dirA = new File(testDir, "dirA").getPath();
+    String dirB = new File(dirA, "dirB").getPath();
+    String dirC = new File(testDir, "dirC").getPath();
+    Path pathC = new Path(dirC);
+    FsPermission permDirC = new FsPermission((short)0710);
+
+    localFs.mkdir(pathC, null, true);
+    localFs.setPermission(pathC, permDirC);
+
+    String[] dirs = { dirA, dirB, dirC };
+    DirectoryCollection dc = new DirectoryCollection(dirs);
+    FsPermission defaultPerm = FsPermission.getDefault()
+        .applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
+    boolean createResult = dc.createNonExistentDirs(localFs, defaultPerm);
+    Assert.assertTrue(createResult);
+
+    FileStatus status = localFs.getFileStatus(new Path(dirA));
+    Assert.assertEquals("local dir parent not created with proper permissions",
+        defaultPerm, status.getPermission());
+    status = localFs.getFileStatus(new Path(dirB));
+    Assert.assertEquals("local dir not created with proper permissions",
+        defaultPerm, status.getPermission());
+    status = localFs.getFileStatus(pathC);
+    Assert.assertEquals("existing local directory permissions modified",
+        permDirC, status.getPermission());
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
new file mode 100644
index 0000000..fc6fba0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.service.Service.STATE;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestLocalDirsHandlerService {
+  private static final File testDir = new File("target",
+      TestDirectoryCollection.class.getName()).getAbsoluteFile();
+  private static final File testFile = new File(testDir, "testfile");
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    testDir.mkdirs();
+    testFile.createNewFile();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    FileUtil.fullyDelete(testDir);
+  }
+
+  @Test
+  public void testDirStructure() throws Exception {
+    Configuration conf = new YarnConfiguration();
+    String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
+    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1);
+    String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
+    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
+    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
+    dirSvc.init(conf);
+    Assert.assertEquals(1, dirSvc.getLocalDirs().size());
+  }
+
+  @Test
+  public void testValidPathsDirHandlerService() {
+    Configuration conf = new YarnConfiguration();
+    String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
+    String localDir2 = new File("hdfs:///" + testDir, "localDir2").getPath();
+    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
+    String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
+    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
+    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
+    try {
+      dirSvc.init(conf);
+      Assert.fail("Service should have thrown an exception due to wrong URI");
+    } catch (YarnException e) {
+    }
+    Assert.assertTrue("Service should not be inited", dirSvc.getServiceState()
+        .compareTo(STATE.NOTINITED) == 0);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
index edfe7d7..81446f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResource.java
@@ -37,7 +37,7 @@
 public class TestLocalResource {
 
   static org.apache.hadoop.yarn.api.records.LocalResource getYarnResource(Path p, long size,
-      long timestamp, LocalResourceType type, LocalResourceVisibility state)
+      long timestamp, LocalResourceType type, LocalResourceVisibility state, String pattern)
       throws URISyntaxException {
     org.apache.hadoop.yarn.api.records.LocalResource ret = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(org.apache.hadoop.yarn.api.records.LocalResource.class);
     ret.setResource(ConverterUtils.getYarnUrlFromURI(p.toUri()));
@@ -45,6 +45,7 @@
     ret.setTimestamp(timestamp);
     ret.setType(type);
     ret.setVisibility(state);
+    ret.setPattern(pattern);
     return ret;
   }
 
@@ -72,9 +73,9 @@
 
     long basetime = r.nextLong() >>> 2;
     org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, null);
     org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, null);
     final LocalResourceRequest a = new LocalResourceRequest(yA);
     LocalResourceRequest b = new LocalResourceRequest(yA);
     checkEqual(a, b);
@@ -83,31 +84,37 @@
 
     // ignore visibility
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PRIVATE);
+        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PRIVATE, null);
     b = new LocalResourceRequest(yB);
     checkEqual(a, b);
 
     // ignore size
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), 0, basetime, FILE, PRIVATE);
+        new Path("http://yak.org:80/foobar"), 0, basetime, FILE, PRIVATE, null);
     b = new LocalResourceRequest(yB);
     checkEqual(a, b);
 
     // note path
     yB = getYarnResource(
-        new Path("hdfs://dingo.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC);
+        new Path("hdfs://dingo.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC, null);
     b = new LocalResourceRequest(yB);
     checkNotEqual(a, b);
 
     // note type
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), 0, basetime, ARCHIVE, PUBLIC, null);
     b = new LocalResourceRequest(yB);
     checkNotEqual(a, b);
 
     // note timestamp
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), 0, basetime + 1, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), 0, basetime + 1, FILE, PUBLIC, null);
+    b = new LocalResourceRequest(yB);
+    checkNotEqual(a, b);
+
+    // note pattern
+    yB = getYarnResource(
+        new Path("http://yak.org:80/foobar"), 0, basetime, FILE, PUBLIC, "^/foo/.*");
     b = new LocalResourceRequest(yB);
     checkNotEqual(a, b);
   }
@@ -120,24 +127,35 @@
     System.out.println("SEED: " + seed);
     long basetime = r.nextLong() >>> 2;
     org.apache.hadoop.yarn.api.records.LocalResource yA = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, "^/foo/.*");
     final LocalResourceRequest a = new LocalResourceRequest(yA);
 
     // Path primary
     org.apache.hadoop.yarn.api.records.LocalResource yB = getYarnResource(
-        new Path("http://yak.org:80/foobaz"), -1, basetime, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobaz"), -1, basetime, FILE, PUBLIC, "^/foo/.*");
     LocalResourceRequest b = new LocalResourceRequest(yB);
     assertTrue(0 > a.compareTo(b));
 
     // timestamp secondary
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime + 1, FILE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), -1, basetime + 1, FILE, PUBLIC, "^/foo/.*");
     b = new LocalResourceRequest(yB);
     assertTrue(0 > a.compareTo(b));
 
     // type tertiary
     yB = getYarnResource(
-        new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC);
+        new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/foo/.*");
+    b = new LocalResourceRequest(yB);
+    assertTrue(0 != a.compareTo(b)); // don't care about order, just not-equal
+
+    // pattern 4th
+    yB = getYarnResource(
+        new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/food/.*");
+    b = new LocalResourceRequest(yB);
+    assertTrue(0 != a.compareTo(b)); // don't care about order, just not-equal
+
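+    // also check that a null pattern differs from a non-null one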
+    yB = getYarnResource(
+        new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, null);
     b = new LocalResourceRequest(yB);
     assertTrue(0 != a.compareTo(b)); // don't care about order, just not-equal
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index 3ee623c..a972945 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -230,7 +230,7 @@
       long ts, LocalResourceVisibility vis) {
     final LocalResourceRequest req =
         new LocalResourceRequest(new Path("file:///tmp/" + user + "/rsrc" + i),
-            ts + i * 2000, LocalResourceType.FILE, vis);
+            ts + i * 2000, LocalResourceType.FILE, vis, null);
     return req;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 1572f36..aa53a58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -35,6 +35,7 @@
 import static org.mockito.Mockito.when;
 
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -157,6 +158,7 @@
 
       // verify directory creation
       for (Path p : localDirs) {
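+        // The configured dirs are fully-qualified URIs; strip the scheme so the paths match the mkdir() calls being verified.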
+        p = new Path((new URI(p.toString())).getPath());
         Path usercache = new Path(p, ContainerLocalizer.USERCACHE);
         verify(spylfs)
           .mkdir(eq(usercache),
@@ -192,7 +194,8 @@
       sDirs[i] = localDirs.get(i).toString();
     }
     conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
-
+    String logDir = lfs.makeQualified(new Path(basedir, "logdir ")).toString();
+    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir);
     LocalizerTracker mockLocallilzerTracker = mock(LocalizerTracker.class);
     DrainDispatcher dispatcher = new DrainDispatcher();
     dispatcher.init(conf);
@@ -379,7 +382,8 @@
       sDirs[i] = localDirs.get(i).toString();
     }
     conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
-
+    String logDir = lfs.makeQualified(new Path(basedir, "logdir ")).toString();
+    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir);
     DrainDispatcher dispatcher = new DrainDispatcher();
     dispatcher.init(conf);
     dispatcher.start();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
index c425eb5..ee24548 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceRetention.java
@@ -83,7 +83,7 @@
     for (int i = 0; i < nRsrcs; ++i) {
       final LocalResourceRequest req = new LocalResourceRequest(
           new Path("file:///" + user + "/rsrc" + i), timestamp + i * tsstep,
-          LocalResourceType.FILE, LocalResourceVisibility.PUBLIC);
+          LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, null);
       final long ts = timestamp + i * tsstep;
       final Path p = new Path("file:///local/" + user + "/rsrc" + i);
       LocalizedResource rsrc = new LocalizedResource(req, null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
index 28985f5..eeeb31b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
@@ -43,8 +43,9 @@
 
   @Test
   public void testContainerLogDirs() throws IOException {
-    String logdirwithFile = "file:///target/"
-        + TestNMWebServer.class.getSimpleName() + "LogDir";
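+    // "file:///target/..." resolves to the filesystem root; point at the module's target/ directory instead.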
+    String absLogDir = new File("target",
+        TestNMWebServer.class.getSimpleName() + "LogDir").getAbsolutePath();
+    String logdirwithFile = "file://" + absLogDir;
     Configuration conf = new Configuration();
     conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
     NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
index e5d90b2..f0c5272 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
@@ -382,7 +382,7 @@
       String message = exception.getString("message");
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
-      verifyStatInvalidException(message, type, classname);
+      verifyStateInvalidException(message, type, classname);
     }
   }
 
@@ -412,7 +412,7 @@
       String message = exception.getString("message");
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
-      verifyStatInvalidException(message, type, classname);
+      verifyStateInvalidException(message, type, classname);
     }
   }
 
@@ -450,16 +450,16 @@
       String type = WebServicesTestUtils.getXmlString(element, "exception");
       String classname = WebServicesTestUtils.getXmlString(element,
           "javaClassName");
-      verifyStatInvalidException(message, type, classname);
+      verifyStateInvalidException(message, type, classname);
     }
   }
 
-  private void verifyStatInvalidException(String message, String type,
+  private void verifyStateInvalidException(String message, String type,
       String classname) {
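+    // Match on a substring only; the exact "No enum const..." wording can vary, e.g. across JDK versions.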
     WebServicesTestUtils
-        .checkStringMatch(
+        .checkStringContains(
             "exception message",
-            "No enum const class org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState.FOO_STATE",
+            "org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState.FOO_STATE",
             message);
     WebServicesTestUtils.checkStringMatch("exception type",
         "IllegalArgumentException", type);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index efe126e..7f5465c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -58,66 +58,6 @@
           </execution>
         </executions>
       </plugin>
-
-      <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/yarn_server_resourcemanager_service_protos.proto</argument>
-                <argument>src/main/proto/RMAdminProtocol.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>target/generated-sources/proto</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
     </plugins>
   </build>
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 4a36fd2..e1fb6d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -32,6 +32,19 @@
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.yarn.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -39,19 +52,6 @@
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.service.AbstractService;
@@ -189,15 +189,14 @@
       throws YarnRemoteException {
     UserGroupInformation user = checkAcls("refreshNodes");
     try {
-      this.nodesListManager.refreshNodes();
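+      // Pass a freshly-loaded configuration so that updated include/exclude file paths take effect.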
+      this.nodesListManager.refreshNodes(new YarnConfiguration());
       RMAuditLogger.logSuccess(user.getShortUserName(), "refreshNodes",
           "AdminService");
       return recordFactory.newRecordInstance(RefreshNodesResponse.class);
     } catch (IOException ioe) {
       LOG.info("Exception refreshing nodes ", ioe);
       RMAuditLogger.logFailure(user.getShortUserName(), "refreshNodes",
-          adminAcl.toString(), "AdminService",
-          "Exception refreshing nodes");
+          adminAcl.toString(), "AdminService", "Exception refreshing nodes");
       throw RPCUtil.getRemoteException(ioe);
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 33c79f6..41b5881 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -103,8 +103,16 @@
     }
   }
 
-  public void refreshNodes() throws IOException {
+  public void refreshNodes(Configuration yarnConf) throws IOException {
     synchronized (hostsReader) {
+      if (null == yarnConf) {
+        yarnConf = new YarnConfiguration();
+      }
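+      // Re-resolve the include/exclude file names from the configuration before re-reading them.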
+      hostsReader.updateFileNames(yarnConf.get(
+          YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
+          YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH), yarnConf.get(
+          YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
+          YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH));
       hostsReader.refresh();
       printConfiguredHosts();
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index ad28d6c..d2c03b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
 import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
@@ -45,6 +44,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 
 /**
@@ -58,14 +58,14 @@
   private LinkedList<ApplicationId> completedApps = new LinkedList<ApplicationId>();
 
   private final RMContext rmContext;
-  private final ClientToAMSecretManager clientToAMSecretManager;
+  private final ClientToAMTokenSecretManagerInRM clientToAMSecretManager;
   private final ApplicationMasterService masterService;
   private final YarnScheduler scheduler;
   private final ApplicationACLsManager applicationACLsManager;
   private Configuration conf;
 
   public RMAppManager(RMContext context,
-      ClientToAMSecretManager clientToAMSecretManager,
+      ClientToAMTokenSecretManagerInRM clientToAMSecretManager,
       YarnScheduler scheduler, ApplicationMasterService masterService,
       ApplicationACLsManager applicationACLsManager, Configuration conf) {
     this.rmContext = context;
@@ -230,8 +230,9 @@
     ApplicationId applicationId = submissionContext.getApplicationId();
     RMApp application = null;
     try {
+      // TODO: This needs to move to per-AppAttempt
+      this.clientToAMSecretManager.registerApplication(applicationId);
       String clientTokenStr = null;
-      String user = UserGroupInformation.getCurrentUser().getShortUserName();
       if (UserGroupInformation.isSecurityEnabled()) {
         Token<ClientTokenIdentifier> clientToken = new 
             Token<ClientTokenIdentifier>(
@@ -256,11 +257,12 @@
           submissionContext);
 
       // Create RMApp
-      application = new RMAppImpl(applicationId, rmContext,
-          this.conf, submissionContext.getApplicationName(), user,
-          submissionContext.getQueue(), submissionContext, clientTokenStr,
-          appStore, this.scheduler,
-          this.masterService, submitTime);
+      application =
+          new RMAppImpl(applicationId, rmContext, this.conf,
+            submissionContext.getApplicationName(),
+            submissionContext.getUser(), submissionContext.getQueue(),
+            submissionContext, clientTokenStr, appStore, this.scheduler,
+            this.masterService, submitTime);
 
       // Sanity check - duplicate?
       if (rmContext.getRMApps().putIfAbsent(applicationId, application) != 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index e9e5340..2c5869e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
 import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
@@ -64,9 +63,9 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ApplicationTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
+import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
@@ -97,8 +96,8 @@
   private static final Log LOG = LogFactory.getLog(ResourceManager.class);
   public static final long clusterTimeStamp = System.currentTimeMillis();
 
-  protected ClientToAMSecretManager clientToAMSecretManager =
-      new ClientToAMSecretManager();
+  protected ClientToAMTokenSecretManagerInRM clientToAMSecretManager =
+      new ClientToAMTokenSecretManagerInRM();
   
   protected RMContainerTokenSecretManager containerTokenSecretManager;
 
@@ -256,10 +255,22 @@
   }
 
   protected ResourceScheduler createScheduler() {
-    return ReflectionUtils.newInstance(this.conf.getClass(
-        YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
-        ResourceScheduler.class), this.conf);
-  }
+    String schedulerClassName = conf.get(YarnConfiguration.RM_SCHEDULER,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER);
+    LOG.info("Using Scheduler: " + schedulerClassName);
+    try {
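+      // Load the configured class and verify that it actually implements ResourceScheduler.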
+      Class<?> schedulerClazz = Class.forName(schedulerClassName);
+      if (ResourceScheduler.class.isAssignableFrom(schedulerClazz)) {
+        return (ResourceScheduler) ReflectionUtils.newInstance(schedulerClazz,
+            this.conf);
+      } else {
+        throw new YarnException("Class: " + schedulerClassName
+            + " not instance of " + ResourceScheduler.class.getCanonicalName());
+      }
+    } catch (ClassNotFoundException e) {
+      throw new YarnException("Could not instantiate Scheduler: "
+          + schedulerClassName, e);
+    }
+  }
 
   protected ApplicationMasterLauncher createAMLauncher() {
     return new ApplicationMasterLauncher(this.clientToAMSecretManager,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index aa9d2c2..2d633a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.yarn.api.ContainerManager;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -54,13 +55,12 @@
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
-import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.util.ProtoUtils;
 
 /**
@@ -76,7 +76,7 @@
   private final Configuration conf;
   private final RecordFactory recordFactory = 
       RecordFactoryProvider.getRecordFactory(null);
-  private final ClientToAMSecretManager clientToAMSecretManager;
+  private final ClientToAMTokenSecretManagerInRM clientToAMSecretManager;
   private final AMLauncherEventType eventType;
   private final RMContext rmContext;
   
@@ -85,7 +85,7 @@
   
   public AMLauncher(RMContext rmContext, RMAppAttempt application,
       AMLauncherEventType eventType,
-      ClientToAMSecretManager clientToAMSecretManager, Configuration conf) {
+      ClientToAMTokenSecretManagerInRM clientToAMSecretManager, Configuration conf) {
     this.application = application;
     this.conf = conf;
     this.clientToAMSecretManager = clientToAMSecretManager;
@@ -194,10 +194,12 @@
     String parts[] =
         application.getMasterContainer().getNodeHttpAddress().split(":");
     environment.put(ApplicationConstants.NM_HTTP_PORT_ENV, parts[1]);
+    ApplicationId applicationId =
+        application.getAppAttemptId().getApplicationId();
     environment.put(
         ApplicationConstants.APP_SUBMIT_TIME_ENV,
         String.valueOf(rmContext.getRMApps()
-            .get(application.getAppAttemptId().getApplicationId())
+            .get(applicationId)
             .getSubmitTime()));
  
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -237,10 +239,8 @@
       container.setContainerTokens(
           ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
 
-      ClientTokenIdentifier identifier = new ClientTokenIdentifier(
-          application.getAppAttemptId().getApplicationId());
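+      // The master key was registered for this application at submission time.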
       SecretKey clientSecretKey =
-          this.clientToAMSecretManager.getMasterKey(identifier);
+          this.clientToAMSecretManager.getMasterKey(applicationId);
       String encoded =
           Base64.encodeBase64URLSafeString(clientSecretKey.getEncoded());
       environment.put(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
index 52d201d..f65d6dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java
@@ -25,9 +25,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.service.AbstractService;
 
 
@@ -41,11 +42,11 @@
   private final BlockingQueue<Runnable> masterEvents
     = new LinkedBlockingQueue<Runnable>();
   
-  private ClientToAMSecretManager clientToAMSecretManager;
+  private ClientToAMTokenSecretManagerInRM clientToAMSecretManager;
   protected final RMContext context;
   
   public ApplicationMasterLauncher(
-      ClientToAMSecretManager clientToAMSecretManager, RMContext context) {
+      ClientToAMTokenSecretManagerInRM clientToAMSecretManager, RMContext context) {
     super(ApplicationMasterLauncher.class.getName());
     this.context = context;
     this.launcherPool = new ThreadPoolExecutor(10, 10, 1, 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java
deleted file mode 100644
index 7849d5a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocol.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.yarn.server.resourcemanager.api;
-
-import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshServiceAclsResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
-
-public interface RMAdminProtocol extends GetUserMappingsProtocol {
-  public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) 
-  throws YarnRemoteException;
-  
-  public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
-  throws YarnRemoteException;
-  
-  public RefreshSuperUserGroupsConfigurationResponse 
-  refreshSuperUserGroupsConfiguration(
-      RefreshSuperUserGroupsConfigurationRequest request)
-  throws YarnRemoteException;
-
-  public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
-      RefreshUserToGroupsMappingsRequest request)
-  throws YarnRemoteException;
-  
-  public RefreshAdminAclsResponse refreshAdminAcls(
-      RefreshAdminAclsRequest request)
-  throws YarnRemoteException;
-  
-  public RefreshServiceAclsResponse refreshServiceAcls(
-      RefreshServiceAclsRequest request)
-  throws YarnRemoteException;
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
index 85693dd..8c23f30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemStore.java
@@ -1,19 +1,3 @@
-package org.apache.hadoop.yarn.server.resourcemanager.recovery;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationMaster;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,6 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationMaster;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
 public class MemStore implements Store {
   RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 3bcdda6..c4966a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -591,6 +591,10 @@
 
       RMAppAttemptRejectedEvent rejectedEvent = (RMAppAttemptRejectedEvent) event;
 
+      // Tell the AMS to unregister this attempt from the ApplicationMasterService.
+      appAttempt.masterService
+          .unregisterAttempt(appAttempt.applicationAttemptId);
+      
       // Save the diagnostic message
       String message = rejectedEvent.getMessage();
       appAttempt.setDiagnostics(message);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 7e7bbee..d222b90 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1197,7 +1197,7 @@
     if (UserGroupInformation.isSecurityEnabled()) {
       containerToken =
           containerTokenSecretManager.createContainerToken(containerId, nodeId,
-            capability);
+            application.getUser(), capability);
       if (containerToken == null) {
         return null; // Try again later.
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java
index 7b46d84..3f97c96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java
@@ -161,7 +161,7 @@
     if (UserGroupInformation.isSecurityEnabled()) {
       containerToken =
           containerTokenSecretManager.createContainerToken(containerId, nodeId,
-            capability);
+            application.getUser(), capability);
       if (containerToken == null) {
         return null; // Try again later.
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java
index 33625a7..592b310 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java
@@ -65,6 +65,17 @@
   long lastTimeAtMinShare;
   long lastTimeAtHalfFairShare;
 
+  // Constructor for tests
+  protected FSQueueSchedulable(FairScheduler scheduler, FSQueue fsQueue,
+      QueueManager qMgr, QueueMetrics metrics, long minShare, long fairShare) {
+    this.scheduler = scheduler;
+    this.queueMgr = qMgr;
+    this.queue = fsQueue;
+    this.metrics = metrics;
+    this.lastTimeAtMinShare = minShare;
+    this.lastTimeAtHalfFairShare = fairShare;
+  }
+
   public FSQueueSchedulable(FairScheduler scheduler, FSQueue queue) {
     this.scheduler = scheduler;
     this.queue = queue;
@@ -93,19 +104,27 @@
    */
   @Override
   public void updateDemand() {
+    // Compute demand by iterating through apps in the queue
+    // Limit demand to maxResources
+    Resource maxRes = queueMgr.getMaxResources(queue.getName());
     demand = Resources.createResource(0);
     for (AppSchedulable sched: appScheds) {
       sched.updateDemand();
       Resource toAdd = sched.getDemand();
-      LOG.debug("Counting resource from " + sched.getName() + " " + toAdd.toString());
-      LOG.debug("Total resource consumption for " + this.getName() + " now " + demand.toString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
+            + "; Total resource consumption for " + this.getName() + " now "
+            + demand);
+      }
       demand = Resources.add(demand, toAdd);
-
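+      // Demand can only grow, so stop iterating once the cap is reached.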
+      if (Resources.greaterThanOrEqual(demand, maxRes)) {
+        demand = maxRes;
+        break;
+      }
     }
-    // if demand exceeds the cap for this queue, limit to the max
-    Resource maxRes = queueMgr.getMaxResources(queue.getName());
-    if(Resources.greaterThan(demand, maxRes)) {
-      demand = maxRes;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("The updated demand for " + this.getName() + " is " + demand
+          + "; the max is " + maxRes);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index aebf989a..a69374c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -539,7 +539,8 @@
         if (UserGroupInformation.isSecurityEnabled()) {
           containerToken =
               this.rmContext.getContainerTokenSecretManager()
-                .createContainerToken(containerId, nodeId, capability);
+                .createContainerToken(containerId, nodeId,
+                  application.getUser(), capability);
           if (containerToken == null) {
             return i; // Try again later.
           }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
new file mode 100644
index 0000000..9976da5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java
@@ -0,0 +1,48 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.security;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.crypto.SecretKey;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager;
+
+public class ClientToAMTokenSecretManagerInRM extends
+    BaseClientToAMTokenSecretManager {
+
+  // Per application master-keys for managing client-tokens
+  private Map<ApplicationId, SecretKey> masterKeys =
+      new HashMap<ApplicationId, SecretKey>();
+
+  public synchronized void registerApplication(ApplicationId applicationID) {
+    this.masterKeys.put(applicationID, generateSecret());
+  }
+
+  public synchronized void unRegisterApplication(ApplicationId applicationID) {
+    this.masterKeys.remove(applicationID);
+  }
+
+  @Override
+  public synchronized SecretKey getMasterKey(ApplicationId applicationID) {
+    return this.masterKeys.get(applicationID);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
index ba58f3e..9571108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java
@@ -24,9 +24,9 @@
 import org.apache.hadoop.yarn.api.AMRMProtocolPB;
 import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
 import org.apache.hadoop.yarn.api.ContainerManagerPB;
+import org.apache.hadoop.yarn.api.RMAdminProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
-import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
 
 /**
  * {@link PolicyProvider} for YARN ResourceManager protocols.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
deleted file mode 100644
index 1f58977..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.hadoop.yarn.server.resourcemanager.security.admin.AdminSecurityInfo
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
index 9de7989..65ef0a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
@@ -68,7 +68,7 @@
   </property>
 
   <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
     <value>*</value>
     <description>
       The ACL of who can submit jobs to the default queue.
@@ -76,7 +76,7 @@
   </property>
 
   <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
     <value>*</value>
     <description>
       The ACL of who can administer jobs on the default queue.
@@ -84,15 +84,6 @@
   </property>
 
   <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-
-  <property>
     <name>yarn.scheduler.capacity.node-locality-delay</name>
     <value>-1</value>
     <description>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index accba56..b097ea3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -112,7 +112,7 @@
       ResourceRequest hostReq = createResourceReq(host, memory, priority,
           containers);
       reqs.add(hostReq);
-      ResourceRequest rackReq = createResourceReq("default-rack", memory,
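+      // Rack names are network-topology paths, so they need the leading '/'.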
+      ResourceRequest rackReq = createResourceReq("/default-rack", memory,
           priority, containers);
       reqs.add(rackReq);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 9f8633d..ef935e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
@@ -58,12 +59,12 @@
 public class MockRM extends ResourceManager {
 
   public MockRM() {
-    this(new Configuration());
+    this(new YarnConfiguration());
   }
 
   public MockRM(Configuration conf) {
-    super(StoreFactory.getStore(conf));
-    init(conf);
+    super(StoreFactory.getStore(conf));
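+    // Ensure YARN defaults are loaded even when callers pass a plain Configuration.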
+    init(conf instanceof YarnConfiguration ? conf : new YarnConfiguration(conf));
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMWithCustomAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMWithCustomAMLauncher.java
new file mode 100644
index 0000000..06105fd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRMWithCustomAMLauncher.java
@@ -0,0 +1,61 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+
+public class MockRMWithCustomAMLauncher extends MockRM {
+
+  private final ContainerManager containerManager;
+
+  public MockRMWithCustomAMLauncher(ContainerManager containerManager) {
+    this(new Configuration(), containerManager);
+  }
+
+  public MockRMWithCustomAMLauncher(Configuration conf,
+      ContainerManager containerManager) {
+    super(conf);
+    this.containerManager = containerManager;
+  }
+
+  @Override
+  protected ApplicationMasterLauncher createAMLauncher() {
+    return new ApplicationMasterLauncher(super.clientToAMSecretManager,
+      getRMContext()) {
+      @Override
+      protected Runnable createRunnableLauncher(RMAppAttempt application,
+          AMLauncherEventType event) {
+        return new AMLauncher(context, application, event,
+          clientToAMSecretManager, getConfig()) {
+          @Override
+          protected ContainerManager getContainerMgrProxy(
+              ContainerId containerId) {
+            return containerManager;
+          }
+        };
+      }
+    };
+  }
+}
\ No newline at end of file
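A hedged usage sketch (not in the patch): a test can hand this class a stub ContainerManager so AM launch traffic stays in-process instead of reaching a real NodeManager. Imports are assumed to match those of the class above, plus the org.apache.hadoop.yarn.api.protocolrecords request/response types and YarnRemoteException.

    ContainerManager stub = new ContainerManager() {
      @Override
      public StartContainerResponse startContainer(StartContainerRequest request)
          throws YarnRemoteException {
        // Inspect the AM's ContainerLaunchContext here instead of launching.
        return null;
      }

      @Override
      public StopContainerResponse stopContainer(StopContainerRequest request)
          throws YarnRemoteException {
        return null;
      }

      @Override
      public GetContainerStatusResponse getContainerStatus(
          GetContainerStatusRequest request) throws YarnRemoteException {
        return null;
      }
    };
    MockRM rm = new MockRMWithCustomAMLauncher(new YarnConfiguration(), stub);
    rm.start();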
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
index 9d03e91..3250501 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.server.resourcemanager.TestApplicationMasterLauncher.MockRMWithCustomAMLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index 25dc899..d84bd57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -49,6 +48,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.service.Service;
 import org.junit.Test;
@@ -140,7 +140,7 @@
     }
 
     public TestRMAppManager(RMContext context,
-        ClientToAMSecretManager clientToAMSecretManager,
+        ClientToAMTokenSecretManagerInRM clientToAMSecretManager,
         YarnScheduler scheduler, ApplicationMasterService masterService,
         ApplicationACLsManager applicationACLsManager, Configuration conf) {
       super(context, clientToAMSecretManager, scheduler, masterService,
@@ -342,7 +342,7 @@
     ApplicationMasterService masterService =
         new ApplicationMasterService(rmContext, scheduler);
     TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
-        new ClientToAMSecretManager(), scheduler, masterService,
+        new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
         new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(1);
@@ -390,7 +390,7 @@
     ApplicationMasterService masterService =
         new ApplicationMasterService(rmContext, scheduler);
     TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
-        new ClientToAMSecretManager(), scheduler, masterService,
+        new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
         new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(10);
@@ -438,7 +438,7 @@
     ApplicationMasterService masterService =
         new ApplicationMasterService(rmContext, scheduler);
     TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
-        new ClientToAMSecretManager(), scheduler, masterService,
+        new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
         new ApplicationACLsManager(conf), conf);
 
     ApplicationId appID = MockApps.newAppID(0);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
index 7777258..90620ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java
@@ -22,7 +22,6 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ContainerManager;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
@@ -35,9 +34,6 @@
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
@@ -106,40 +102,6 @@
 
   }
 
-  static class MockRMWithCustomAMLauncher extends MockRM {
-
-    private final ContainerManager containerManager;
-
-    public MockRMWithCustomAMLauncher(ContainerManager containerManager) {
-      this(new Configuration(), containerManager);
-    }
-
-    public MockRMWithCustomAMLauncher(Configuration conf,
-        ContainerManager containerManager) {
-      super(conf);
-      this.containerManager = containerManager;
-    }
-
-    @Override
-    protected ApplicationMasterLauncher createAMLauncher() {
-      return new ApplicationMasterLauncher(super.clientToAMSecretManager,
-        getRMContext()) {
-        @Override
-        protected Runnable createRunnableLauncher(RMAppAttempt application,
-            AMLauncherEventType event) {
-          return new AMLauncher(context, application, event,
-            clientToAMSecretManager, getConfig()) {
-            @Override
-            protected ContainerManager getContainerMgrProxy(
-                ContainerId containerId) {
-              return containerManager;
-            }
-          };
-        }
-      };
-    }
-  }
-
   @Test
   public void testAMLaunchAndCleanup() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
index d291b38..968b709 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
@@ -52,18 +52,27 @@
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestFifoScheduler {
   private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
   
   private final int GB = 1024;
+  private static YarnConfiguration conf;
+  
+  @BeforeClass
+  public static void setup() {
+    conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, 
+        FifoScheduler.class, ResourceScheduler.class);
+  }
   
   @Test
   public void test() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);
-    MockRM rm = new MockRM();
+    MockRM rm = new MockRM(conf);
     rm.start();
     MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
     MockNM nm2 = rm.registerNode("h2:5678", 4 * GB);
@@ -178,15 +187,15 @@
   public void testDefaultMinimumAllocation() throws Exception {
     // Test with something lesser than default
     testMinimumAllocation(
-        new YarnConfiguration(),
+        new YarnConfiguration(TestFifoScheduler.conf),
         YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB / 2);
   }
 
   @Test
   public void testNonDefaultMinimumAllocation() throws Exception {
     // Set custom min-alloc to test tweaking it
-    int allocMB = 512;
-    YarnConfiguration conf = new YarnConfiguration();
+    int allocMB = 1536;
+    YarnConfiguration conf = new YarnConfiguration(TestFifoScheduler.conf);
     conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, allocMB);
     // Test for something lesser than this.
     testMinimumAllocation(conf, allocMB / 2);
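The recurring idiom in this file, shown standalone (a sketch assuming the test's existing imports of FifoScheduler and ResourceScheduler): pinning RM_SCHEDULER keeps the test deterministic no matter which scheduler a stray yarn-site.xml on the classpath would otherwise select.

    YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER,
        FifoScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);  // guaranteed to run the FIFO scheduler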
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 536aa67..ce64065 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
@@ -45,7 +46,7 @@
   
   @Before
   public void setUp() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new YarnConfiguration();
     Store store = StoreFactory.getStore(conf);
     resourceManager = new ResourceManager(store);
     resourceManager.init(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index d785255..45b0c99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -54,7 +54,7 @@
   private MockRM rm;
 
   /**
-   * decommissioning using a include hosts file
+   * Decommissioning using a pre-configured include hosts file
    */
   @Test
   public void testDecommissionWithIncludeHosts() throws Exception {
@@ -86,7 +86,7 @@
     String ip = NetUtils.normalizeHostName("localhost");
     writeToHostsFile("host1", ip);
 
-    rm.getNodesListManager().refreshNodes();
+    rm.getNodesListManager().refreshNodes(conf);
 
     nodeHeartbeat = nm1.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
@@ -106,7 +106,7 @@
   }
 
   /**
-   * decommissioning using a exclude hosts file
+   * Decommissioning using a pre-configured exclude hosts file
    */
   @Test
   public void testDecommissionWithExcludeHosts() throws Exception {
@@ -133,7 +133,7 @@
     String ip = NetUtils.normalizeHostName("localhost");
     writeToHostsFile("host2", ip);
 
-    rm.getNodesListManager().refreshNodes();
+    rm.getNodesListManager().refreshNodes(conf);
 
     nodeHeartbeat = nm1.nodeHeartbeat(true);
     Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
@@ -147,7 +147,81 @@
         NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
     checkDecommissionedNMCount(rm, ++metricCount);
   }
+
+  /**
+   * Decommissioning using a post-configured include hosts file
+   */
+  @Test
+  public void testAddNewIncludePathToConfiguration() throws Exception {
+    Configuration conf = new Configuration();
+    rm = new MockRM(conf);
+    rm.start();
+    MockNM nm1 = rm.registerNode("host1:1234", 5120);
+    MockNM nm2 = rm.registerNode("host2:5678", 10240);
+    ClusterMetrics metrics = ClusterMetrics.getMetrics();
+    Assert.assertNotNull(metrics);
+    int initialMetricCount = metrics.getNumDecommisionedNMs();
+    HeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    nodeHeartbeat = nm2.nodeHeartbeat(true);
+    Assert.assertEquals(
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    writeToHostsFile("host1");
+    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile
+        .getAbsolutePath());
+    rm.getNodesListManager().refreshNodes(conf);
+    nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(
+        "Node should not have been decomissioned.",
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    nodeHeartbeat = nm2.nodeHeartbeat(true);
+    Assert.assertEquals("Node should have been decomissioned but is in state" +
+        nodeHeartbeat.getNodeAction(),
+        NodeAction.SHUTDOWN, nodeHeartbeat.getNodeAction());
+    checkDecommissionedNMCount(rm, ++initialMetricCount);
+  }
   
+  /**
+   * Decommissioning using a post-configured exclude hosts file
+   */
+  @Test
+  public void testAddNewExcludePathToConfiguration() throws Exception {
+    Configuration conf = new Configuration();
+    rm = new MockRM(conf);
+    rm.start();
+    MockNM nm1 = rm.registerNode("host1:1234", 5120);
+    MockNM nm2 = rm.registerNode("host2:5678", 10240);
+    ClusterMetrics metrics = ClusterMetrics.getMetrics();
+    Assert.assertNotNull(metrics);
+    int initialMetricCount = metrics.getNumDecommisionedNMs();
+    HeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    nodeHeartbeat = nm2.nodeHeartbeat(true);
+    Assert.assertEquals(
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    writeToHostsFile("host2");
+    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile
+        .getAbsolutePath());
+    rm.getNodesListManager().refreshNodes(conf);
+    nodeHeartbeat = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(
+        "Node should not have been decomissioned.",
+        NodeAction.NORMAL,
+        nodeHeartbeat.getNodeAction());
+    nodeHeartbeat = nm2.nodeHeartbeat(true);
+    Assert.assertEquals("Node should have been decomissioned but is in state" +
+        nodeHeartbeat.getNodeAction(),
+        NodeAction.SHUTDOWN, nodeHeartbeat.getNodeAction());
+    checkDecommissionedNMCount(rm, ++initialMetricCount);
+  }
+
   @Test
   public void testNodeRegistrationFailure() throws Exception {
     writeToHostsFile("host1");
@@ -268,10 +342,12 @@
     MockNM nm2 = rm.registerNode("host2:5678", 5120);
     nm1.nodeHeartbeat(true);
     nm2.nodeHeartbeat(false);
+    dispatcher.await();
     checkUnealthyNMCount(rm, nm2, true, 1);
     final int expectedNMs = ClusterMetrics.getMetrics().getNumActiveNMs();
     QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
-    Assert.assertEquals(5120 + 5120, metrics.getAvailableMB());
+    // TODO Metrics incorrect in case of the FifoScheduler
+    Assert.assertEquals(5120, metrics.getAvailableMB());
 
     // reconnect of healthy node
     nm1 = rm.registerNode("host1:1234", 5120);
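The refresh flow the two new decommission tests above exercise, reduced to its essentials (a sketch reusing this class's writeToHostsFile/hostFile test helpers, not standalone code):

    Configuration conf = new Configuration();
    writeToHostsFile("host2");  // the node to be decommissioned
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
        hostFile.getAbsolutePath());
    rm.getNodesListManager().refreshNodes(conf);  // re-reads the hosts lists
    // Excluded nodes receive NodeAction.SHUTDOWN on their next heartbeat;
    // all other nodes keep receiving NodeAction.NORMAL.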
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java
index d709bee..d607c01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRMRPCNodeUpdates.java
@@ -90,10 +90,10 @@
   @Test
   public void testAMRMUnusableNodes() throws Exception {
     
-    MockNM nm1 = rm.registerNode("h1:1234", 5000);
-    MockNM nm2 = rm.registerNode("h2:1234", 5000);
-    MockNM nm3 = rm.registerNode("h3:1234", 5000);
-    MockNM nm4 = rm.registerNode("h4:1234", 5000);
+    MockNM nm1 = rm.registerNode("h1:1234", 10000);
+    MockNM nm2 = rm.registerNode("h2:1234", 10000);
+    MockNM nm3 = rm.registerNode("h3:1234", 10000);
+    MockNM nm4 = rm.registerNode("h4:1234", 10000);
 
     RMApp app1 = rm.submitApp(2000);
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index c3da2c3..d7bb90b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -262,6 +262,10 @@
     assertEquals(0, applicationAttempt.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     
+    // Check events
+    verify(masterService).
+        unregisterAttempt(applicationAttempt.getAppAttemptId());
+    
     // this works for unmanaged and managed AM's because this is actually doing
     // verify(application).handle(anyObject());
     verify(application).handle(any(RMAppRejectedEvent.class));
@@ -527,7 +531,8 @@
     // launch AM and verify attempt failed
     applicationAttempt.handle(new RMAppAttemptRegistrationEvent(
         applicationAttempt.getAppAttemptId(), "host", 8042, "oldtrackingurl"));
-    testAppAttemptSubmittedToFailedState("Unmanaged AM must register after AM attempt reaches LAUNCHED state.");
+    testAppAttemptSubmittedToFailedState(
+        "Unmanaged AM must register after AM attempt reaches LAUNCHED state.");
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueSchedulable.java
new file mode 100644
index 0000000..0fc7479
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSQueueSchedulable.java
@@ -0,0 +1,42 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestFSQueueSchedulable {
+  private FSQueueSchedulable schedulable = null;
+  private Resource maxResource = Resources.createResource(10);
+
+  @Before
+  public void setup() {
+    String queueName = "testFSQueue";
+    FSQueue mockQueue = mock(FSQueue.class);
+    when(mockQueue.getName()).thenReturn(queueName);
+
+    QueueManager mockMgr = mock(QueueManager.class);
+    when(mockMgr.getMaxResources(queueName)).thenReturn(maxResource);
+
+    schedulable = new FSQueueSchedulable(null, mockQueue, mockMgr, null, 0, 0);
+  }
+
+  @Test
+  public void testUpdateDemand() {
+    AppSchedulable app = mock(AppSchedulable.class);
+    Mockito.when(app.getDemand()).thenReturn(maxResource);
+
+    schedulable.addApp(app);
+    schedulable.addApp(app);
+
+    schedulable.updateDemand();
+
+    assertTrue("Demand is greater than max allowed ",
+        Resources.equals(schedulable.getDemand(), maxResource));
+  }
+}
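What the assertion pins down: both mocked apps report a demand equal to maxResource (10), so the naive sum would be 20, but updateDemand() caps the queue's aggregate demand at the QueueManager.getMaxResources(queueName) value wired up in setup(), which is why the schedulable ends up reporting exactly maxResource.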
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 8419eb4..4a26920 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
@@ -93,7 +94,7 @@
   @Before
   public void setUp() throws IOException {
     scheduler = new FairScheduler();
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     // All tests assume only one assignment per node update
     conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");
     Store store = StoreFactory.getStore(conf);
@@ -109,6 +110,13 @@
     resourceManager = null;
   }
 
+  private Configuration createConfiguration() {
+    Configuration conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+        ResourceScheduler.class);
+    return conf;
+  }
+
   private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
     ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class);
     ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class);
@@ -217,7 +225,9 @@
       new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>());
     scheduler.handle(updateEvent);
 
-    assertEquals(512, scheduler.getQueueManager().getQueue("queue1").
+    // Asked for less than min_allocation.
+    assertEquals(YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        scheduler.getQueueManager().getQueue("queue1").
         getQueueSchedulable().getResourceUsage().getMemory());
 
     NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2,
@@ -278,7 +288,7 @@
 
   @Test
   public void testUserAsDefaultQueue() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "true");
     scheduler.reinitialize(conf, resourceManager.getRMContext());
     AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(
@@ -299,7 +309,7 @@
 
   @Test
   public void testFairShareWithMinAlloc() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -354,31 +364,35 @@
     ApplicationAttemptId id22 = createAppAttemptId(2, 2);
     scheduler.addApplication(id22, "queue2", "user1");
 
-    // First ask, queue1 requests 1024
+    int minReqSize = YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB;
+    
+    // First ask, queue1 requests 1 large (minReqSize * 2).
     List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
-    ResourceRequest request1 = createResourceRequest(1024, "*", 1, 1);
+    ResourceRequest request1 = createResourceRequest(minReqSize * 2, "*", 1, 1);
     ask1.add(request1);
     scheduler.allocate(id11, ask1, new ArrayList<ContainerId>());
 
-    // Second ask, queue2 requests 1024 + (2 * 512)
+    // Second ask, queue2 requests 1 large + (2 * minReqSize)
     List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
-    ResourceRequest request2 = createResourceRequest(1024, "foo", 1, 1);
-    ResourceRequest request3 = createResourceRequest(512, "bar", 1, 2);
+    ResourceRequest request2 = createResourceRequest(2 * minReqSize, "foo", 1, 1);
+    ResourceRequest request3 = createResourceRequest(minReqSize, "bar", 1, 2);
     ask2.add(request2);
     ask2.add(request3);
     scheduler.allocate(id21, ask2, new ArrayList<ContainerId>());
 
-    // Third ask, queue2 requests 1024
+    // Third ask, queue2 requests 1 large
     List<ResourceRequest> ask3 = new ArrayList<ResourceRequest>();
-    ResourceRequest request4 = createResourceRequest(1024, "*", 1, 1);
+    ResourceRequest request4 = createResourceRequest(2 * minReqSize, "*", 1, 1);
     ask3.add(request4);
     scheduler.allocate(id22, ask3, new ArrayList<ContainerId>());
 
     scheduler.update();
 
-    assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").getQueueSchedulable().getDemand().getMemory());
-    assertEquals(1024 + 1024 + (2 * 512), scheduler.getQueueManager().getQueue("queue2").getQueueSchedulable().getDemand().getMemory());
-
+    assertEquals(2 * minReqSize, scheduler.getQueueManager().getQueue("queue1")
+        .getQueueSchedulable().getDemand().getMemory());
+    assertEquals(2 * minReqSize + 2 * minReqSize + (2 * minReqSize), scheduler
+        .getQueueManager().getQueue("queue2").getQueueSchedulable().getDemand()
+        .getMemory());
   }
 
   @Test
@@ -405,7 +419,7 @@
 
   @Test
   public void testAllocationFileParsing() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -508,7 +522,7 @@
 
   @Test
   public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -611,7 +625,7 @@
 
   @Test
   public void testIsStarvedForMinShare() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -670,7 +684,7 @@
 
   @Test
   public void testIsStarvedForFairShare() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -734,7 +748,7 @@
    * now this means decreasing order of priority.
    */
   public void testChoiceOfPreemptedContainers() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE);
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
@@ -867,7 +881,7 @@
    * Tests the timing of decision to preempt tasks.
    */
   public void testPreemptionDecision() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = createConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
     MockClock clock = new MockClock();
     scheduler.setClock(clock);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 317f892..2b21b2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.util.BuilderUtils;
@@ -59,7 +61,10 @@
   public void setUp() throws Exception {
     Store store = StoreFactory.getStore(new Configuration());
     resourceManager = new ResourceManager(store);
-    resourceManager.init(new Configuration());
+    Configuration conf = new Configuration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, 
+        FifoScheduler.class, ResourceScheduler.class);
+    resourceManager.init(conf);
   }
 
   @After
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
new file mode 100644
index 0000000..032e6c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
@@ -0,0 +1,315 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.security;
+
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.lang.annotation.Annotation;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+
+import junit.framework.Assert;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.ClientTokenSelector;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRMWithCustomAMLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Test;
+
+public class TestClientTokens {
+
+  private interface CustomProtocol {
+    public static final long versionID = 1L;
+
+    public void ping();
+  }
+
+  private static class CustomSecurityInfo extends SecurityInfo {
+
+    @Override
+    public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+      return new TokenInfo() {
+
+        @Override
+        public Class<? extends Annotation> annotationType() {
+          return null;
+        }
+
+        @Override
+        public Class<? extends TokenSelector<? extends TokenIdentifier>>
+            value() {
+          return ClientTokenSelector.class;
+        }
+      };
+    }
+
+    @Override
+    public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+      return null;
+    }
+  };
+
+  private static class CustomAM extends AbstractService implements
+      CustomProtocol {
+
+    private final ApplicationId appId;
+    private final String secretKey;
+    private InetSocketAddress address;
+    private boolean pinged = false;
+
+    public CustomAM(ApplicationId appId, String secretKeyStr) {
+      super("CustomAM");
+      this.appId = appId;
+      this.secretKey = secretKeyStr;
+    }
+
+    @Override
+    public void ping() {
+      this.pinged = true;
+    }
+
+    @Override
+    public synchronized void start() {
+      Configuration conf = getConfig();
+
+      ClientToAMTokenSecretManager secretManager = null;
+      byte[] bytes = Base64.decodeBase64(this.secretKey);
+      secretManager = new ClientToAMTokenSecretManager(this.appId, bytes);
+      Server server;
+      try {
+        server =
+            new RPC.Builder(conf).setProtocol(CustomProtocol.class)
+              .setNumHandlers(1).setSecretManager(secretManager)
+              .setInstance(this).build();
+      } catch (Exception e) {
+        throw new YarnException(e);
+      }
+      server.start();
+      this.address = NetUtils.getConnectAddress(server);
+      super.start();
+    }
+  }
+
+  private static class CustomNM implements ContainerManager {
+
+    public String clientTokensSecret;
+
+    @Override
+    public StartContainerResponse startContainer(StartContainerRequest request)
+        throws YarnRemoteException {
+      this.clientTokensSecret =
+          request.getContainerLaunchContext().getEnvironment()
+            .get(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
+      return null;
+    }
+
+    @Override
+    public StopContainerResponse stopContainer(StopContainerRequest request)
+        throws YarnRemoteException {
+      return null;
+    }
+
+    @Override
+    public GetContainerStatusResponse getContainerStatus(
+        GetContainerStatusRequest request) throws YarnRemoteException {
+      return null;
+    }
+
+  }
+
+  @Test
+  public void testClientTokens() throws Exception {
+
+    final Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+      "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+
+    CustomNM containerManager = new CustomNM();
+    final DrainDispatcher dispatcher = new DrainDispatcher();
+
+    MockRM rm = new MockRMWithCustomAMLauncher(conf, containerManager) {
+      protected ClientRMService createClientRMService() {
+        return new ClientRMService(this.rmContext, scheduler,
+          this.rmAppManager, this.applicationACLsManager,
+          this.rmDTSecretManager);
+      };
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+
+      @Override
+      protected void doSecureLogin() throws IOException {
+      }
+    };
+    rm.start();
+
+    // Submit an app
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    // Set up a node.
+    MockNM nm1 = rm.registerNode("localhost:1234", 3072);
+    nm1.nodeHeartbeat(true);
+    dispatcher.await();
+
+    // Get the app-report.
+    GetApplicationReportRequest request =
+        Records.newRecord(GetApplicationReportRequest.class);
+    request.setApplicationId(app.getApplicationId());
+    GetApplicationReportResponse reportResponse =
+        rm.getClientRMService().getApplicationReport(request);
+    ApplicationReport appReport = reportResponse.getApplicationReport();
+    String clientTokenEncoded = appReport.getClientToken();
+
+    // Wait till AM is 'launched'
+    int waitTime = 0;
+    while (containerManager.clientTokensSecret == null && waitTime++ < 20) {
+      Thread.sleep(1000);
+    }
+    Assert.assertNotNull(containerManager.clientTokensSecret);
+
+    // Start the AM with the correct shared-secret.
+    final CustomAM am =
+        new CustomAM(app.getApplicationId(),
+          containerManager.clientTokensSecret);
+    am.init(conf);
+    am.start();
+
+    // Now the real test!
+    // Set up clients to be able to pick up correct tokens.
+    SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
+
+    // Verify denial for unauthenticated user
+    try {
+      CustomProtocol client =
+          (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
+            conf);
+      client.ping();
+      fail("Access by unauthenticated user should fail!!");
+    } catch (Exception e) {
+      Assert.assertFalse(am.pinged);
+    }
+
+    // Verify denial for a malicious user
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
+    Token<ClientTokenIdentifier> clientToken =
+        new Token<ClientTokenIdentifier>();
+    clientToken.decodeFromUrlString(clientTokenEncoded);
+    // RPC layer client expects ip:port as service for tokens
+    SecurityUtil.setTokenService(clientToken, am.address);
+
+    // Malicious user, messes with appId
+    ClientTokenIdentifier maliciousID =
+        new ClientTokenIdentifier(BuilderUtils.newApplicationId(app
+          .getApplicationId().getClusterTimestamp(), 42));
+
+    Token<ClientTokenIdentifier> maliciousToken =
+        new Token<ClientTokenIdentifier>(maliciousID.getBytes(),
+          clientToken.getPassword(), clientToken.getKind(),
+          clientToken.getService());
+    ugi.addToken(maliciousToken);
+
+    try {
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          CustomProtocol client =
+              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
+                am.address, conf);
+          client.ping();
+          fail("Connection initiation with illegally modified "
+              + "tokens is expected to fail.");
+          return null;
+        }
+      });
+    } catch (YarnRemoteException e) {
+      fail("Cannot get a YARN remote exception as "
+          + "it will indicate RPC success");
+    } catch (Exception e) {
+      Assert
+        .assertEquals(java.lang.reflect.UndeclaredThrowableException.class
+          .getCanonicalName(), e.getClass().getCanonicalName());
+      Assert.assertTrue(e
+        .getCause()
+        .getMessage()
+        .contains(
+          "DIGEST-MD5: digest response format violation. "
+              + "Mismatched response."));
+      Assert.assertFalse(am.pinged);
+    }
+
+    // Now for an authenticated user
+    ugi = UserGroupInformation.createRemoteUser("me");
+    ugi.addToken(clientToken);
+
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        CustomProtocol client =
+            (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
+              conf);
+        client.ping();
+        Assert.assertTrue(am.pinged);
+        return null;
+      }
+    });
+  }
+
+}
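The happy path the test drives, condensed (names taken from the test above; a sketch, not standalone code):

    // 1. The RM publishes an encoded client token in the application report.
    String encoded = appReport.getClientToken();
    // 2. The client materializes it and points it at the AM's RPC address.
    Token<ClientTokenIdentifier> token = new Token<ClientTokenIdentifier>();
    token.decodeFromUrlString(encoded);
    SecurityUtil.setTokenService(token, am.address);
    // 3. Calls made under a UGI carrying the token authenticate via
    //    DIGEST-MD5 against the secret the RM shipped in the AM container's
    //    environment (APPLICATION_CLIENT_SECRET_ENV_NAME above).
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
    ugi.addToken(token);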
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index bcbdd07..c410478 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.service.Service.STATE;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
@@ -74,7 +75,10 @@
       bind(JAXBContextResolver.class);
       bind(RMWebServices.class);
       bind(GenericExceptionHandler.class);
-      rm = new MockRM(new Configuration());
+      Configuration conf = new Configuration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       bind(RMContext.class).toInstance(rm.getRMContext());
       bind(ApplicationACLsManager.class).toInstance(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index bd4399f..b81237d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -45,6 +45,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
@@ -82,6 +84,8 @@
       bind(GenericExceptionHandler.class);
       Configuration conf = new Configuration();
       conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 2);
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       bind(RMContext.class).toInstance(rm.getRMContext());
@@ -276,9 +280,9 @@
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils
-          .checkStringMatch(
+          .checkStringContains(
               "exception message",
-              "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState.INVALID_test",
+              "org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState.INVALID_test",
               message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "IllegalArgumentException", type);
@@ -355,9 +359,9 @@
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils
-          .checkStringMatch(
+          .checkStringContains(
               "exception message",
-              "No enum const class org.apache.hadoop.yarn.api.records.FinalApplicationStatus.INVALID_test",
+              "org.apache.hadoop.yarn.api.records.FinalApplicationStatus.INVALID_test",
               message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "IllegalArgumentException", type);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index 084dcff..533ea11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -229,9 +229,9 @@
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils
-          .checkStringMatch(
+          .checkStringContains(
               "exception message",
-              "No enum const class org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",
+              "org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",
               message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "IllegalArgumentException", type);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 1a0885f..9a7a978 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -102,6 +102,12 @@
       nodeManagers[index] = new CustomNodeManager();
     }
   }
+  
+  @Override
+  public void init(Configuration conf) {
+    super.init(conf instanceof YarnConfiguration ? conf
+        : new YarnConfiguration(conf));
+  }
 
   public File getTestWorkDir() {
     return testWorkDir;
@@ -201,7 +207,7 @@
     }
 
     public synchronized void init(Configuration conf) {                          
-      Configuration config = new Configuration(conf);                            
+      Configuration config = new YarnConfiguration(conf);                            
       super.init(config);                                                        
     }                                                                            
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 1c7933a..0523a3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -219,9 +219,10 @@
 
     // Malice user modifies the resource amount
     Resource modifiedResource = BuilderUtils.newResource(2048);
-    ContainerTokenIdentifier modifiedIdentifier = new ContainerTokenIdentifier(
-        dummyIdentifier.getContainerID(), dummyIdentifier.getNmHostAddress(),
-        modifiedResource, Long.MAX_VALUE, dummyIdentifier.getMasterKeyId());
+    ContainerTokenIdentifier modifiedIdentifier =
+        new ContainerTokenIdentifier(dummyIdentifier.getContainerID(),
+          dummyIdentifier.getNmHostAddress(), "testUser", modifiedResource,
+          Long.MAX_VALUE, dummyIdentifier.getMasterKeyId());
     Token<ContainerTokenIdentifier> modifiedToken = new Token<ContainerTokenIdentifier>(
         modifiedIdentifier.getBytes(), containerToken.getPassword().array(),
         new Text(containerToken.getKind()), new Text(containerToken
@@ -320,12 +321,14 @@
 
         callWithIllegalContainerID(client, tokenId);
         callWithIllegalResource(client, tokenId);
+        callWithIllegalUserName(client, tokenId);
 
         return client;
       }
     });
     
-    /////////// End of testing for illegal containerIDs and illegal Resources
+    /////////// End of testing for illegal containerIDs, illegal Resources
+    /////////// and illegal users
 
     /////////// Test calls with expired tokens
     RPC.stopProxy(client);
@@ -336,7 +339,7 @@
       resourceManager.getRMContainerTokenSecretManager(); 
     final ContainerTokenIdentifier newTokenId =
         new ContainerTokenIdentifier(tokenId.getContainerID(),
-          tokenId.getNmHostAddress(), tokenId.getResource(),
+          tokenId.getNmHostAddress(), "testUser", tokenId.getResource(),
           System.currentTimeMillis() - 1, 
           containerTokenSecreteManager.getCurrentKey().getKeyId());
     byte[] passowrd =
@@ -346,9 +349,7 @@
     token = new Token<ContainerTokenIdentifier>(
         newTokenId.getBytes(), passowrd, new Text(
             containerToken.getKind()), new Text(containerToken.getService()));
-    
-    
-    
+
     unauthorizedUser.addToken(token);
     unauthorizedUser.doAs(new PrivilegedAction<Void>() {
       @Override
@@ -567,6 +568,29 @@
     }
   }
 
+  void callWithIllegalUserName(ContainerManager client,
+      ContainerTokenIdentifier tokenId) {
+    StartContainerRequest request = recordFactory
+        .newRecordInstance(StartContainerRequest.class);
+    // Authenticated but unauthorized, due to a wrong user-name
+    ContainerLaunchContext context =
+        createContainerLaunchContextForTest(tokenId);
+    context.setUser("Saruman"); // Set a different user-name.
+    request.setContainerLaunchContext(context);
+    try {
+      client.startContainer(request);
+      fail("Connection initiation with unauthorized "
+          + "access is expected to fail.");
+    } catch (YarnRemoteException e) {
+      LOG.info("Got exception : ", e);
+      Assert.assertTrue(e.getMessage().contains(
+          "Unauthorized request to start container. "));
+      Assert.assertTrue(e.getMessage().contains(
+        "Expected user-name " + tokenId.getApplicationSubmitter()
+            + " but found " + context.getUser()));
+    }
+  }
+
   private ContainerLaunchContext createContainerLaunchContextForTest(
       ContainerTokenIdentifier tokenId) {
     ContainerLaunchContext context =
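Reviewer note: for anyone following the new callWithIllegalUserName test above, the sketch below distills the server-side check it exercises. This is a standalone illustration, not Hadoop code -- the class and method names (UserNameCheckSketch, checkUser) are invented for the example, and only the error-message format mirrors what the test asserts. The token password still verifies in this scenario; the rejection comes from comparing the submitter recorded inside the (verified) token identifier against the user named in the launch context.

public class UserNameCheckSketch {

  // Mirrors the NodeManager-side authorization step: the token has already
  // passed password verification, so this is a pure field comparison.
  static void checkUser(String applicationSubmitter, String contextUser) {
    if (!applicationSubmitter.equals(contextUser)) {
      throw new SecurityException("Unauthorized request to start container. "
          + "Expected user-name " + applicationSubmitter + " but found "
          + contextUser);
    }
  }

  public static void main(String[] args) {
    try {
      // The token was issued for testUser; the launch context claims Saruman.
      checkUser("testUser", "Saruman");
    } catch (SecurityException e) {
      System.out.println("Got exception : " + e.getMessage());
    }
  }
}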
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
index 0f57f33..58123cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
@@ -278,17 +278,17 @@
 | | Existing applications continue to completion, thus the queue can be 
 | | <drained> gracefully. | 
 *--------------------------------------+--------------------------------------+
-| <<<yarn.scheduler.capacity.root.<queue-path>.acl_submit_jobs>>> | |
-| | The <ACL> which controls who can <submit> jobs to the given queue. |
+| <<<yarn.scheduler.capacity.root.<queue-path>.acl_submit_applications>>> | |
+| | The <ACL> which controls who can <submit> applications to the given queue. |
 | | If the given user/group has necessary ACLs on the given queue or |
-| | <one of the parent queues in the hierarchy> they can submit jobs. |
+| | <one of the parent queues in the hierarchy> they can submit applications. |
 | | <ACLs> for this property <are> inherited from the parent queue |
 | | if not specified. |
 *--------------------------------------+--------------------------------------+
-| <<<yarn.scheduler.capacity.root.<queue-path>.acl_administer_jobs>>> | |
-| | The <ACL> which controls who can <administer> jobs on the given queue. |
+| <<<yarn.scheduler.capacity.root.<queue-path>.acl_administer_queue>>> | |
+| | The <ACL> which controls who can <administer> applications on the given queue. |
 | | If the given user/group has necessary ACLs on the given queue or |
-| | <one of the parent queues in the hierarchy> they can administer jobs. |
+| | <one of the parent queues in the hierarchy> they can administer applications. |
 | | <ACLs> for this property <are> inherited from the parent queue |
 | | if not specified. |
 *--------------------------------------+--------------------------------------+
@@ -318,7 +318,7 @@
   
 ----
 $ vi $HADOOP_CONF_DIR/capacity-scheduler.xml
-$ $YARN_HOME/bin/yarn rmadmin -refreshQueues
+$ $HADOOP_YARN_HOME/bin/yarn rmadmin -refreshQueues
 ----  
 
   <Note:> Queues cannot be <deleted>, only addition of new queues is supported -
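Reviewer note: since acl_submit_jobs and acl_administer_jobs silently stop taking effect after this rename, a quick way to audit a cluster's capacity-scheduler.xml is to load it through org.apache.hadoop.conf.Configuration and probe both the old and new keys. A minimal sketch, assuming hadoop-common is on the classpath, capacity-scheduler.xml resolves as a classpath resource, and a queue named "default" exists:

import org.apache.hadoop.conf.Configuration;

public class AclKeyCheck {
  public static void main(String[] args) {
    // Load only the scheduler file, without the default resources.
    Configuration conf = new Configuration(false);
    conf.addResource("capacity-scheduler.xml");

    String prefix = "yarn.scheduler.capacity.root.default.";
    // Keys that take effect after this patch:
    System.out.println("submit ACL:     "
        + conf.get(prefix + "acl_submit_applications", "<unset>"));
    System.out.println("administer ACL: "
        + conf.get(prefix + "acl_administer_queue", "<unset>"));
    // Old keys that are now ignored; anything other than <unset> here
    // indicates stale configuration that needs migrating:
    System.out.println("stale submit:   "
        + conf.get(prefix + "acl_submit_jobs", "<unset>"));
    System.out.println("stale admin:    "
        + conf.get(prefix + "acl_administer_jobs", "<unset>"));
  }
}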
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
index 6731255..c038937 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
@@ -497,20 +497,20 @@
     ResourceManager:
   
 ----
-  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
+  $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
 ----
 
     Run a script to start NodeManagers on all slaves:
 
 ----
-  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
+  $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
 ----
 
     Start a standalone WebAppProxy server.  If multiple servers
     are used with load balancing it should be run on each of them:
 
 ----
-  $ $YARN_HOME/bin/yarn start proxyserver --config $HADOOP_CONF_DIR  
+  $ $HADOOP_YARN_HOME/bin/yarn start proxyserver --config $HADOOP_CONF_DIR  
 ----
 
     Start the MapReduce JobHistory Server with the following command, run on the
@@ -539,20 +539,20 @@
     ResourceManager:
   
 ----
-  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
+  $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
 ----
 
     Run a script to stop NodeManagers on all slaves:
 
 ----
-  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
+  $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
 ----
 
     Stop the WebAppProxy server. If multiple servers are used with load
     balancing it should be run on each of them:
 
 ----
-  $ $YARN_HOME/bin/yarn stop proxyserver --config $HADOOP_CONF_DIR  
+  $ $HADOOP_YARN_HOME/bin/yarn stop proxyserver --config $HADOOP_CONF_DIR  
 ----
 
 
@@ -883,7 +883,7 @@
       The path passed in <<<-Dcontainer-executor.conf.dir>>> should be the 
       path on the cluster nodes where a configuration file for the setuid 
       executable should be located. The executable should be installed in
-      $YARN_HOME/bin.
+      $HADOOP_YARN_HOME/bin.
 
       The executable must have specific permissions: 6050 or --Sr-s--- 
       permissions user-owned by <root> (super-user) and group-owned by a 
@@ -1040,13 +1040,13 @@
     ResourceManager as <yarn>:
   
 ----
-[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
+[yarn]$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
 ----
 
     Run a script to start NodeManagers on all slaves as <yarn>:
 
 ----
-[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
+[yarn]$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
 ----
 
     Start a standalone WebAppProxy server. Run on the WebAppProxy 
@@ -1054,7 +1054,7 @@
     it should be run on each of them:
 
 ----
-[yarn]$ $YARN_HOME/bin/yarn start proxyserver --config $HADOOP_CONF_DIR  
+[yarn]$ $HADOOP_YARN_HOME/bin/yarn start proxyserver --config $HADOOP_CONF_DIR  
 ----
 
     Start the MapReduce JobHistory Server with the following command, run on the
@@ -1083,13 +1083,13 @@
     ResourceManager as <yarn>:
   
 ----
-[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
+[yarn]$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
 ----
 
     Run a script to stop NodeManagers on all slaves as <yarn>:
 
 ----
-[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
+[yarn]$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
 ----
 
     Stop the WebAppProxy server. Run on the WebAppProxy server as
@@ -1097,7 +1097,7 @@
     should be run on each of them:
 
 ----
-[yarn]$ $YARN_HOME/bin/yarn stop proxyserver --config $HADOOP_CONF_DIR  
+[yarn]$ $HADOOP_YARN_HOME/bin/yarn stop proxyserver --config $HADOOP_CONF_DIR  
 ----
 
     Stop the MapReduce JobHistory Server with the following command, run on the
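Reviewer note: the container-executor requirements above (installed under $HADOOP_YARN_HOME/bin, mode 6050 / --Sr-s---, root-owned) are easy to get wrong during the YARN_HOME to HADOOP_YARN_HOME transition. Below is a minimal verification sketch using only the JDK; the binary name container-executor and the check itself are illustrative assumptions, and the NIO permission view cannot see the setuid/setgid bits, so only the rwx portion and the owner are inspected:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermissions;

public class ContainerExecutorCheck {
  public static void main(String[] args) throws Exception {
    String yarnHome = System.getenv("HADOOP_YARN_HOME");
    if (yarnHome == null) {
      System.err.println("HADOOP_YARN_HOME is not set");
      return;
    }
    Path exec = Paths.get(yarnHome, "bin", "container-executor");
    PosixFileAttributes attrs =
        Files.readAttributes(exec, PosixFileAttributes.class);
    // Mode 6050 renders as --Sr-s---; the NIO view drops the setuid/setgid
    // bits, so the expected rwx portion here is ---r-x---.
    String perms = PosixFilePermissions.toString(attrs.permissions());
    System.out.println("owner: " + attrs.owner().getName()
        + " (want root), perms: " + perms + " (want ---r-x---)");
  }
}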
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm
index f4ea1fe..0cec916 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm
@@ -43,7 +43,7 @@
   Assuming you have installed hadoop-common/hadoop-hdfs and exported
   <<$HADOOP_COMMON_HOME>>/<<$HADOOP_HDFS_HOME>>, untar hadoop mapreduce 
   tarball and set environment variable <<$HADOOP_MAPRED_HOME>> to the 
-  untarred directory. Set <<$YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>. 
+  untarred directory. Set <<$HADOOP_YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>. 
  
   <<NOTE:>> The following instructions assume you have hdfs running.
 
@@ -174,7 +174,7 @@
 * Running daemons.
 
   Assuming that the environment variables <<$HADOOP_COMMON_HOME>>, <<$HADOOP_HDFS_HOME>>, <<$HADOOP_MAPRED_HOME>>,
-  <<$YARN_HOME>>, <<$JAVA_HOME>> and <<$HADOOP_CONF_DIR>> have been set appropriately.
+  <<$HADOOP_YARN_HOME>>, <<$JAVA_HOME>> and <<$HADOOP_CONF_DIR>> have been set appropriately.
   Set <<$YARN_CONF_DIR>> the same as <<$HADOOP_CONF_DIR>>.
  
   Run ResourceManager and NodeManager as:
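Reviewer note: because this page now mixes the renamed HADOOP_YARN_HOME with the unchanged variables, a tiny preflight check helps catch a stale environment that still only exports YARN_HOME. A minimal sketch; the class name EnvCheck is invented, and the variable list is taken straight from the paragraph above:

public class EnvCheck {
  public static void main(String[] args) {
    // The variables this page asks for, with HADOOP_YARN_HOME replacing
    // the old YARN_HOME.
    String[] required = {
        "HADOOP_COMMON_HOME", "HADOOP_HDFS_HOME", "HADOOP_MAPRED_HOME",
        "HADOOP_YARN_HOME", "JAVA_HOME", "HADOOP_CONF_DIR"
    };
    boolean ok = true;
    for (String name : required) {
      String value = System.getenv(name);
      if (value == null || value.isEmpty()) {
        System.err.println("missing: " + name);
        ok = false;
      }
    }
    System.out.println(ok ? "environment looks complete"
                          : "export the variables above first");
  }
}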
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/YarnCommands.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/YarnCommands.apt.vm
index a57f07c..386be09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/YarnCommands.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/YarnCommands.apt.vm
@@ -52,6 +52,40 @@
   Usage: yarn jar <jar> [mainClass] args...
 -------
 
+** application
+
+  Prints an application report, lists applications, or kills an application
+
+-------
+  Usage: yarn application <options>
+-------
+
+*---------------+--------------+
+|| COMMAND_OPTIONS || Description                   |
+*---------------+--------------+
+| -status ApplicationId | Prints the status of the application |
+*---------------+--------------+
+| -list | Lists all the applications from the RM |
+*---------------+--------------+
+| -kill ApplicationId | Kills the application |
+*---------------+--------------+
+
+** node
+
+  Prints node report(s)
+
+-------
+  Usage: yarn node <options>
+-------
+
+*---------------+--------------+
+|| COMMAND_OPTIONS || Description                   |
+*---------------+--------------+
+| -status NodeId | Prints the status report of the node |
+*---------------+--------------+
+| -list | Lists all the nodes |
+*---------------+--------------+
+
 ** logs
 
   Dump the container logs
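Reviewer note: the new application/node subcommands documented above are plain launcher invocations, so scripting against them is just a process spawn. A minimal sketch that shells out to "yarn application -list"; it assumes HADOOP_YARN_HOME is exported and a ResourceManager is reachable -- nothing here is a Hadoop API:

import java.io.BufferedReader;
import java.io.InputStreamReader;

public class YarnApplicationList {
  public static void main(String[] args) throws Exception {
    String yarn = System.getenv("HADOOP_YARN_HOME") + "/bin/yarn";
    // Merge stderr into stdout so launcher diagnostics are not lost.
    Process p = new ProcessBuilder(yarn, "application", "-list")
        .redirectErrorStream(true)
        .start();
    BufferedReader r = new BufferedReader(
        new InputStreamReader(p.getInputStream()));
    String line;
    while ((line = r.readLine()) != null) {
      System.out.println(line);
    }
    System.exit(p.waitFor());
  }
}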
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 819519a..c1e3896 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -148,7 +148,6 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.jboss.netty</groupId>