Merge r1346682 through r1354801 from trunk.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3092@1354832 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index fb75b90..9d4d619 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -418,8 +418,8 @@
   echo "======================================================================"
   echo ""
   echo ""
-  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
-  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
+  echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Pnative -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
+  $MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Pnative -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
   if [[ $? != 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-raid-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-raid-dist.xml
new file mode 100644
index 0000000..a7da364
--- /dev/null
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-raid-dist.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-raid-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <!-- Configuration files -->
+    <fileSet>
+      <directory>${basedir}/src/main/conf</directory>
+      <outputDirectory>/etc/hadoop</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sbin</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/libexec</directory>
+      <outputDirectory>/libexec</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <!-- Documentation -->
+    <fileSet>
+      <directory>${project.build.directory}/site</directory>
+      <outputDirectory>/share/doc/hadoop/raid</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/share/hadoop/${hadoop.component}/lib</outputDirectory>
+      <unpack>false</unpack>
+      <scope>runtime</scope>
+      <useProjectArtifact>true</useProjectArtifact>
+    </dependencySet>
+  </dependencySets>
+</assembly>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
new file mode 100644
index 0000000..1e3356d
--- /dev/null
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
@@ -0,0 +1,67 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>hadoop-tools</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>../hadoop-pipes/src/main/native/pipes/api/hadoop</directory>
+      <includes>
+        <include>*.hh</include>
+      </includes>
+      <outputDirectory>/include</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-pipes/src/main/native/utils/api/hadoop</directory>
+      <includes>
+        <include>*.hh</include>
+      </includes>
+      <outputDirectory>/include</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-pipes/target/native</directory>
+      <includes>
+        <include>*.a</include>
+      </includes>
+      <outputDirectory>lib/native</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/share/hadoop/${hadoop.component}/lib</outputDirectory>
+      <unpack>false</unpack>
+      <scope>runtime</scope>
+      <useProjectArtifact>false</useProjectArtifact>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <exclude>org.apache.hadoop:hadoop-mapreduce</exclude>
+        <!-- pipes is native stuff, this just keeps the pom from being packaged -->
+        <exclude>org.apache.hadoop:hadoop-pipes</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+</assembly>
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
index 019874f..cd62a94 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
@@ -21,6 +21,24 @@
 
 /**
  * Annotation to inform users of a package, class or method's intended audience.
+ * Currently the audience can be {@link Public}, {@link LimitedPrivate} or
+ * {@link Private}. <br>
+ * All public classes must have an InterfaceAudience annotation. <br>
+ * <ul>
+ * <li>Public classes that are not marked with this annotation must be
+ * considered by default as {@link Private}.</li> 
+ * 
+ * <li>External applications must only use classes that are marked
+ * {@link Public}. Avoid using non-public classes as these classes
+ * could be removed or changed in incompatible ways.</li>
+ * 
+ * <li>Hadoop projects must only use classes that are marked
+ * {@link LimitedPrivate} or {@link Public}</li>
+ * 
+ * <li> Methods may have a different annotation that is more restrictive
+ * than the audience classification of the class. Example: A class
+ * might be {@link Public}, but a method may be {@link LimitedPrivate}.
+ * </li></ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
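
As a quick illustration of the convention the new javadoc describes (the class and method below are made up for the example, not part of this patch), a class can carry a broad audience while an individual method is annotated more restrictively:

    // Illustrative sketch only -- names are hypothetical.
    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class ExampleRecordUtil {

      // A method may be more restrictive than its class: Public class,
      // LimitedPrivate method.
      @InterfaceAudience.LimitedPrivate({"MapReduce"})
      @InterfaceStability.Unstable
      public static void resetInternalState() {
        // no-op for illustration
      }
    }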
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
index 0ebf949..f78acc2 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
@@ -19,9 +19,20 @@
 
 import java.lang.annotation.Documented;
 
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+
 /**
  * Annotation to inform users of how much to rely on a particular package,
- * class or method not changing over time.
+ * class or method not changing over time. Currently the stability can be
+ * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
+ * 
+ * <ul><li>All classes that are annotated with {@link Public} or
+ * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
+ * <li>Classes that are {@link Private} are to be considered unstable unless
+ * a different InterfaceStability annotation states otherwise.</li>
+ * <li>Incompatible changes must not be made to classes marked as stable.</li></ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index 5a44660..3c59d40 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -266,6 +266,7 @@
         }
       }
     } else {
+      token.set(null);
       throw new AuthenticationException("Authentication failed, status: " + conn.getResponseCode() +
                                         ", message: " + conn.getResponseMessage());
     }
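
With the added token.set(null), a caller's token no longer retains a stale value after a failed exchange. A minimal caller-side sketch, assuming an illustrative endpoint URL:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;

    public class AuthenticatedUrlSketch {
      public static void main(String[] args) throws Exception {
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        URL url = new URL("http://localhost:8080/protected");  // illustrative URL
        try {
          HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
          System.out.println("HTTP status: " + conn.getResponseCode());
        } catch (AuthenticationException ex) {
          // After this change the token is cleared when the server rejects the request.
          System.out.println("token still set? " + token.isSet());  // expected: false
        }
      }
    }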
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 28a4d3d..0bd78f5 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -341,45 +341,50 @@
         LOG.warn("AuthenticationToken ignored: " + ex.getMessage());
         token = null;
       }
-      if (token == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
+      if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
+        if (token == null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
+          }
+          token = authHandler.authenticate(httpRequest, httpResponse);
+          if (token != null && token.getExpires() != 0 &&
+              token != AuthenticationToken.ANONYMOUS) {
+            token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
+          }
+          newToken = true;
         }
-        token = authHandler.authenticate(httpRequest, httpResponse);
-        if (token != null && token != AuthenticationToken.ANONYMOUS) {
-          token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
+        if (token != null) {
+          unauthorizedResponse = false;
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
+          }
+          final AuthenticationToken authToken = token;
+          httpRequest = new HttpServletRequestWrapper(httpRequest) {
+
+            @Override
+            public String getAuthType() {
+              return authToken.getType();
+            }
+
+            @Override
+            public String getRemoteUser() {
+              return authToken.getUserName();
+            }
+
+            @Override
+            public Principal getUserPrincipal() {
+              return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
+            }
+          };
+          if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
+            String signedToken = signer.sign(token.toString());
+            Cookie cookie = createCookie(signedToken);
+            httpResponse.addCookie(cookie);
+          }
+          filterChain.doFilter(httpRequest, httpResponse);
         }
-        newToken = true;
-      }
-      if (token != null) {
+      } else {
         unauthorizedResponse = false;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
-        }
-        final AuthenticationToken authToken = token;
-        httpRequest = new HttpServletRequestWrapper(httpRequest) {
-
-          @Override
-          public String getAuthType() {
-            return authToken.getType();
-          }
-
-          @Override
-          public String getRemoteUser() {
-            return authToken.getUserName();
-          }
-
-          @Override
-          public Principal getUserPrincipal() {
-            return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
-          }
-        };
-        if (newToken && token != AuthenticationToken.ANONYMOUS) {
-          String signedToken = signer.sign(token.toString());
-          Cookie cookie = createCookie(signedToken);
-          httpResponse.addCookie(cookie);
-        }
-        filterChain.doFilter(httpRequest, httpResponse);
       }
     } catch (AuthenticationException ex) {
       unauthorizedMsg = ex.toString();
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
index 958680f..7cafe8b 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
@@ -59,6 +59,34 @@
   public void destroy();
 
   /**
+   * Performs an authentication management operation.
+   * <p/>
+   * This is useful for handling operations like get/renew/cancel
+   * delegation tokens which are being handled as operations of the
+   * service end-point.
+   * <p/>
+   * If the method returns <code>TRUE</code> the request will continue normal
+   * processing; this means the method has not produced any HTTP response.
+   * <p/>
+   * If the method returns <code>FALSE</code> the request will end; this means
+   * the method has produced the corresponding HTTP response.
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   * @return <code>TRUE</code> if the request should be processed as a regular
+   * request,
+   * <code>FALSE</code> otherwise.
+   *
+   * @throws IOException thrown if an IO error occurred.
+   * @throws AuthenticationException thrown if an Authentication error occurred.
+   */
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException;
+
+  /**
    * Performs an authentication step for the given HTTP client request.
    * <p/>
    * This method is invoked by the {@link AuthenticationFilter} only if the HTTP client request is
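
A sketch of how a concrete handler might use the new hook to answer a management request itself instead of letting regular processing continue; the "op" parameter and response body are invented for illustration and are not defined by this patch:

    import java.io.IOException;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;
    import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

    public class TokenManagingHandler extends KerberosAuthenticationHandler {
      @Override
      public boolean managementOperation(AuthenticationToken token,
                                         HttpServletRequest request,
                                         HttpServletResponse response)
          throws IOException, AuthenticationException {
        String op = request.getParameter("op");            // hypothetical parameter
        if ("GETDELEGATIONTOKEN".equals(op) && token != null) {
          response.setStatus(HttpServletResponse.SC_OK);   // handler produced the response
          response.getWriter().write("...token material...");
          return false;                                    // request processing ends here
        }
        return true;                                       // continue regular authentication
      }
    }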
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
index e2856a3..ff68847 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
@@ -115,10 +115,10 @@
    */
   private void generateToken() {
     StringBuffer sb = new StringBuffer();
-    sb.append(USER_NAME).append("=").append(userName).append(ATTR_SEPARATOR);
-    sb.append(PRINCIPAL).append("=").append(principal).append(ATTR_SEPARATOR);
-    sb.append(TYPE).append("=").append(type).append(ATTR_SEPARATOR);
-    sb.append(EXPIRES).append("=").append(expires);
+    sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
+    sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
+    sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);
+    sb.append(EXPIRES).append("=").append(getExpires());
     token = sb.toString();
   }
 
@@ -165,7 +165,7 @@
    * @return if the token has expired.
    */
   public boolean isExpired() {
-    return expires != -1 && System.currentTimeMillis() > expires;
+    return getExpires() != -1 && System.currentTimeMillis() > getExpires();
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 8cad2cc..07b64f4 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -233,6 +233,27 @@
   }
 
   /**
+   * This is an empty implementation; it always returns <code>TRUE</code>.
+   *
+   *
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return <code>TRUE</code>
+   * @throws IOException it is never thrown.
+   * @throws AuthenticationException it is never thrown.
+   */
+  @Override
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    return true;
+  }
+
+  /**
    * It enforces the Kerberos SPNEGO authentication sequence returning an {@link AuthenticationToken} only
    * after the Kerberos SPNEGO sequence has completed successfully.
    * <p/>
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
index 336c36e..1a2f98c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
@@ -94,6 +94,27 @@
   }
 
   /**
+   * This is an empty implementation; it always returns <code>TRUE</code>.
+   *
+   *
+   *
+   * @param token the authentication token if any, otherwise <code>NULL</code>.
+   * @param request the HTTP client request.
+   * @param response the HTTP client response.
+   *
+   * @return <code>TRUE</code>
+   * @throws IOException it is never thrown.
+   * @throws AuthenticationException it is never thrown.
+   */
+  @Override
+  public boolean managementOperation(AuthenticationToken token,
+                                     HttpServletRequest request,
+                                     HttpServletResponse response)
+    throws IOException, AuthenticationException {
+    return true;
+  }
+
+  /**
    * Authenticates an HTTP client request.
    * <p/>
    * It extracts the {@link PseudoAuthenticator#USER_NAME} parameter from the query string and creates
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
index 525af62..2138187 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
@@ -13,6 +13,7 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import junit.framework.Assert;
 import junit.framework.TestCase;
 import org.mockito.Mockito;
 
@@ -100,11 +101,14 @@
     headers.put("Set-Cookie", cookies);
     Mockito.when(conn.getHeaderFields()).thenReturn(headers);
 
+    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+    token.set("bar");
     try {
-      AuthenticatedURL.extractToken(conn, new AuthenticatedURL.Token());
+      AuthenticatedURL.extractToken(conn, token);
       fail();
     } catch (AuthenticationException ex) {
       // Expected
+      Assert.assertFalse(token.isSet());
     } catch (Exception ex) {
       fail();
     }
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index 4f1bc11..1c31e54 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -71,7 +71,9 @@
 
   public static class DummyAuthenticationHandler implements AuthenticationHandler {
     public static boolean init;
+    public static boolean managementOperationReturn;
     public static boolean destroy;
+    public static boolean expired;
 
     public static final String TYPE = "dummy";
 
@@ -83,6 +85,20 @@
     @Override
     public void init(Properties config) throws ServletException {
       init = true;
+      managementOperationReturn =
+        config.getProperty("management.operation.return", "true").equals("true");
+      expired = config.getProperty("expired.token", "false").equals("true");
+    }
+
+    @Override
+    public boolean managementOperation(AuthenticationToken token,
+                                       HttpServletRequest request,
+                                       HttpServletResponse response)
+      throws IOException, AuthenticationException {
+      if (!managementOperationReturn) {
+        response.setStatus(HttpServletResponse.SC_ACCEPTED);
+      }
+      return managementOperationReturn;
     }
 
     @Override
@@ -102,7 +118,7 @@
       String param = request.getParameter("authenticated");
       if (param != null && param.equals("true")) {
         token = new AuthenticationToken("u", "p", "t");
-        token.setExpires(System.currentTimeMillis() + 1000);
+        token.setExpires((expired) ? 0 : System.currentTimeMillis() + 1000);
       } else {
         response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
       }
@@ -170,10 +186,14 @@
     filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
       assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -201,10 +221,14 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -221,12 +245,16 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
@@ -250,12 +278,15 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
@@ -284,12 +315,16 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        AuthenticationFilter.SIGNATURE_SECRET,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
@@ -318,10 +353,14 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -349,10 +388,16 @@
     }
   }
 
-  private void _testDoFilterAuthentication(boolean withDomainPath, boolean invalidToken) throws Exception {
+  private void _testDoFilterAuthentication(boolean withDomainPath,
+                                           boolean invalidToken,
+                                           boolean expired) throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
+      Mockito.when(config.getInitParameter("expired.token")).
+        thenReturn(Boolean.toString(expired));
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
@@ -360,7 +405,9 @@
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+                                 AuthenticationFilter.SIGNATURE_SECRET,
+                                 "management.operation.return",
+                                 "expired.token")).elements());
 
       if (withDomainPath) {
         Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
@@ -370,7 +417,8 @@
                                    AuthenticationFilter.AUTH_TOKEN_VALIDITY,
                                    AuthenticationFilter.SIGNATURE_SECRET,
                                    AuthenticationFilter.COOKIE_DOMAIN,
-                                   AuthenticationFilter.COOKIE_PATH)).elements());
+                                   AuthenticationFilter.COOKIE_PATH,
+                                   "management.operation.return")).elements());
       }
 
       filter.init(config);
@@ -416,26 +464,32 @@
 
       filter.doFilter(request, response, chain);
 
-      assertNotNull(setCookie[0]);
-      assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
-      assertTrue(setCookie[0].getValue().contains("u="));
-      assertTrue(setCookie[0].getValue().contains("p="));
-      assertTrue(setCookie[0].getValue().contains("t="));
-      assertTrue(setCookie[0].getValue().contains("e="));
-      assertTrue(setCookie[0].getValue().contains("s="));
-      assertTrue(calledDoFilter[0]);
-
-      Signer signer = new Signer("secret".getBytes());
-      String value = signer.verifyAndExtract(setCookie[0].getValue());
-      AuthenticationToken token = AuthenticationToken.parse(value);
-      assertEquals(System.currentTimeMillis() + 1000 * 1000, token.getExpires(), 100);
-
-      if (withDomainPath) {
-        assertEquals(".foo.com", setCookie[0].getDomain());
-        assertEquals("/bar", setCookie[0].getPath());
+      if (expired) {
+        Mockito.verify(response, Mockito.never()).
+          addCookie(Mockito.any(Cookie.class));
       } else {
-        assertNull(setCookie[0].getDomain());
-        assertNull(setCookie[0].getPath());
+        assertNotNull(setCookie[0]);
+        assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName());
+        assertTrue(setCookie[0].getValue().contains("u="));
+        assertTrue(setCookie[0].getValue().contains("p="));
+        assertTrue(setCookie[0].getValue().contains("t="));
+        assertTrue(setCookie[0].getValue().contains("e="));
+        assertTrue(setCookie[0].getValue().contains("s="));
+        assertTrue(calledDoFilter[0]);
+
+        Signer signer = new Signer("secret".getBytes());
+        String value = signer.verifyAndExtract(setCookie[0].getValue());
+        AuthenticationToken token = AuthenticationToken.parse(value);
+        assertEquals(System.currentTimeMillis() + 1000 * 1000,
+                     token.getExpires(), 100);
+
+        if (withDomainPath) {
+          assertEquals(".foo.com", setCookie[0].getDomain());
+          assertEquals("/bar", setCookie[0].getPath());
+        } else {
+          assertNull(setCookie[0].getDomain());
+          assertNull(setCookie[0].getPath());
+        }
       }
     } finally {
       filter.destroy();
@@ -443,25 +497,33 @@
   }
 
   public void testDoFilterAuthentication() throws Exception {
-    _testDoFilterAuthentication(false, false);
+    _testDoFilterAuthentication(false, false, false);
+  }
+
+  public void testDoFilterAuthenticationImmediateExpiration() throws Exception {
+    _testDoFilterAuthentication(false, false, true);
   }
 
   public void testDoFilterAuthenticationWithInvalidToken() throws Exception {
-    _testDoFilterAuthentication(false, true);
+    _testDoFilterAuthentication(false, true, false);
   }
 
   public void testDoFilterAuthenticationWithDomainPath() throws Exception {
-    _testDoFilterAuthentication(true, false);
+    _testDoFilterAuthentication(true, false, false);
   }
 
   public void testDoFilterAuthenticated() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -503,10 +565,14 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -563,10 +629,14 @@
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("true");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -618,4 +688,50 @@
     }
   }
 
+  public void testManagementOperation() throws Exception {
+    AuthenticationFilter filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter("management.operation.return")).
+        thenReturn("false");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).
+        thenReturn(DummyAuthenticationHandler.class.getName());
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+        new Vector<String>(
+          Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                        "management.operation.return")).elements());
+      filter.init(config);
+
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getRequestURL()).
+        thenReturn(new StringBuffer("http://foo:8080/bar"));
+
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+      FilterChain chain = Mockito.mock(FilterChain.class);
+
+      filter.doFilter(request, response, chain);
+      Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
+      Mockito.verifyNoMoreInteractions(response);
+
+      Mockito.reset(request);
+      Mockito.reset(response);
+
+      AuthenticationToken token = new AuthenticationToken("u", "p", "t");
+      token.setExpires(System.currentTimeMillis() + 1000);
+      Signer signer = new Signer("secret".getBytes());
+      String tokenSigned = signer.sign(token.toString());
+      Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
+
+      filter.doFilter(request, response, chain);
+
+      Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
+      Mockito.verifyNoMoreInteractions(response);
+
+    } finally {
+      filter.destroy();
+    }
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 259b5a4..a9c99ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -9,11 +9,11 @@
 
   NEW FEATURES
     
-    HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
-    Robinson via atm)
+    HADOOP-8469. Make NetworkTopology class pluggable.  (Junping Du via
+    szetszwo)
 
-    HDFS-3042. Automatic failover support for NameNode HA (todd)
-    (see dedicated section below for breakdown of subtasks)
+    HADOOP-8470. Add NetworkTopologyWithNodeGroup, a 4-layer implementation
+    of NetworkTopology.  (Junping Du via szetszwo)
 
   IMPROVEMENTS
 
@@ -55,9 +55,6 @@
     HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the 
     client side translator and server side implementation. (jitendra)
 
-    HADOOP-8244. Improve comments on ByteBufferReadable.read. (Henry Robinson
-    via atm)
-
     HADOOP-7757. Test file reference count is at least 3x actual value (Jon
     Eagles via bobby)
 
@@ -82,6 +79,9 @@
     HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
     filesystems (harsh)
 
+    HADOOP-8059. Add javadoc to InterfaceAudience and InterfaceStability.
+    (Brandon Li via suresh)
+
   BUG FIXES
 
     HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName.
@@ -165,39 +165,7 @@
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
-  BREAKDOWN OF HDFS-3042 SUBTASKS
-
-    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
-    correctly (todd)
-    
-    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
-    
-    HADOOP-8215. Security support for ZK Failover controller (todd)
-    
-    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
-    
-    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
-    error (todd)
-    
-    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
-    class (todd)
-    
-    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
-    
-    HADOOP-8247. Add a config to enable auto-HA, which disables manual
-    FailoverController (todd)
-    
-    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
-    
-    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
-    enabled. (todd)
-    
-    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
-    (todd via eli)
-    
-    HADOOP-8405. ZKFC tests leak ZK instances. (todd)
-
-Release 2.0.1-alpha - UNRELEASED
+Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
 
@@ -206,6 +174,17 @@
 
   NEW FEATURES
  
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
+    HADOOP-8135. Add ByteBufferReadable interface to FSDataInputStream. (Henry
+    Robinson via atm)
+
+    HADOOP-8458. Add management hook to AuthenticationHandler to enable 
+    delegation token operations support (tucu)
+
+    HADOOP-8465. hadoop-auth should support ephemeral authentication (tucu)
+
   IMPROVEMENTS
 
     HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
@@ -228,6 +207,14 @@
 
     HADOOP-8450. Remove src/test/system. (eli)
 
+    HADOOP-8244. Improve comments on ByteBufferReadable.read. (Henry Robinson
+    via atm)
+
+    HADOOP-8368. Use CMake rather than autotools to build native code (cmccabe via tucu)
+
+    HADOOP-8524. Allow users to get source of a Configuration
+    parameter (harsh)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -267,6 +254,54 @@
     HADOOP-8481. update BUILDING.txt to talk about cmake rather than autotools.
     (Colin Patrick McCabe via eli)
 
+    HADOOP-8485. Don't hardcode "Apache Hadoop 0.23" in the docs. (eli)
+
+    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8507. Avoid OOM while deserializing DelegationTokenIdentifer.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8433. Don't set HADOOP_LOG_DIR in hadoop-env.sh.
+    (Brahma Reddy Battula via eli)
+
+    HADOOP-8509. JarFinder duplicate entry: META-INF/MANIFEST.MF exception (tucu)
+
+    HADOOP-8512. AuthenticatedURL should reset the Token when the server returns 
+    other than OK on authentication (tucu)
+
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HADOOP-8220. ZKFailoverController doesn't handle failure to become active
+    correctly (todd)
+
+    HADOOP-8228. Auto HA: Refactor tests and add stress tests. (todd)
+    
+    HADOOP-8215. Security support for ZK Failover controller (todd)
+    
+    HADOOP-8245. Fix flakiness in TestZKFailoverController (todd)
+    
+    HADOOP-8257. TestZKFailoverControllerStress occasionally fails with Mockito
+    error (todd)
+    
+    HADOOP-8260. Replace ClientBaseWithFixes with our own modified copy of the
+    class (todd)
+    
+    HADOOP-8246. Auto-HA: automatically scope znode by nameservice ID (todd)
+    
+    HADOOP-8247. Add a config to enable auto-HA, which disables manual
+    FailoverController (todd)
+    
+    HADOOP-8306. ZKFC: improve error message when ZK is not running. (todd)
+    
+    HADOOP-8279. Allow manual failover to be invoked when auto-failover is
+    enabled. (todd)
+    
+    HADOOP-8276. Auto-HA: add config for java options to pass to zkfc daemon
+    (todd via eli)
+    
+    HADOOP-8405. ZKFC tests leak ZK instances. (todd)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
@@ -696,6 +731,12 @@
 
     HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby)
 
+    HADOOP-8495. Update Netty to avoid leaking file descriptors during shuffle
+    (Jason Lowe via tgraves)
+
+    HADOOP-8129. ViewFileSystemTestSetup setupForViewFileSystem is erring
+    (Ahmed Radwan and Ravi Prakash via bobby)
+
 Release 0.23.2 - UNRELEASED 
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index a36b74d..8142080 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -536,32 +536,11 @@
         <snappy.prefix>/usr/local</snappy.prefix>
         <snappy.lib>${snappy.prefix}/lib</snappy.lib>
         <snappy.include>${snappy.prefix}/include</snappy.include>
+        <runas.home></runas.home>
       </properties>
       <build>
         <plugins>
           <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>compile</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <mkdir dir="${project.build.directory}/native/javah"/>
-                    <copy toDir="${project.build.directory}/native">
-                      <fileset dir="${basedir}/src/main/native"/>
-                    </copy>
-                    <mkdir dir="${project.build.directory}/native/m4"/>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
             <groupId>org.codehaus.mojo</groupId>
             <artifactId>native-maven-plugin</artifactId>
             <executions>
@@ -590,73 +569,27 @@
             </executions>
           </plugin>
           <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                      <env key="CFLAGS" value="-I${snappy.include}"/>
+                      <env key="LDFLAGS" value="-L${snappy.lib}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
             </executions>
-            <configuration>
-              <!-- autoreconf settings -->
-              <workDir>${project.build.directory}/native</workDir>
-              <arguments>
-                <argument>-i</argument>
-                <argument>-f</argument>
-              </arguments>
-
-              <!-- configure settings -->
-              <configureEnvironment>
-                <property>
-                  <name>OS_NAME</name>
-                  <value>${os.name}</value>
-                </property>
-                <property>
-                  <name>OS_ARCH</name>
-                  <value>${os.arch}</value>
-                </property>
-                <property>
-                  <name>JVM_DATA_MODEL</name>
-                  <value>${sun.arch.data.model}</value>
-                </property>
-              </configureEnvironment>
-              <configureOptions>
-                <configureOption>CPPFLAGS=-I${snappy.include}</configureOption>
-                <configureOption>LDFLAGS=-L${snappy.lib}</configureOption>
-              </configureOptions>
-              <configureWorkDir>${project.build.directory}/native</configureWorkDir>
-              <prefix>/usr/local</prefix>
-
-              <!-- make settings -->
-              <installEnvironment>
-                <property>
-                  <name>OS_NAME</name>
-                  <value>${os.name}</value>
-                </property>
-                <property>
-                  <name>OS_ARCH</name>
-                  <value>${os.arch}</value>
-                </property>
-                <property>
-                  <name>JVM_DATA_MODEL</name>
-                  <value>${sun.arch.data.model}</value>
-                </property>
-                <property>
-                  <name>HADOOP_NATIVE_SRCDIR</name>
-                  <value>${project.build.directory}/native</value>
-                </property>
-              </installEnvironment>
-
-              <!-- configure & make settings -->
-              <destDir>${project.build.directory}/native/target</destDir>
-
-            </configuration>
           </plugin>
         </plugins>
       </build>
@@ -700,7 +633,7 @@
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>kdc</id>
                 <phase>compile</phase>
                 <goals>
                   <goal>run</goal>
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
new file mode 100644
index 0000000..c632531
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -0,0 +1,126 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+
+# Default to release builds
+set(CMAKE_BUILD_TYPE Release)
+
+# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
+# This variable is set by maven.
+if (JVM_ARCH_DATA_MODEL EQUAL 32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+    set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+        set(CMAKE_SYSTEM_PROCESSOR "i686")
+    endif ()
+endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+
+# Compile a library with both shared and static variants
+function(add_dual_library LIBNAME)
+    add_library(${LIBNAME} SHARED ${ARGN})
+    add_library(${LIBNAME}_static STATIC ${ARGN})
+    set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+endfunction(add_dual_library)
+
+# Link both a static and a dynamic target against some libraries
+function(target_link_dual_libraries LIBNAME)
+    target_link_libraries(${LIBNAME} ${ARGN})
+    target_link_libraries(${LIBNAME}_static ${ARGN})
+endfunction(target_link_dual_libraries)
+
+function(output_directory TGT DIR)
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+endfunction(output_directory TGT DIR)
+
+function(dual_output_directory TGT DIR)
+    output_directory(${TGT} "${DIR}")
+    output_directory(${TGT}_static "${DIR}")
+endfunction(dual_output_directory TGT DIR)
+
+if (NOT GENERATED_JAVAH)
+    # Must identify where the generated headers have been placed
+    MESSAGE(FATAL_ERROR "You must set the cmake variable GENERATED_JAVAH")
+endif (NOT GENERATED_JAVAH)
+find_package(JNI REQUIRED)
+find_package(ZLIB REQUIRED)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
+set(D main/native/src/org/apache/hadoop)
+
+GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
+
+INCLUDE(CheckFunctionExists)
+INCLUDE(CheckCSourceCompiles)
+CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
+CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
+
+find_library(SNAPPY_LIBRARY NAMES snappy PATHS)
+find_path(SNAPPY_INCLUDE_DIR NAMES snappy.h PATHS)
+if (SNAPPY_LIBRARY)
+    GET_FILENAME_COMPONENT(HADOOP_SNAPPY_LIBRARY ${SNAPPY_LIBRARY} NAME)
+    set(SNAPPY_SOURCE_FILES
+        "${D}/io/compress/snappy/SnappyCompressor.c"
+        "${D}/io/compress/snappy/SnappyDecompressor.c")
+else (${SNAPPY_LIBRARY})
+    set(SNAPPY_INCLUDE_DIR "")
+    set(SNAPPY_SOURCE_FILES "")
+endif (SNAPPY_LIBRARY)
+
+include_directories(
+    ${GENERATED_JAVAH}
+    main/native/src
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/src
+    ${CMAKE_BINARY_DIR}
+    ${JNI_INCLUDE_DIRS}
+    ${ZLIB_INCLUDE_DIRS}
+    ${SNAPPY_INCLUDE_DIR}
+)
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+add_dual_library(hadoop
+    ${D}/io/compress/lz4/Lz4Compressor.c
+    ${D}/io/compress/lz4/Lz4Decompressor.c
+    ${D}/io/compress/lz4/lz4.c
+    ${SNAPPY_SOURCE_FILES}
+    ${D}/io/compress/zlib/ZlibCompressor.c
+    ${D}/io/compress/zlib/ZlibDecompressor.c
+    ${D}/io/nativeio/NativeIO.c
+    ${D}/io/nativeio/errno_enum.c
+    ${D}/io/nativeio/file_descriptor.c
+    ${D}/security/JniBasedUnixGroupsMapping.c
+    ${D}/security/JniBasedUnixGroupsNetgroupMapping.c
+    ${D}/security/getGroup.c
+    ${D}/util/NativeCrc32.c
+    ${D}/util/bulk_crc32.c
+)
+target_link_dual_libraries(hadoop
+    dl
+    ${JAVA_JVM_LIBRARY}
+)
+SET(LIBHADOOP_VERSION "1.0.0")
+SET_TARGET_PROPERTIES(hadoop PROPERTIES
+    SOVERSION ${LIBHADOOP_VERSION})
+dual_output_directory(hadoop target/usr/local/lib)
diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake
new file mode 100644
index 0000000..9098b68
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/config.h.cmake
@@ -0,0 +1,10 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
+#cmakedefine HADOOP_RUNAS_HOME "@HADOOP_RUNAS_HOME@"
+#cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
+#cmakedefine HAVE_SYNC_FILE_RANGE
+#cmakedefine HAVE_POSIX_FADVISE
+
+#endif
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 33abeca..72e8c63 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -61,7 +61,7 @@
 export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 917f97c..c54070e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1071,6 +1071,38 @@
   }
 
   /**
+   * Gets the absolute path of the resource (file, URL, etc.) from which a
+   * given property was loaded.
+   *
+   * @param name - The property name to get the source of.
+   * @return null - If the property or its source wasn't found or if the property
+   * was defined in code (i.e. in a Configuration instance, not from a physical
+   * resource). Otherwise, returns the absolute path of the resource that loaded
+   * the property name, as a String.
+   */
+  @InterfaceStability.Unstable
+  public synchronized String getPropertySource(String name) {
+    if (properties == null) {
+      // If properties is null, a resource was newly added but the props were
+      // cleared so that it would be loaded on a future request. Force that
+      // load now by asking for the properties list.
+      getProps();
+    }
+    // Return a null right away if our properties still
+    // haven't loaded or the resource mapping isn't defined
+    if (properties == null || updatingResource == null) {
+      return null;
+    } else {
+      String source = updatingResource.get(name);
+      if (source == null || source.equals(UNKNOWN_RESOURCE)) {
+        return null;
+      } else {
+        return source;
+      }
+    }
+  }
+
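
A minimal usage sketch of the new getPropertySource() accessor; the key names
below are illustrative only, and any key loaded from an XML resource behaves
the same way:

import org.apache.hadoop.conf.Configuration;

public class PropertySourceExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Values set programmatically have no physical resource, so the source is null.
    conf.set("example.key", "value");
    System.out.println(conf.getPropertySource("example.key"));     // null

    // A key picked up from core-default.xml / core-site.xml reports the
    // path (or URL) of the resource that last set it.
    System.out.println(conf.getPropertySource("fs.defaultFS"));
  }
}
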
+  /**
    * A class that represents a set of positive integer ranges. It parses 
    * strings of the form: "2-3,5,7-" where ranges are separated by comma and 
    * the lower/upper bounds are separated by dash. Either the lower or upper 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 67f3bc5..c367294 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -63,7 +63,9 @@
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
     "net.topology.node.switch.mapping.impl";
-  
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  NET_TOPOLOGY_IMPL_KEY =
+    "net.topology.impl";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
     "net.topology.table.file.name";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
index 349d71b..2d49759 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.security.token.delegation;
+package org.apache.hadoop.fs;
 
 import java.io.IOException;
 import java.lang.ref.WeakReference;
@@ -25,7 +25,6 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
@@ -161,4 +160,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 4cc2c18..2757475 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -254,7 +254,7 @@
   // Writable
   //////////////////////////////////////////////////
   public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE);
+    Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
     out.writeLong(getLen());
     out.writeBoolean(isDirectory());
     out.writeShort(getReplication());
@@ -262,16 +262,16 @@
     out.writeLong(getModificationTime());
     out.writeLong(getAccessTime());
     getPermission().write(out);
-    Text.writeString(out, getOwner(), Text.ONE_MEGABYTE);
-    Text.writeString(out, getGroup(), Text.ONE_MEGABYTE);
+    Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
+    Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
     out.writeBoolean(isSymlink());
     if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE);
+      Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
     }
   }
 
   public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in, Text.ONE_MEGABYTE);
+    String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
     this.path = new Path(strPath);
     this.length = in.readLong();
     this.isdir = in.readBoolean();
@@ -280,10 +280,10 @@
     modification_time = in.readLong();
     access_time = in.readLong();
     permission.readFields(in);
-    owner = Text.readString(in, Text.ONE_MEGABYTE);
-    group = Text.readString(in, Text.ONE_MEGABYTE);
+    owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
+    group = Text.readString(in, Text.DEFAULT_MAX_LEN);
     if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in, Text.ONE_MEGABYTE));
+      this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
     } else {
       this.symlink = null;
     }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
index 5642d0f..f47226f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
@@ -84,8 +84,8 @@
 
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    username = Text.readString(in, Text.ONE_MEGABYTE);
-    groupname = Text.readString(in, Text.ONE_MEGABYTE);
+    username = Text.readString(in, Text.DEFAULT_MAX_LEN);
+    groupname = Text.readString(in, Text.DEFAULT_MAX_LEN);
     permission = FsPermission.read(in);
   }
 
@@ -110,8 +110,8 @@
                            String username, 
                            String groupname,
                            FsPermission permission) throws IOException {
-    Text.writeString(out, username, Text.ONE_MEGABYTE);
-    Text.writeString(out, groupname, Text.ONE_MEGABYTE);
+    Text.writeString(out, username, Text.DEFAULT_MAX_LEN);
+    Text.writeString(out, groupname, Text.DEFAULT_MAX_LEN);
     permission.write(out);
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index ded6870..2f693b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -52,7 +52,9 @@
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.mortbay.io.Buffer;
@@ -606,6 +608,24 @@
     sslListener.setNeedClientAuth(needCertsAuth);
     webServer.addConnector(sslListener);
   }
+  
+  /** Define the SPNEGO (Kerberos) authentication filter for the web context. */
+  protected void initSpnego(Configuration conf,
+      String usernameConfKey, String keytabConfKey) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    String principalInConf = conf.get(usernameConfKey);
+    if (principalInConf != null && !principalInConf.isEmpty()) {
+      params.put("kerberos.principal",
+                 SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
+    }
+    String httpKeytab = conf.get(keytabConfKey);
+    if (httpKeytab != null && !httpKeytab.isEmpty()) {
+      params.put("kerberos.keytab", httpKeytab);
+    }
+    params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+  
+    defineFilter(webAppContext, SPNEGO_FILTER,
+                 AuthenticationFilter.class.getName(), params, null);
+  }
 
   /**
    * Start the server. Does not wait for the server to start.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index f5875d8..6969d190 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -20,6 +20,9 @@
 
 import java.io.*;
 import java.net.Socket;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
 
@@ -245,4 +248,34 @@
     public void write(int b) throws IOException {
     }
   }  
+  
+  /**
+   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
+   * 
+   * @param bc               The WritableByteChannel to write to
+   * @param buf              The input buffer
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
+      throws IOException {
+    do {
+      bc.write(buf);
+    } while (buf.remaining() > 0);
+  }
+
+  /**
+   * Write a ByteBuffer to a FileChannel at a given offset, 
+   * handling short writes.
+   * 
+   * @param fc               The FileChannel to write to
+   * @param buf              The input buffer
+   * @param offset           The offset in the file to start writing at
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(FileChannel fc, ByteBuffer buf,
+      long offset) throws IOException {
+    do {
+      offset += fc.write(buf, offset);
+    } while (buf.remaining() > 0);
+  }
 }
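
A self-contained sketch of the new positional writeFully() helper; the temp
file and the 128-byte offset are arbitrary choices for illustration:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

import org.apache.hadoop.io.IOUtils;

public class WriteFullyExample {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("writeFully", ".bin");
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    try {
      FileChannel fc = raf.getChannel();
      ByteBuffer buf = ByteBuffer.wrap("hello, world".getBytes("UTF-8"));
      // Writes the entire buffer at byte offset 128, looping on short writes;
      // positional writes leave the channel's own position untouched.
      IOUtils.writeFully(fc, buf, 128L);
    } finally {
      raf.close();
    }
  }
}
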
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 78748b0..a4f80ea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -287,6 +287,20 @@
     in.readFully(bytes, 0, newLength);
     length = newLength;
   }
+  
+  /** Deserialize into this Text, enforcing an upper bound on the encoded length. */
+  public void readFields(DataInput in, int maxLength) throws IOException {
+    int newLength = WritableUtils.readVInt(in);
+    if (newLength < 0) {
+      throw new IOException("tried to deserialize " + newLength +
+          " bytes of data!  newLength must be non-negative.");
+    } else if (newLength > maxLength) {
+      throw new IOException("tried to deserialize " + newLength +
+          " bytes of data, but maxLength = " + maxLength);
+    }
+    setCapacity(newLength, false);
+    in.readFully(bytes, 0, newLength);
+    length = newLength;
+  }
 
   /** Skips over one Text in the input. */
   public static void skip(DataInput in) throws IOException {
@@ -304,6 +318,16 @@
     out.write(bytes, 0, length);
   }
 
+  /** Serialize this Text, refusing to write more than maxLength bytes of data. */
+  public void write(DataOutput out, int maxLength) throws IOException {
+    if (length > maxLength) {
+      throw new IOException("data was too long to write!  Expected " +
+          "less than or equal to " + maxLength + " bytes, but got " +
+          length + " bytes.");
+    }
+    WritableUtils.writeVInt(out, length);
+    out.write(bytes, 0, length);
+  }
+
   /** Returns true iff <code>o</code> is a Text with the same contents.  */
   public boolean equals(Object o) {
     if (o instanceof Text)
@@ -417,7 +441,7 @@
     return bytes;
   }
 
-  static final public int ONE_MEGABYTE = 1024 * 1024;
+  static final public int DEFAULT_MAX_LEN = 1024 * 1024;
 
   /** Read a UTF8 encoded string from in
    */
@@ -432,7 +456,7 @@
    */
   public static String readString(DataInput in, int maxLength)
       throws IOException {
-    int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1);
+    int length = WritableUtils.readVIntInRange(in, 0, maxLength);
     byte [] bytes = new byte[length];
     in.readFully(bytes, 0, length);
     return decode(bytes);
@@ -454,9 +478,9 @@
       throws IOException {
     ByteBuffer bytes = encode(s);
     int length = bytes.limit();
-    if (length >= maxLength) {
+    if (length > maxLength) {
       throw new IOException("string was too long to write!  Expected " +
-          "less than " + maxLength + " bytes, but got " +
+          "less than or equal to " + maxLength + " bytes, but got " +
           length + " bytes.");
     }
     WritableUtils.writeVInt(out, length);
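
A round-trip sketch of the bounded read/write paths touched above; the 16-byte
limit is an arbitrary illustration, with Text.DEFAULT_MAX_LEN being the usual bound:

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class BoundedTextExample {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    // Throws if the UTF-8 encoding of the string exceeds 16 bytes.
    Text.writeString(out, "bounded", 16);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    // Symmetric bound on the read side.
    System.out.println(Text.readString(in, 16));
  }
}
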
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 323542c..381ce3e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -33,7 +33,7 @@
 
 class RetryInvocationHandler implements RpcInvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
-  private FailoverProxyProvider proxyProvider;
+  private final FailoverProxyProvider proxyProvider;
 
   /**
    * The number of times the associated proxyProvider has ever been failed over.
@@ -41,26 +41,25 @@
   private long proxyProviderFailoverCount = 0;
   private volatile boolean hasMadeASuccessfulCall = false;
   
-  private RetryPolicy defaultPolicy;
-  private Map<String,RetryPolicy> methodNameToPolicyMap;
+  private final RetryPolicy defaultPolicy;
+  private final Map<String,RetryPolicy> methodNameToPolicyMap;
   private Object currentProxy;
   
   public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
       RetryPolicy retryPolicy) {
-    this.proxyProvider = proxyProvider;
-    this.defaultPolicy = retryPolicy;
-    this.methodNameToPolicyMap = Collections.emptyMap();
-    this.currentProxy = proxyProvider.getProxy();
+    this(proxyProvider, retryPolicy, Collections.<String, RetryPolicy>emptyMap());
   }
-  
+
   public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+      RetryPolicy defaultPolicy,
       Map<String, RetryPolicy> methodNameToPolicyMap) {
     this.proxyProvider = proxyProvider;
-    this.defaultPolicy = RetryPolicies.TRY_ONCE_THEN_FAIL;
+    this.defaultPolicy = defaultPolicy;
     this.methodNameToPolicyMap = methodNameToPolicyMap;
     this.currentProxy = proxyProvider.getProxy();
   }
 
+  @Override
   public Object invoke(Object proxy, Method method, Object[] args)
     throws Throwable {
     RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 2be8b75..8b8387c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -22,10 +22,13 @@
 import java.net.NoRouteToHostException;
 import java.net.SocketException;
 import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Map.Entry;
+import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -33,8 +36,6 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * <p>
  * A collection of useful implementations of {@link RetryPolicy}.
@@ -44,7 +45,12 @@
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static final Random RAND = new Random();
+  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    @Override
+    protected Random initialValue() {
+      return new Random();
+    }
+  };
   
   /**
    * <p>
@@ -157,17 +163,35 @@
     }
   }
   
+  /**
+   * Retry up to maxRetries.
+   * The actual sleep time of the n-th retry is f(n, sleepTime),
+   * where f is a function provided by the subclass implementation.
+   *
+   * Objects of the subclasses should be immutable;
+   * otherwise, the subclass must override hashCode(), equals(..) and toString().
+   */
   static abstract class RetryLimited implements RetryPolicy {
-    int maxRetries;
-    long sleepTime;
-    TimeUnit timeUnit;
+    final int maxRetries;
+    final long sleepTime;
+    final TimeUnit timeUnit;
     
-    public RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+    private String myString;
+
+    RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+      if (maxRetries < 0) {
+        throw new IllegalArgumentException("maxRetries = " + maxRetries+" < 0");
+      }
+      if (sleepTime < 0) {
+        throw new IllegalArgumentException("sleepTime = " + sleepTime + " < 0");
+      }
+
       this.maxRetries = maxRetries;
       this.sleepTime = sleepTime;
       this.timeUnit = timeUnit;
     }
 
+    @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
         boolean isMethodIdempotent) throws Exception {
       if (retries >= maxRetries) {
@@ -178,6 +202,30 @@
     }
     
     protected abstract long calculateSleepTime(int retries);
+    
+    @Override
+    public int hashCode() {
+      return toString().hashCode();
+    }
+    
+    @Override
+    public boolean equals(final Object that) {
+      if (this == that) {
+        return true;
+      } else if (that == null || this.getClass() != that.getClass()) {
+        return false;
+      }
+      return this.toString().equals(that.toString());
+    }
+
+    @Override
+    public String toString() {
+      if (myString == null) {
+        myString = getClass().getSimpleName() + "(maxRetries=" + maxRetries
+            + ", sleepTime=" + sleepTime + " " + timeUnit + ")";
+      }
+      return myString;
+    }
   }
   
   static class RetryUpToMaximumCountWithFixedSleep extends RetryLimited {
@@ -208,6 +256,169 @@
     }
   }
   
+  /**
+   * Given pairs of number of retries and sleep time (n0, t0), (n1, t1), ...,
+   * the first n0 retries sleep t0 milliseconds on average,
+   * the following n1 retries sleep t1 milliseconds on average, and so on.
+   * 
+   * For each sleep, the actual sleep time is uniformly distributed
+   * in the interval [0.5t, 1.5t], where t is the sleep time specified.
+   *
+   * The objects of this class are immutable.
+   */
+  public static class MultipleLinearRandomRetry implements RetryPolicy {
+    /** Pairs of numRetries and sleepMillis */
+    public static class Pair {
+      final int numRetries;
+      final int sleepMillis;
+      
+      public Pair(final int numRetries, final int sleepMillis) {
+        if (numRetries < 0) {
+          throw new IllegalArgumentException("numRetries = " + numRetries+" < 0");
+        }
+        if (sleepMillis < 0) {
+          throw new IllegalArgumentException("sleepMillis = " + sleepMillis + " < 0");
+        }
+
+        this.numRetries = numRetries;
+        this.sleepMillis = sleepMillis;
+      }
+      
+      @Override
+      public String toString() {
+        return numRetries + "x" + sleepMillis + "ms";
+      }
+    }
+
+    private final List<Pair> pairs;
+    private String myString;
+
+    public MultipleLinearRandomRetry(List<Pair> pairs) {
+      if (pairs == null || pairs.isEmpty()) {
+        throw new IllegalArgumentException("pairs must be neither null nor empty.");
+      }
+      this.pairs = Collections.unmodifiableList(pairs);
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int curRetry, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      final Pair p = searchPair(curRetry);
+      if (p == null) {
+        //no more retries.
+        return RetryAction.FAIL;
+      }
+
+      //calculate sleep time and return.
+      final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <=1.5
+      final long sleepTime = Math.round(p.sleepMillis * ratio);
+      return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
+    }
+
+    /**
+     * Given the current retry count, search for the corresponding pair.
+     * @return the corresponding pair,
+     *   or null if the current retry count exceeds the maximum number of retries.
+     */
+    private Pair searchPair(int curRetry) {
+      int i = 0;
+      for(; i < pairs.size() && curRetry > pairs.get(i).numRetries; i++) {
+        curRetry -= pairs.get(i).numRetries;
+      }
+      return i == pairs.size()? null: pairs.get(i);
+    }
+    
+    @Override
+    public int hashCode() {
+      return toString().hashCode();
+    }
+    
+    @Override
+    public boolean equals(final Object that) {
+      if (this == that) {
+        return true;
+      } else if (that == null || this.getClass() != that.getClass()) {
+        return false;
+      }
+      return this.toString().equals(that.toString());
+    }
+
+    @Override
+    public String toString() {
+      if (myString == null) {
+        myString = getClass().getSimpleName() + pairs;
+      }
+      return myString;
+    }
+
+    /**
+     * Parse the given string as a MultipleLinearRandomRetry object.
+     * The format of the string is "t_1, n_1, t_2, n_2, ...",
+     * where t_i and n_i are the i-th pair of sleep time and number of retries.
+     * Note that whitespace in the string is ignored.
+     *
+     * @return the parsed object, or null if the parsing fails.
+     */
+    public static MultipleLinearRandomRetry parseCommaSeparatedString(String s) {
+      final String[] elements = s.split(",");
+      if (elements.length == 0) {
+        LOG.warn("Illegal value: there is no element in \"" + s + "\".");
+        return null;
+      }
+      if (elements.length % 2 != 0) {
+        LOG.warn("Illegal value: the number of elements in \"" + s + "\" is "
+            + elements.length + " but an even number of elements is expected.");
+        return null;
+      }
+
+      final List<RetryPolicies.MultipleLinearRandomRetry.Pair> pairs
+          = new ArrayList<RetryPolicies.MultipleLinearRandomRetry.Pair>();
+   
+      for(int i = 0; i < elements.length; ) {
+        //parse the i-th sleep-time
+        final int sleep = parsePositiveInt(elements, i++, s);
+        if (sleep == -1) {
+          return null; //parse fails
+        }
+
+        //parse the i-th number-of-retries
+        final int retries = parsePositiveInt(elements, i++, s);
+        if (retries == -1) {
+          return null; //parse fails
+        }
+
+        pairs.add(new RetryPolicies.MultipleLinearRandomRetry.Pair(retries, sleep));
+      }
+      return new RetryPolicies.MultipleLinearRandomRetry(pairs);
+    }
+
+    /**
+     * Parse the i-th element as an integer.
+     * @return -1 if the parsing fails or the parsed value <= 0;
+     *   otherwise, return the parsed value.
+     */
+    private static int parsePositiveInt(final String[] elements,
+        final int i, final String originalString) {
+      final String s = elements[i].trim();
+      final int n;
+      try {
+        n = Integer.parseInt(s);
+      } catch(NumberFormatException nfe) {
+        LOG.warn("Failed to parse \"" + s + "\", which is the index " + i
+            + " element in \"" + originalString + "\"", nfe);
+        return -1;
+      }
+
+      if (n <= 0) {
+        LOG.warn("The value " + n + " <= 0: it is parsed from the string \""
+            + s + "\" which is the index " + i + " element in \""
+            + originalString + "\"");
+        return -1;
+      }
+      return n;
+    }
+  }
+
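
A sketch of the comma-separated format accepted by parseCommaSeparatedString();
the sleep times and retry counts are arbitrary:

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class MultipleLinearRandomRetryExample {
  public static void main(String[] args) {
    // "t_1,n_1,t_2,n_2,...": 6 retries sleeping about 10s each, then 10 retries
    // sleeping about 60s each; each actual sleep is randomized around t.
    RetryPolicy p = RetryPolicies.MultipleLinearRandomRetry
        .parseCommaSeparatedString("10000,6,60000,10");
    System.out.println(p);   // MultipleLinearRandomRetry[6x10000ms, 10x60000ms]
  }
}
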
   static class ExceptionDependentRetry implements RetryPolicy {
 
     RetryPolicy defaultPolicy;
@@ -265,6 +476,14 @@
     public ExponentialBackoffRetry(
         int maxRetries, long sleepTime, TimeUnit timeUnit) {
       super(maxRetries, sleepTime, timeUnit);
+
+      if (maxRetries < 0) {
+        throw new IllegalArgumentException("maxRetries = " + maxRetries + " < 0");
+      } else if (maxRetries >= Long.SIZE - 1) {
+        //calculateSleepTime may overflow. 
+        throw new IllegalArgumentException("maxRetries = " + maxRetries
+            + " >= " + (Long.SIZE - 1));
+      }
     }
     
     @Override
@@ -353,11 +572,10 @@
    * @param cap value at which to cap the base sleep time
    * @return an amount of time to sleep
    */
-  @VisibleForTesting
-  public static long calculateExponentialTime(long time, int retries,
+  private static long calculateExponentialTime(long time, int retries,
       long cap) {
-    long baseTime = Math.min(time * ((long)1 << retries), cap);
-    return (long) (baseTime * (RAND.nextFloat() + 0.5));
+    long baseTime = Math.min(time * (1L << retries), cap);
+    return (long) (baseTime * (RANDOM.get().nextDouble() + 0.5));
   }
 
   private static long calculateExponentialTime(long time, int retries) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
index ed673e9..e1f3899 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
@@ -60,6 +60,12 @@
       this.reason = reason;
     }
     
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "(action=" + action
+          + ", delayMillis=" + delayMillis + ", reason=" + reason + ")";
+    }
+    
     public enum RetryDecision {
       FAIL,
       RETRY,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
index 13e8a41..3cc6a2e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
@@ -75,9 +75,10 @@
    */
   public static Object create(Class<?> iface, Object implementation,
                               Map<String,RetryPolicy> methodNameToPolicyMap) {
-    return RetryProxy.create(iface,
+    return create(iface,
         new DefaultFailoverProxyProvider(iface, implementation),
-        methodNameToPolicyMap);
+        methodNameToPolicyMap,
+        RetryPolicies.TRY_ONCE_THEN_FAIL);
   }
 
   /**
@@ -92,11 +93,13 @@
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
-      Map<String,RetryPolicy> methodNameToPolicyMap) {
+      Map<String,RetryPolicy> methodNameToPolicyMap,
+      RetryPolicy defaultPolicy) {
     return Proxy.newProxyInstance(
         proxyProvider.getInterface().getClassLoader(),
         new Class<?>[] { iface },
-        new RetryInvocationHandler(proxyProvider, methodNameToPolicyMap)
+        new RetryInvocationHandler(proxyProvider, defaultPolicy,
+            methodNameToPolicyMap)
         );
   }
 }
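
A minimal sketch of the reworked RetryProxy.create() overload; the Echo
interface and its in-line implementation are hypothetical stand-ins for a real
protocol and server-side object:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryProxyExample {
  // Hypothetical protocol interface, used only for this sketch.
  interface Echo {
    String echo(String msg);
  }

  public static void main(String[] args) {
    Echo impl = new Echo() {
      @Override
      public String echo(String msg) { return msg; }
    };

    Map<String, RetryPolicy> perMethod = new HashMap<String, RetryPolicy>();
    perMethod.put("echo",
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS));

    // Callers now pass the default policy explicitly instead of relying on the
    // previously hard-coded TRY_ONCE_THEN_FAIL.
    Echo proxy = (Echo) RetryProxy.create(Echo.class,
        new DefaultFailoverProxyProvider(Echo.class, impl),
        perMethod,
        RetryPolicies.TRY_ONCE_THEN_FAIL);

    System.out.println(proxy.echo("hello"));
  }
}
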
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index ef32cfd..d382c99 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,47 +18,51 @@
 
 package org.apache.hadoop.ipc;
 
-import java.net.InetAddress;
-import java.net.Socket;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.io.IOException;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.FilterInputStream;
+import java.io.IOException;
 import java.io.InputStream;
+import java.io.InterruptedIOException;
 import java.io.OutputStream;
-
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Hashtable;
 import java.util.Iterator;
+import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
-import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SaslRpcClient;
@@ -67,8 +71,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -80,8 +84,8 @@
  */
 public class Client {
   
-  public static final Log LOG =
-    LogFactory.getLog(Client.class);
+  public static final Log LOG = LogFactory.getLog(Client.class);
+
   private Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
@@ -228,8 +232,7 @@
     private int rpcTimeout;
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private int maxRetries; //the max. no. of retries for socket connections
-    // the max. no. of retries for socket connections on time out exceptions
+    private final RetryPolicy connectionRetryPolicy;
     private int maxRetriesOnSocketTimeouts;
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean doPing; //do we need to send ping message
@@ -253,7 +256,7 @@
       }
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
-      this.maxRetries = remoteId.getMaxRetries();
+      this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
@@ -488,7 +491,7 @@
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
           }
-          handleConnectionFailure(ioFailures++, maxRetries, ie);
+          handleConnectionFailure(ioFailures++, ie);
         }
       }
     }
@@ -680,8 +683,36 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignored) {}
       
-      LOG.info("Retrying connect to server: " + server + 
-          ". Already tried " + curRetries + " time(s).");
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); maxRetries=" + maxRetries);
+    }
+
+    private void handleConnectionFailure(int curRetries, IOException ioe
+        ) throws IOException {
+      closeConnection();
+
+      final RetryAction action;
+      try {
+        action = connectionRetryPolicy.shouldRetry(ioe, curRetries, 0, true);
+      } catch(Exception e) {
+        throw e instanceof IOException? (IOException)e: new IOException(e);
+      }
+      if (action.action == RetryAction.RetryDecision.FAIL) {
+        if (action.reason != null) {
+          LOG.warn("Failed to connect to server: " + server + ": "
+              + action.reason, ioe);
+        }
+        throw ioe;
+      }
+
+      try {
+        Thread.sleep(action.delayMillis);
+      } catch (InterruptedException e) {
+        throw (IOException)new InterruptedIOException("Interrupted: action="
+            + action + ", retry policy=" + connectionRetryPolicy).initCause(e);
+      }
+      LOG.info("Retrying connect to server: " + server + ". Already tried "
+          + curRetries + " time(s); retry policy is " + connectionRetryPolicy);
     }
 
     /**
@@ -849,6 +880,10 @@
       try {
         RpcResponseHeaderProto response = 
             RpcResponseHeaderProto.parseDelimitedFrom(in);
+        if (response == null) {
+          throw new IOException("Response is null.");
+        }
+
         int callId = response.getCallId();
         if (LOG.isDebugEnabled())
           LOG.debug(getName() + " got value #" + callId);
@@ -1287,7 +1322,7 @@
     private final String serverPrincipal;
     private final int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private final int maxRetries; //the max. no. of retries for socket connections
+    private final RetryPolicy connectionRetryPolicy;
     // the max. no. of retries for socket connections on time out exceptions
     private final int maxRetriesOnSocketTimeouts;
     private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -1297,7 +1332,7 @@
     ConnectionId(InetSocketAddress address, Class<?> protocol, 
                  UserGroupInformation ticket, int rpcTimeout,
                  String serverPrincipal, int maxIdleTime, 
-                 int maxRetries, int maxRetriesOnSocketTimeouts,
+                 RetryPolicy connectionRetryPolicy, int maxRetriesOnSocketTimeouts,
                  boolean tcpNoDelay, boolean doPing, int pingInterval) {
       this.protocol = protocol;
       this.address = address;
@@ -1305,7 +1340,7 @@
       this.rpcTimeout = rpcTimeout;
       this.serverPrincipal = serverPrincipal;
       this.maxIdleTime = maxIdleTime;
-      this.maxRetries = maxRetries;
+      this.connectionRetryPolicy = connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
       this.tcpNoDelay = tcpNoDelay;
       this.doPing = doPing;
@@ -1336,10 +1371,6 @@
       return maxIdleTime;
     }
     
-    int getMaxRetries() {
-      return maxRetries;
-    }
-    
     /** max connection retries on socket time outs */
     public int getMaxRetriesOnSocketTimeouts() {
       return maxRetriesOnSocketTimeouts;
@@ -1357,6 +1388,12 @@
       return pingInterval;
     }
     
+    static ConnectionId getConnectionId(InetSocketAddress addr,
+        Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
+        Configuration conf) throws IOException {
+      return getConnectionId(addr, protocol, ticket, rpcTimeout, null, conf);
+    }
+
     /**
      * Returns a ConnectionId object. 
      * @param addr Remote address for the connection.
@@ -1367,9 +1404,18 @@
      * @return A ConnectionId instance
      * @throws IOException
      */
-    public static ConnectionId getConnectionId(InetSocketAddress addr,
+    static ConnectionId getConnectionId(InetSocketAddress addr,
         Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
-        Configuration conf) throws IOException {
+        RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {
+
+      if (connectionRetryPolicy == null) {
+        final int max = conf.getInt(
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
+        connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+            max, 1, TimeUnit.SECONDS);
+      }
+
       String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
       boolean doPing =
         conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
@@ -1377,8 +1423,7 @@
           rpcTimeout, remotePrincipal,
           conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
-          conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
-              CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
+          connectionRetryPolicy,
           conf.getInt(
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
             CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
@@ -1421,7 +1466,7 @@
         return isEqual(this.address, that.address)
             && this.doPing == that.doPing
             && this.maxIdleTime == that.maxIdleTime
-            && this.maxRetries == that.maxRetries
+            && isEqual(this.connectionRetryPolicy, that.connectionRetryPolicy)
             && this.pingInterval == that.pingInterval
             && isEqual(this.protocol, that.protocol)
             && this.rpcTimeout == that.rpcTimeout
@@ -1434,11 +1479,10 @@
     
     @Override
     public int hashCode() {
-      int result = 1;
+      int result = connectionRetryPolicy.hashCode();
       result = PRIME * result + ((address == null) ? 0 : address.hashCode());
       result = PRIME * result + (doPing ? 1231 : 1237);
       result = PRIME * result + maxIdleTime;
-      result = PRIME * result + maxRetries;
       result = PRIME * result + pingInterval;
       result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
       result = PRIME * result + rpcTimeout;
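
For comparison, a caller-built connection retry policy equivalent in shape to
the fallback constructed above when none is supplied (fixed retry count with a
one-second sleep between attempts):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class ConnectionRetryPolicyExample {
  public static void main(String[] args) {
    // Mirrors the fallback in ConnectionId.getConnectionId(): retry the
    // connection up to a fixed count, sleeping one second between attempts.
    RetryPolicy connectionRetryPolicy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1, TimeUnit.SECONDS);
    System.out.println(connectionRetryPolicy);
  }
}
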
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 1338419..d355a85 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -36,9 +36,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputOutputStream;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-
 import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -66,15 +66,24 @@
 
   private static final ClientCache CLIENTS = new ClientCache();
 
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+      SocketFactory factory, int rpcTimeout) throws IOException {
+    return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+        rpcTimeout, null);
+  }
+
   @Override
   @SuppressWarnings("unchecked")
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
       InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-      SocketFactory factory, int rpcTimeout) throws IOException {
+      SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+      ) throws IOException {
 
-    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
-        .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
-        addr, ticket, conf, factory, rpcTimeout)), false);
+    final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+        rpcTimeout, connectionRetryPolicy);
+    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
+        protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
   }
   
   @Override
@@ -97,11 +106,12 @@
     private final long clientProtocolVersion;
     private final String protocolName;
 
-    public Invoker(Class<?> protocol, InetSocketAddress addr,
+    private Invoker(Class<?> protocol, InetSocketAddress addr,
         UserGroupInformation ticket, Configuration conf, SocketFactory factory,
-        int rpcTimeout) throws IOException {
-      this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
-          ticket, rpcTimeout, conf), conf, factory);
+        int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException {
+      this(protocol, Client.ConnectionId.getConnectionId(
+          addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+          conf, factory);
     }
     
     /**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 56fbd7d..6a8a71f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -41,6 +41,7 @@
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.net.NetUtils;
@@ -326,7 +327,7 @@
                              long clientVersion,
                              InetSocketAddress addr, Configuration conf,
                              long connTimeout) throws IOException { 
-    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, connTimeout);
+    return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
   }
   
   /**
@@ -347,7 +348,7 @@
                              int rpcTimeout,
                              long timeout) throws IOException {
     return waitForProtocolProxy(protocol, clientVersion, addr,
-        conf, rpcTimeout, timeout).getProxy();
+        conf, rpcTimeout, null, timeout).getProxy();
   }
 
   /**
@@ -367,6 +368,7 @@
                                long clientVersion,
                                InetSocketAddress addr, Configuration conf,
                                int rpcTimeout,
+                               RetryPolicy connectionRetryPolicy,
                                long timeout) throws IOException { 
     long startTime = System.currentTimeMillis();
     IOException ioe;
@@ -374,7 +376,7 @@
       try {
         return getProtocolProxy(protocol, clientVersion, addr, 
             UserGroupInformation.getCurrentUser(), conf, NetUtils
-            .getDefaultSocketFactory(conf), rpcTimeout);
+            .getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
       } catch(ConnectException se) {  // namenode has not been started
         LOG.info("Server at " + addr + " not available yet, Zzzzz...");
         ioe = se;
@@ -463,7 +465,7 @@
                                 Configuration conf,
                                 SocketFactory factory) throws IOException {
     return getProtocolProxy(
-        protocol, clientVersion, addr, ticket, conf, factory, 0);
+        protocol, clientVersion, addr, ticket, conf, factory, 0, null);
   }
   
   /**
@@ -489,7 +491,7 @@
                                 SocketFactory factory,
                                 int rpcTimeout) throws IOException {
     return getProtocolProxy(protocol, clientVersion, addr, ticket,
-             conf, factory, rpcTimeout).getProxy();
+             conf, factory, rpcTimeout, null).getProxy();
   }
   
   /**
@@ -512,12 +514,13 @@
                                 UserGroupInformation ticket,
                                 Configuration conf,
                                 SocketFactory factory,
-                                int rpcTimeout) throws IOException {    
+                                int rpcTimeout,
+                                RetryPolicy connectionRetryPolicy) throws IOException {    
     if (UserGroupInformation.isSecurityEnabled()) {
       SaslRpcServer.init(conf);
     }
-    return getProtocolEngine(protocol,conf).getProxy(protocol,
-        clientVersion, addr, ticket, conf, factory, rpcTimeout);
+    return getProtocolEngine(protocol,conf).getProxy(protocol, clientVersion,
+        addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy);
   }
 
    /**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
index d431b4a..f74aa88 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
@@ -97,8 +97,9 @@
     return new RemoteException(attrs.getValue("class"),
         attrs.getValue("message")); 
   }
-  
+
+  @Override
   public String toString() {
-    return className + ": " + getMessage();
+    return getClass().getName() + "(" + className + "): " + getMessage();
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
index 09980da..5dc48ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
@@ -26,6 +26,7 @@
 
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -40,7 +41,8 @@
   <T> ProtocolProxy<T> getProxy(Class<T> protocol,
                   long clientVersion, InetSocketAddress addr,
                   UserGroupInformation ticket, Configuration conf,
-                  SocketFactory factory, int rpcTimeout) throws IOException;
+                  SocketFactory factory, int rpcTimeout,
+                  RetryPolicy connectionRetryPolicy) throws IOException;
 
   /** Expert: Make multiple, parallel calls to a set of servers. */
   Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index 2ebf42a..f61f0f2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -31,6 +31,7 @@
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -259,9 +260,14 @@
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
                          InetSocketAddress addr, UserGroupInformation ticket,
                          Configuration conf, SocketFactory factory,
-                         int rpcTimeout)
+                         int rpcTimeout, RetryPolicy connectionRetryPolicy)
     throws IOException {    
 
+    if (connectionRetryPolicy != null) {
+      throw new UnsupportedOperationException(
+          "Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
+    }
+
     T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
         new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf,
             factory, rpcTimeout));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index da8fab2..892ba07 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -19,6 +19,7 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -55,8 +56,8 @@
   /** InnerNode represents a switch/router of a data center or rack.
    * Different from a leaf node, it has non-null children.
    */
-  private class InnerNode extends NodeBase {
-    private ArrayList<Node> children=new ArrayList<Node>();
+  static class InnerNode extends NodeBase {
+    protected List<Node> children=new ArrayList<Node>();
     private int numOfLeaves;
         
     /** Construct an InnerNode from a path-like string */
@@ -76,7 +77,7 @@
     }
         
     /** @return its children */
-    Collection<Node> getChildren() {return children;}
+    List<Node> getChildren() {return children;}
         
     /** @return the number of children this node has */
     int getNumOfChildren() {
@@ -182,7 +183,23 @@
         }
       }
     }
-        
+
+    /**
+     * Creates a parent node to be added to the list of children.
+     * The node is created with the InnerNode four-argument constructor,
+     * giving it the supplied name, this node's path as its location, this node
+     * as its parent, and a level one below this node's level.
+     * 
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #add(Node)} method.
+     * 
+     * @param parentName The name of the parent node
+     * @return A new inner node
+     * @see InnerNode#InnerNode(String, String, InnerNode, int)
+     */
+    protected InnerNode createParentNode(String parentName) {
+      return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
+    }
+
     /** Remove node <i>n</i> from the subtree of this node
      * @param n node to be deleted 
      * @return true if the node is deleted; false otherwise
@@ -263,7 +280,7 @@
      * @param excludedNode an excluded node (can be null)
      * @return
      */
-    private Node getLeaf(int leafIndex, Node excludedNode) {
+    Node getLeaf(int leafIndex, Node excludedNode) {
       int count=0;
       // check if the excluded node a leaf
       boolean isLeaf =
@@ -308,7 +325,21 @@
         return null;
       }
     }
-        
+    
+    /**
+     * Determine whether the children are leaves; the default implementation
+     * calls {@link #isRack()}.
+     * <p>To be overridden in subclasses for specific InnerNode implementations,
+     * as an alternative to overriding the full {@link #getLeaf(int, Node)} method.
+     * 
+     * @return true if children are leaves, false otherwise
+     */
+    protected boolean areChildrenLeaves() {
+      return isRack();
+    }
+
+    /**
+     * Get number of leaves.
+     */
     int getNumOfLeaves() {
       return numOfLeaves;
     }
@@ -317,18 +348,18 @@
   /**
    * the root cluster map
    */
-  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+  InnerNode clusterMap;
   /** Depth of all leaf nodes */
   private int depthOfAllLeaves = -1;
   /** rack counter */
-  private int numOfRacks = 0;
+  protected int numOfRacks = 0;
   /** the lock used to manage access */
-  private ReadWriteLock netlock;
-    
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+
   public NetworkTopology() {
-    netlock = new ReentrantReadWriteLock();
+    clusterMap = new InnerNode(InnerNode.ROOT);
   }
-    
+
   /** Add a leaf node
    * Update node counter & rack counter if necessary
    * @param node node to be added; can be null
@@ -344,7 +375,7 @@
     }
     netlock.writeLock().lock();
     try {
-      Node rack = getNode(node.getNetworkLocation());
+      Node rack = getNodeForNetworkLocation(node);
       if (rack != null && !(rack instanceof InnerNode)) {
         throw new IllegalArgumentException("Unexpected data node " 
                                            + node.toString() 
@@ -376,7 +407,26 @@
       netlock.writeLock().unlock();
     }
   }
-    
+  
+  /**
+   * Return a reference to the node given its string representation.
+   * Default implementation delegates to {@link #getNode(String)}.
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as an alternative to overriding the full
+   * {@link #add(Node)} method.
+   * 
+   * @param node the node whose network location string is used to look up
+   * the corresponding node in the tree.
+   * @return a reference to the node; null if the node is not in the tree
+   * 
+   * @see #add(Node)
+   * @see #getNode(String)
+   */
+  protected Node getNodeForNetworkLocation(Node node) {
+    return getNode(node.getNetworkLocation());
+  }
+  
   /** Remove a node
    * Update node counter and rack counter if necessary
    * @param node node to be removed; can be null
@@ -403,7 +453,7 @@
       netlock.writeLock().unlock();
     }
   }
-       
+
   /** Check if the tree contains node <i>node</i>
    * 
    * @param node a node
@@ -443,7 +493,21 @@
       netlock.readLock().unlock();
     }
   }
-    
+  
+  /** Return the rack string for a given network location; the default
+   *  implementation returns the location itself.
+   * 
+   * To be overridden in subclasses for specific NetworkTopology 
+   * implementations.
+   * @param loc
+   *          a path-like string representation of a network location
+   * @return a rack string
+   */
+  public String getRack(String loc) {
+    return loc;
+  }
+  
   /** @return the total number of racks */
   public int getNumOfRacks() {
     netlock.readLock().lock();
@@ -453,7 +517,7 @@
       netlock.readLock().unlock();
     }
   }
-    
+
   /** @return the total number of leaf nodes */
   public int getNumOfLeaves() {
     netlock.readLock().lock();
@@ -463,7 +527,7 @@
       netlock.readLock().unlock();
     }
   }
-    
+
   /** Return the distance between two nodes
    * It is assumed that the distance from one node to its parent is 1
    * The distance between two nodes is calculated by summing up their distances
@@ -509,8 +573,8 @@
       return Integer.MAX_VALUE;
     }
     return dis+2;
-  } 
-    
+  }
+
   /** Check if two nodes are on the same rack
    * @param node1 one node (can be null)
    * @param node2 another node (can be null)
@@ -525,13 +589,44 @@
       
     netlock.readLock().lock();
     try {
-      return node1.getParent()==node2.getParent();
+      return isSameParents(node1, node2);
     } finally {
       netlock.readLock().unlock();
     }
   }
-    
-  final private static Random r = new Random();
+  
+  /**
+   * Check if network topology is aware of NodeGroup
+   */
+  public boolean isNodeGroupAware() {
+    return false;
+  }
+  
+  /** 
+   * Return false directly as this topology is not aware of NodeGroup; to be overridden in subclasses
+   */
+  public boolean isOnSameNodeGroup(Node node1, Node node2) {
+    return false;
+  }
+
+  /**
+   * Compare the parents of each node for equality
+   * 
+   * <p>To be overridden in subclasses for specific NetworkTopology 
+   * implementations, as an alternative to overriding the full 
+   * {@link #isOnSameRack(Node, Node)} method.
+   * 
+   * @param node1 the first node to compare
+   * @param node2 the second node to compare
+   * @return true if their parents are equal, false otherwise
+   * 
+   * @see #isOnSameRack(Node, Node)
+   */
+  protected boolean isSameParents(Node node1, Node node2) {
+    return node1.getParent()==node2.getParent();
+  }
+
+  final protected static Random r = new Random();
   /** randomly choose one node from <i>scope</i>
    * if scope starts with ~, choose one from the all nodes except for the
    * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
@@ -550,7 +645,7 @@
       netlock.readLock().unlock();
     }
   }
-    
+
   private Node chooseRandom(String scope, String excludedScope){
     if (excludedScope != null) {
       if (scope.startsWith(excludedScope)) {
@@ -579,7 +674,25 @@
     int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
-       
+
+  /** Return the leaves in <i>scope</i>
+   * @param scope a path string
+   * @return leaf nodes under the specified scope
+   */
+  public List<Node> getLeaves(String scope) {
+    Node node = getNode(scope);
+    List<Node> leafNodes = new ArrayList<Node>();
+    if (!(node instanceof InnerNode)) {
+      leafNodes.add(node);
+    } else {
+      InnerNode innerNode = (InnerNode) node;
+      for (int i=0;i<innerNode.getNumOfLeaves();i++) {
+        leafNodes.add(innerNode.getLeaf(i, null));
+      }
+    }
+    return leafNodes;
+  }
+
   /** return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>
    * if scope starts with ~, return the number of nodes that are not
    * in <i>scope</i> and <i>excludedNodes</i>; 
@@ -619,7 +732,7 @@
       netlock.readLock().unlock();
     }
   }
-    
+
   /** convert a network tree to a string */
   public String toString() {
     // print the number of racks
@@ -640,13 +753,12 @@
     return tree.toString();
   }
 
-  /* swap two array items */
-  static private void swap(Node[] nodes, int i, int j) {
+  /** swap two array items */
+  static protected void swap(Node[] nodes, int i, int j) {
     Node tempNode;
     tempNode = nodes[j];
     nodes[j] = nodes[i];
     nodes[i] = tempNode;
-    
   }
   
   /** Sort nodes array by their distances to <i>reader</i>
@@ -697,4 +809,5 @@
       swap(nodes, 0, r.nextInt(nodes.length));
     }
   }
+  
 }
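The NetworkTopology changes above mostly open protected extension points (createParentNode, areChildrenLeaves, getNodeForNetworkLocation, isSameParents, getRack) and add getLeaves, so subclasses can specialize small pieces of behavior instead of copying whole methods. A minimal usage sketch of the classic three-layer topology and the new getLeaves helper, assuming hypothetical host names and locations (not part of the patch):

  NetworkTopology topo = new NetworkTopology();
  // Leaf locations use the classic /data-center/rack form.
  topo.add(new NodeBase("host1", "/d1/rack1"));
  topo.add(new NodeBase("host2", "/d1/rack2"));
  List<Node> leaves = topo.getLeaves("/d1");        // both hosts
  boolean sameRack = topo.isOnSameRack(
      topo.getNode("/d1/rack1/host1"),
      topo.getNode("/d1/rack2/host2"));             // false: different racks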
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
new file mode 100644
index 0000000..6066cd2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The class extends NetworkTopology to represent a cluster of computers with
+ *  a four-layer hierarchical network topology.
+ * In this network topology, leaves represent data nodes (computers) and inner
+ * nodes represent switches/routers that manage traffic in/out of data centers,
+ * racks, or physical hosts (with virtual switches).
+ * 
+ * @see NetworkTopology
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class NetworkTopologyWithNodeGroup extends NetworkTopology {
+
+  public final static String DEFAULT_NODEGROUP = "/default-nodegroup";
+
+  public NetworkTopologyWithNodeGroup() {
+    clusterMap = new InnerNodeWithNodeGroup(InnerNode.ROOT);
+  }
+
+  @Override
+  protected Node getNodeForNetworkLocation(Node node) {
+    // if the node only has default rack info, we need to add the default
+    // nodegroup info
+    if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+      node.setNetworkLocation(node.getNetworkLocation()
+          + DEFAULT_NODEGROUP);
+    }
+    Node nodeGroup = getNode(node.getNetworkLocation());
+    if (nodeGroup == null) {
+      nodeGroup = new InnerNode(node.getNetworkLocation());
+    }
+    return getNode(nodeGroup.getNetworkLocation());
+  }
+
+  @Override
+  public String getRack(String loc) {
+    netlock.readLock().lock();
+    try {
+      loc = InnerNode.normalize(loc);
+      Node locNode = getNode(loc);
+      if (locNode instanceof InnerNodeWithNodeGroup) {
+        InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+        if (node.isRack()) {
+          return loc;
+        } else if (node.isNodeGroup()) {
+          return node.getNetworkLocation();
+        } else {
+          // may be a data center
+          return null;
+        }
+      } else {
+        // not in cluster map, don't handle it
+        return loc;
+      }
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Given a string representation of a network location, return the node
+   * group it maps to.
+   * 
+   * @param loc
+   *            a path-like string representation of a network location
+   * @return a node group string
+   */
+  public String getNodeGroup(String loc) {
+    netlock.readLock().lock();
+    try {
+      loc = InnerNode.normalize(loc);
+      Node locNode = getNode(loc);
+      if (locNode instanceof InnerNodeWithNodeGroup) {
+        InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+        if (node.isNodeGroup()) {
+          return loc;
+        } else if (node.isRack()) {
+          // the node group for a rack is not well defined
+          return null;
+        } else {
+          // may be a leaf node
+          return getNodeGroup(node.getNetworkLocation());
+        }
+      } else {
+        // not in cluster map, don't handle it
+        return loc;
+      }
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public boolean isOnSameRack( Node node1,  Node node2) {
+    if (node1 == null || node2 == null ||
+        node1.getParent() == null || node2.getParent() == null) {
+      return false;
+    }
+      
+    netlock.readLock().lock();
+    try {
+      return isSameParents(node1.getParent(), node2.getParent());
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Check if two nodes are on the same node group (hypervisor). The
+   * assumption here is that both nodes are leaf nodes.
+   * 
+   * @param node1
+   *            one node (can be null)
+   * @param node2
+   *            another node (can be null)
+   * @return true if node1 and node2 are on the same node group; false
+   *         otherwise
+   * @exception IllegalArgumentException
+   *                when either node1 or node2 is null, or node1 or node2 do
+   *                not belong to the cluster
+   */
+  @Override
+  public boolean isOnSameNodeGroup(Node node1, Node node2) {
+    if (node1 == null || node2 == null) {
+      return false;
+    }
+    netlock.readLock().lock();
+    try {
+      return isSameParents(node1, node2);
+    } finally {
+      netlock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Check if network topology is aware of NodeGroup
+   */
+  @Override
+  public boolean isNodeGroupAware() {
+    return true;
+  }
+
+  /** Add a leaf node
+   * Update node counter & rack counter if necessary
+   * @param node node to be added; can be null
+   * @exception IllegalArgumentException if adding a node under a leaf node,
+   *                                     or if the node to be added is not a leaf
+   */
+  @Override
+  public void add(Node node) {
+    if (node==null) return;
+    if( node instanceof InnerNode ) {
+      throw new IllegalArgumentException(
+        "Not allow to add an inner node: "+NodeBase.getPath(node));
+    }
+    netlock.writeLock().lock();
+    try {
+      Node rack = null;
+
+      // if the node only has default rack info, we need to add the default 
+      // nodegroup info
+      if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+        node.setNetworkLocation(node.getNetworkLocation() + 
+            NetworkTopologyWithNodeGroup.DEFAULT_NODEGROUP);
+      }
+      Node nodeGroup = getNode(node.getNetworkLocation());
+      if (nodeGroup == null) {
+        nodeGroup = new InnerNodeWithNodeGroup(node.getNetworkLocation());
+      }
+      rack = getNode(nodeGroup.getNetworkLocation());
+
+      if (rack != null && !(rack instanceof InnerNode)) {
+        throw new IllegalArgumentException("Unexpected data node " 
+            + node.toString() 
+            + " at an illegal network location");
+      }
+      if (clusterMap.add(node)) {
+        LOG.info("Adding a new node: " + NodeBase.getPath(node));
+        if (rack == null) {
+          // We only track rack number here
+          numOfRacks++;
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("NetworkTopology became:\n" + this.toString());
+      }
+    } finally {
+      netlock.writeLock().unlock();
+    }
+  }
+
+  /** Remove a node
+   * Update node counter and rack counter if necessary
+   * @param node node to be removed; can be null
+   */
+  @Override
+  public void remove(Node node) {
+    if (node==null) return;
+    if( node instanceof InnerNode ) {
+      throw new IllegalArgumentException(
+          "Not allow to remove an inner node: "+NodeBase.getPath(node));
+    }
+    LOG.info("Removing a node: "+NodeBase.getPath(node));
+    netlock.writeLock().lock();
+    try {
+      if (clusterMap.remove(node)) {
+        Node nodeGroup = getNode(node.getNetworkLocation());
+        if (nodeGroup == null) {
+          nodeGroup = new InnerNode(node.getNetworkLocation());
+        }
+        InnerNode rack = (InnerNode)getNode(nodeGroup.getNetworkLocation());
+        if (rack == null) {
+          numOfRacks--;
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("NetworkTopology became:\n" + this.toString());
+      }
+    } finally {
+      netlock.writeLock().unlock();
+    }
+  }
+
+  /** Sort nodes array by their distances to <i>reader</i>
+   * It linearly scans the array; if a local node is found, it is swapped with
+   * the first element of the array.
+   * If a local node group node is found, swap it with the first element 
+   * following the local node.
+   * If a local rack node is found, swap it with the first element following
+   * the local node group node.
+   * If neither a local node, a local node group node, nor a local rack node is
+   * found, put a random replica at position 0.
+   * The rest of the nodes are left untouched.
+   * @param reader the node that wishes to read a block from one of the nodes
+   * @param nodes the list of nodes containing data for the reader
+   */
+  @Override
+  public void pseudoSortByDistance( Node reader, Node[] nodes ) {
+
+    if (reader != null && !this.contains(reader)) {
+      // if reader is not a datanode (not in NetworkTopology tree), we will 
+      // replace this reader with a sibling leaf node in tree.
+      Node nodeGroup = getNode(reader.getNetworkLocation());
+      if (nodeGroup != null && nodeGroup instanceof InnerNode) {
+        InnerNode parentNode = (InnerNode) nodeGroup;
+        // replace reader with the first children of its parent in tree
+        reader = parentNode.getLeaf(0, null);
+      } else {
+        return;
+      }
+    }
+    int tempIndex = 0;
+    int localRackNode = -1;
+    int localNodeGroupNode = -1;
+    if (reader != null) {  
+      //scan the array to find the local node & local rack node
+      for (int i = 0; i < nodes.length; i++) {
+        if (tempIndex == 0 && reader == nodes[i]) { //local node
+          //swap the local node and the node at position 0
+          if (i != 0) {
+            swap(nodes, tempIndex, i);
+          }
+          tempIndex=1;
+
+          if (localRackNode != -1 && (localNodeGroupNode !=-1)) {
+            if (localRackNode == 0) {
+              localRackNode = i;
+            }
+            if (localNodeGroupNode == 0) {
+              localNodeGroupNode = i;
+            }
+            break;
+          }
+        } else if (localNodeGroupNode == -1 && isOnSameNodeGroup(reader, 
+            nodes[i])) {
+          //local node group
+          localNodeGroupNode = i;
+          // node local and rack local are already found
+          if(tempIndex != 0 && localRackNode != -1) break;
+        } else if (localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
+          localRackNode = i;
+          if (tempIndex != 0 && localNodeGroupNode != -1) break;
+        }
+      }
+
+      // swap the local nodegroup node and the node at position tempIndex
+      if(localNodeGroupNode != -1 && localNodeGroupNode != tempIndex) {
+        swap(nodes, tempIndex, localNodeGroupNode);
+        if (localRackNode == tempIndex) {
+          localRackNode = localNodeGroupNode;
+        }
+        tempIndex++;
+      }
+
+      // swap the local rack node and the node at position tempIndex
+      if(localRackNode != -1 && localRackNode != tempIndex) {
+        swap(nodes, tempIndex, localRackNode);
+        tempIndex++;
+      }
+    }
+
+    // put a random node at position 0 if there is not a local/local-nodegroup/
+    // local-rack node
+    if (tempIndex == 0 && localNodeGroupNode == -1 && localRackNode == -1
+        && nodes.length != 0) {
+      swap(nodes, 0, r.nextInt(nodes.length));
+    }
+  }
+
+  /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack
+   * or physical host. Unlike a leaf node, it has non-null children.
+   */
+  static class InnerNodeWithNodeGroup extends InnerNode {
+    public InnerNodeWithNodeGroup(String name, String location, 
+        InnerNode parent, int level) {
+      super(name, location, parent, level);
+    }
+
+    public InnerNodeWithNodeGroup(String name, String location) {
+      super(name, location);
+    }
+
+    public InnerNodeWithNodeGroup(String path) {
+      super(path);
+    }
+
+    @Override
+    boolean isRack() {
+      // an inner node with no children is a node group, not a rack
+      if (getChildren().isEmpty()) {
+        return false;
+      }
+
+      Node firstChild = children.get(0);
+
+      if (firstChild instanceof InnerNode) {
+        Node firstGrandChild = (((InnerNode) firstChild).children).get(0);
+        if (firstGrandChild instanceof InnerNode) {
+          // it is datacenter
+          return false;
+        } else {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /**
+     * Determine whether this node represents a node group
+     * 
+     * @return true if it has no child or its children are not InnerNodes
+     */
+    boolean isNodeGroup() {
+      if (children.isEmpty()) {
+        return true;
+      }
+      Node firstChild = children.get(0);
+      if (firstChild instanceof InnerNode) {
+        // it is rack or datacenter
+        return false;
+      }
+      return true;
+    }
+
+    @Override
+    protected InnerNode createParentNode(String parentName) {
+      return new InnerNodeWithNodeGroup(parentName, getPath(this), this,
+          this.getLevel() + 1);
+    }
+
+    @Override
+    protected boolean areChildrenLeaves() {
+      return isNodeGroup();
+    }
+  }
+}
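NetworkTopologyWithNodeGroup expects leaf locations of the form /data-center/rack/node-group, one layer deeper than the default topology. A brief usage sketch, assuming made-up hosts and locations (not part of the patch):

  NetworkTopologyWithNodeGroup topo = new NetworkTopologyWithNodeGroup();
  topo.add(new NodeBase("vm1", "/d1/rack1/nodegroup1"));
  topo.add(new NodeBase("vm2", "/d1/rack1/nodegroup1"));
  topo.add(new NodeBase("vm3", "/d1/rack1/nodegroup2"));

  Node vm1 = topo.getNode("/d1/rack1/nodegroup1/vm1");
  Node vm2 = topo.getNode("/d1/rack1/nodegroup1/vm2");
  Node vm3 = topo.getNode("/d1/rack1/nodegroup2/vm3");

  topo.isOnSameNodeGroup(vm1, vm2);      // true: same node group (hypervisor)
  topo.isOnSameRack(vm1, vm3);           // true: same rack, different node group
  topo.getRack("/d1/rack1/nodegroup1");  // "/d1/rack1"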
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
index c773808..8c3c1b2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public abstract class AbstractDelegationTokenIdentifier 
@@ -173,16 +175,17 @@
 	throw new IOException("Unknown version of delegation token " + 
                               version);
     }
-    owner.readFields(in);
-    renewer.readFields(in);
-    realUser.readFields(in);
+    owner.readFields(in, Text.DEFAULT_MAX_LEN);
+    renewer.readFields(in, Text.DEFAULT_MAX_LEN);
+    realUser.readFields(in, Text.DEFAULT_MAX_LEN);
     issueDate = WritableUtils.readVLong(in);
     maxDate = WritableUtils.readVLong(in);
     sequenceNumber = WritableUtils.readVInt(in);
     masterKeyId = WritableUtils.readVInt(in);
   }
 
-  public void write(DataOutput out) throws IOException {
+  @VisibleForTesting
+  void writeImpl(DataOutput out) throws IOException {
     out.writeByte(VERSION);
     owner.write(out);
     renewer.write(out);
@@ -193,6 +196,19 @@
     WritableUtils.writeVInt(out, masterKeyId);
   }
   
+  public void write(DataOutput out) throws IOException {
+    if (owner.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("owner is too long to be serialized!");
+    }
+    if (renewer.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("renewer is too long to be serialized!");
+    }
+    if (realUser.getLength() > Text.DEFAULT_MAX_LEN) {
+      throw new IOException("realuser is too long to be serialized!");
+    }
+    writeImpl(out);
+  }
+  
   public String toString() {
     StringBuilder buffer = new StringBuilder();
     buffer
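The delegation-token change above bounds owner, renewer and realUser to Text.DEFAULT_MAX_LEN on both paths: readFields() now passes the limit to Text.readFields, and write() refuses oversized fields before delegating to the test-visible writeImpl(). The guard pattern in isolation, as a hedged sketch (the helper below is hypothetical, not part of the patch):

  // Reject a Text field that exceeds the wire-format limit before writing it.
  static void checkLength(Text field, String name) throws IOException {
    if (field.getLength() > Text.DEFAULT_MAX_LEN) {
      throw new IOException(name + " is too long to be serialized!");
    }
  }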
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index f907e3e..989c96a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -30,7 +30,7 @@
 
 /**
  * The <code>ShutdownHookManager</code> enables running shutdownHook
- * in a determistic order, higher priority first.
+ * in a deterministic order, higher priority first.
  * <p/>
  * The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
  * This class registers a single JVM shutdownHook and run all the
@@ -169,7 +169,7 @@
   }
 
   /**
-   * Indicates if a shutdownHook is registered or nt.
+   * Indicates if a shutdownHook is registered or not.
    *
    * @param shutdownHook shutdownHook to check if registered.
    * @return TRUE/FALSE depending if the shutdownHook is is registered.
@@ -177,5 +177,14 @@
   public boolean hasShutdownHook(Runnable shutdownHook) {
     return hooks.contains(new HookEntry(shutdownHook, 0));
   }
+  
+  /**
+   * Indicates if shutdown is in progress or not.
+   * 
+   * @return TRUE if the shutdown is in progress, otherwise FALSE.
+   */
+  public boolean isShutdownInProgress() {
+    return shutdownInProgress.get();
+  }
 
 }
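The new isShutdownInProgress() accessor lets long-running services skip work once the JVM has begun running its shutdown hooks. A short usage sketch (the surrounding periodic task is hypothetical):

  // Skip starting new work if shutdown hooks are already running.
  if (ShutdownHookManager.get().isShutdownInProgress()) {
    return;
  }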
diff --git a/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg b/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg
deleted file mode 100644
index a69c197..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop-native library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-
diff --git a/hadoop-common-project/hadoop-common/src/main/native/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/Makefile.am
deleted file mode 100644
index c4ca564..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/Makefile.am
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os-arch}.
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_NAME
-#    * OS_ARCH 
-#    All these are setup by build.xml. 
-#
-
-# Export $(PLATFORM) to prevent proliferation of sub-shells
-export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
-
-ACLOCAL_AMFLAGS = -I m4 
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src \
-              -I$(HADOOP_NATIVE_SRCDIR)/javah
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES = src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c \
-                       src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c \
-                       src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c \
-                       src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c \
-                       src/org/apache/hadoop/io/compress/lz4/lz4.c \
-                       src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c \
-                       src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c \
-                       src/org/apache/hadoop/security/getGroup.c \
-                       src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c \
-                       src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c \
-                       src/org/apache/hadoop/io/nativeio/file_descriptor.c \
-                       src/org/apache/hadoop/io/nativeio/errno_enum.c \
-                       src/org/apache/hadoop/io/nativeio/NativeIO.c \
-                       src/org/apache/hadoop/util/NativeCrc32.c \
-                       src/org/apache/hadoop/util/bulk_crc32.c
-
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
deleted file mode 100644
index 93e05b8..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
+++ /dev/null
@@ -1,28 +0,0 @@
-# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
-# --------------------------------------------------
-# Compute the 'actual' dynamic-library used 
-# for LIBRARY and set it to PREPROC_SYMBOL
-AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
-[
-AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
-  [
-  echo '$2' > conftest.c
-  if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
-    dnl Try objdump and ldd in that order to get the dynamic library
-    if test ! -z "`which objdump | grep -v 'no objdump'`"; then
-      ac_cv_libname_$1="`objdump -p conftest | grep NEEDED | grep $1 | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`"
-    elif test ! -z "`which ldd | grep -v 'no ldd'`"; then
-      ac_cv_libname_$1="`ldd conftest | grep $1 | sed 's/^[[[^A-Za-z0-9]]]*\([[[A-Za-z0-9\.]]]*\)[[[^A-Za-z0-9]]]*=>.*$/\"\1\"/'`"
-    elif test ! -z "`which otool | grep -v 'no otool'`"; then
-      ac_cv_libname_$1=\"`otool -L conftest | grep $1 | sed -e 's/^[	 ]*//' -e 's/ .*//' -e 's/.*\/\(.*\)$/\1/'`\";
-    else
-      AC_MSG_ERROR(Can't find either 'objdump' or 'ldd' or 'otool' to compute the dynamic library for '-l$1')
-    fi
-  else
-    ac_cv_libname_$1=libnotfound.so
-  fi
-  rm -f conftest*
-  ]
-)
-AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
-])# AC_COMPUTE_NEEDED_DSO
diff --git a/hadoop-common-project/hadoop-common/src/main/native/configure.ac b/hadoop-common-project/hadoop-common/src/main/native/configure.ac
deleted file mode 100644
index 34408d6..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/configure.ac
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# configure.ac for hadoop native code. 
-#
-
-# Notes: 
-# 1. This configure.ac depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_NAME
-#    * OS_ARCH 
-#    All these are setup by build.xml. 
-
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-#
-
-AC_PREREQ(2.59)
-AC_INIT(src/org_apache_hadoop.h)
-AC_CONFIG_SRCDIR([src/org_apache_hadoop.h])
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([m4])
-AC_CONFIG_HEADER([config.h])
-AC_SYS_LARGEFILE
-AC_GNU_SOURCE
-
-AM_INIT_AUTOMAKE(hadoop,1.0.0)
-
-# Checks for programs.
-AC_PROG_CC
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-dnl Check for '-ldl'
-AC_CHECK_LIB([dl], [dlopen])
-
-dnl Check for '-ljvm'
-JNI_LDFLAGS=""
-if test $JAVA_HOME != ""
-then
-  JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
-  JVMSOPATH=`find $JAVA_HOME/jre/ -name libjvm.so | head -n 1`
-  JNI_LDFLAGS="$JNI_LDFLAGS -L`dirname $JVMSOPATH`"
-fi
-LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
-AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
-AC_SUBST([JNI_LDFLAGS])
-
-# Checks for header files.
-dnl Check for Ansi C headers
-AC_HEADER_STDC
-
-dnl Check for other standard C headers
-AC_CHECK_HEADERS([stdio.h stddef.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl Check for JNI headers
-JNI_CPPFLAGS=""
-if test $JAVA_HOME != ""
-then
-  for dir in `find $JAVA_HOME/include -follow -type d`
-  do
-    JNI_CPPFLAGS="$JNI_CPPFLAGS -I$dir"
-  done
-fi
-cppflags_bak=$CPPFLAGS
-CPPFLAGS="$CPPFLAGS $JNI_CPPFLAGS"
-AC_CHECK_HEADERS([jni.h], [], AC_MSG_ERROR([Native java headers not found. Is \$JAVA_HOME set correctly?]))
-CPPFLAGS=$cppflags_bak
-AC_SUBST([JNI_CPPFLAGS])
-
-dnl Check for zlib headers
-AC_CHECK_HEADERS([zlib.h zconf.h],
-  AC_COMPUTE_NEEDED_DSO(z,
-    [#include "zlib.h"
-    int main(int argc, char **argv){zlibVersion();return 0;}],
-    HADOOP_ZLIB_LIBRARY),
-  AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
-
-dnl Check for snappy headers
-AC_CHECK_HEADERS([snappy-c.h],
-  AC_COMPUTE_NEEDED_DSO(snappy,
-    [#include "snappy-c.h"
-    int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
-    HADOOP_SNAPPY_LIBRARY),
-  AC_MSG_WARN(Snappy headers were not found... building without snappy.))
-
-dnl Check for headers needed by the native Group resolution implementation
-AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl check for posix_fadvise
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(posix_fadvise)])
-
-dnl check for sync_file_range
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(sync_file_range)])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-
-# Checks for library functions.
-AC_CHECK_FUNCS([memset])
-
-# Check for nonstandard STRERROR_R
-AC_FUNC_STRERROR_R
-
-AM_CONDITIONAL([SPECIFY_DATA_MODEL], [case $host_cpu in arm*) false;; *) true;; esac])
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT
-
-#
-#vim: sw=2: ts=2: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am
deleted file mode 100644
index 9b536ff..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building libhadoop.so 
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/lib 
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * OS_ARCH 
-#    All these are setup by build.xml and/or the top-level makefile.
-#
-
-# Add .lo files in $(SUBDIRS) to construct libhadoop.so
-HADOOP_OBJS = $(foreach path,$(addprefix ../,$(SUBDIRS)),$(wildcard $(path)/*.lo))
-AM_LDFLAGS = @JNI_LDFLAGS@
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES = 
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = $(HADOOP_OBJS) -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
index d52a4f6..641ecd7 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
 
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
index 547b027..3eebc18 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
 
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
index 13991c2..96a2402 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
@@ -123,5 +99,3 @@
 
   return (jint)compressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
index 767c5f4..a5f07ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
@@ -16,36 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop_io_compress_snappy.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
 
@@ -127,5 +103,3 @@
 
   return (jint)uncompressed_direct_buf_len;
 }
-
-#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
index 815e030..3e99d5d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
@@ -17,42 +17,13 @@
  */
 
 
-#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+#ifndef ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 
-
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-  #if defined HAVE_STDDEF_H
-    #include <stddef.h>
-  #else
-    #error 'stddef.h not found'
-  #endif
-
-  #if defined HAVE_SNAPPY_C_H
-    #include <snappy-c.h>
-  #else
-    #error 'Please install snappy-development packages for your platform.'
-  #endif
-
-  #if defined HAVE_DLFCN_H
-    #include <dlfcn.h>
-  #else
-    #error "dlfcn.h not found"
-  #endif
-
-  #if defined HAVE_JNI_H
-    #include <jni.h>
-  #else
-    #error 'jni.h not found'
-  #endif
-
-  #include "org_apache_hadoop.h"
-
-#endif //define HADOOP_SNAPPY_LIBRARY
+#include "org_apache_hadoop.h"
+#include <dlfcn.h>
+#include <jni.h>
+#include <snappy-c.h>
+#include <stddef.h>
 
 #endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am
deleted file mode 100644
index 821f33f..0000000
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building native 'zlib' for hadoop.
-#
-
-#
-# Notes: 
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/$(subdir) .
-# 2. This makefile depends on the following environment variables to function correctly:
-#    * HADOOP_NATIVE_SRCDIR 
-#    * JAVA_HOME
-#    * JVM_DATA_MODEL
-#    * OS_ARCH 
-#    * PLATFORM
-#    All these are setup by build.xml and/or the top-level makefile.
-# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are
-#    assumed to be in $(HADOOP_PREFIX)/build/native/src/org/apache/hadoop/io/compress/zlib.
-#
-
-# The 'vpath directive' to locate the actual source files 
-vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir)
-
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-noinst_LTLIBRARIES = libnativezlib.la
-libnativezlib_la_SOURCES = ZlibCompressor.c ZlibDecompressor.c
-libnativezlib_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
index 9ada3f0..689c783 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
-
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
 
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
index 3047dba..6abe363 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
@@ -16,34 +16,12 @@
  * limitations under the License.
  */
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
-  #error 'stdio.h not found'
-#endif  
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
-  #error 'stdlib.h not found'
-#endif  
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
-  #error 'string.h not found'
-#endif  
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error 'dlfcn.h not found'
-#endif  
-
+#include "config.h"
 #include "org_apache_hadoop_io_compress_zlib.h"
 #include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
 
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
index 16b607b..c53aa53 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
@@ -19,40 +19,13 @@
 #if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <jni.h>
+#include <stddef.h>
+#include <zconf.h>
+#include <zlib.h>
 
-#if defined HAVE_STDDEF_H
-  #include <stddef.h>
-#else
-  #error 'stddef.h not found'
-#endif
-    
-#if defined HAVE_ZLIB_H
-  #include <zlib.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-    
-#if defined HAVE_ZCONF_H
-  #include <zconf.h>
-#else
-  #error 'Please install zlib-development packages for your platform.'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
-
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
-
+#include "config.h"
 #include "org_apache_hadoop.h"
 
 /* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index fbcf956..c08ea03 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -32,6 +29,7 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_io_nativeio_NativeIO.h"
 #include "file_descriptor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
index 869c2ba..dd51c0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 
-// get the autoconf settings
-#include "config.h"
-
 #include <arpa/inet.h>
 #include <assert.h>
 #include <stdlib.h>
@@ -26,6 +23,7 @@
 #include <string.h>
 #include <unistd.h>
 
+#include "config.h"
 #include "org_apache_hadoop.h"
 #include "org_apache_hadoop_util_NativeCrc32.h"
 #include "gcc_optimizations.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
index 7a777c2..a50c41d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
@@ -24,21 +24,10 @@
 #if !defined ORG_APACHE_HADOOP_H
 #define ORG_APACHE_HADOOP_H
 
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
+#include <dlfcn.h>
+#include <jni.h>
 
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
-  #error "dlfcn.h not found"
-#endif  
-
-#if defined HAVE_JNI_H    
-  #include <jni.h>
-#else
-  #error 'jni.h not found'
-#endif
+#include "config.h"
 
 /* A helper macro to 'throw' a java exception. */ 
 #define THROW(env, exception_name, message) \
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
index d8c731e..77693fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
@@ -65,7 +65,7 @@
 export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
 
 # Where log files are stored in the secure data environment.
 export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 1e72e36..c968ff2 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -599,10 +599,9 @@
   </description>
 </property>
 
-<!-- Rack Configuration -->
-
+<!-- Topology Configuration -->
 <property>
-	<name>net.topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
     invokes a script specified in net.topology.script.file.name to resolve
@@ -612,6 +611,13 @@
 </property>
 
 <property>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopology</value>
+  <description> The default implementation of NetworkTopology, which is the classic three-layer one.
+  </description>
+</property>
+
+<property>
   <name>net.topology.script.file.name</name>
   <value></value>
   <description> The script name that should be invoked to resolve DNS names to
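The new net.topology.impl key names the NetworkTopology implementation to use; with the class added in this patch it can point at the node-group-aware variant. A hedged sketch of how a component might honour the key, assuming it resolves the class reflectively (the exact consumer is outside this hunk):

  Configuration conf = new Configuration();
  conf.set("net.topology.impl",
      "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
  NetworkTopology topology = ReflectionUtils.newInstance(
      conf.getClass("net.topology.impl", NetworkTopology.class,
          NetworkTopology.class), conf);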
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 34a1780..4878031 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -663,6 +663,26 @@
                  conf.getPattern("test.pattern3", defaultPattern).pattern());
   }
 
+  public void testPropertySource() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.foo", "bar");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    conf.set("fs.defaultFS", "value");
+    assertEquals(
+        "Resource string returned for a file-loaded property" +
+        " must be a proper absolute path",
+        fileResource,
+        new Path(conf.getPropertySource("test.foo")));
+    assertEquals("Resource string returned for a set() property must be null",
+        null,
+        conf.getPropertySource("fs.defaultFS"));
+    assertEquals("Resource string returned for an unset property must be null",
+        null, conf.getPropertySource("fs.defaultFoo"));
+  }
+
   public void testSocketAddress() throws IOException {
     Configuration conf = new Configuration();
     final String defaultAddr = "host:1";
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
index 11f4d7a..525f28b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
@@ -51,7 +51,19 @@
     /**
      * create the test root on local_fs - the  mount table will point here
      */
-    fsTarget.mkdirs(FileSystemTestHelper.getTestRootPath(fsTarget));
+    Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetOfTests, true);
+    fsTarget.mkdirs(targetOfTests);
+
+    // Setup a link from viewfs to targetfs for the first component of
+    // path of testdir.
+    String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
+        .getPath();
+    int indexOf2ndSlash = testDir.indexOf('/', 1);
+    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
+    ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
+        new Path(testDirFirstComponent)).toUri());
 
     // viewFs://home => fsTarget://home
     String homeDirRoot = fsTarget.getHomeDirectory()
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index d4f5057..60c0703 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -21,9 +21,13 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -32,7 +36,8 @@
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-
+  private static final String TEST_FILE_NAME = "test_file";
+  
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -110,4 +115,41 @@
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
   
+  @Test
+  public void testWriteFully() throws IOException {
+    final int INPUT_BUFFER_LEN = 10000;
+    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+    byte[] input = new byte[INPUT_BUFFER_LEN];
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte)(i & 0xff);
+    }
+    byte[] output = new byte[input.length];
+    
+    try {
+      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+      FileChannel fc = raf.getChannel();
+      ByteBuffer buf = ByteBuffer.wrap(input);
+      IOUtils.writeFully(fc, buf);
+      raf.seek(0);
+      raf.read(output);
+      for (int i = 0; i < input.length; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      buf.rewind();
+      IOUtils.writeFully(fc, buf, HALFWAY);
+      for (int i = 0; i < HALFWAY; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      raf.seek(0);
+      raf.read(output);
+      for (int i = HALFWAY; i < input.length; i++) {
+        assertEquals(input[i - HALFWAY], output[i]);
+      }
+    } finally {
+      File f = new File(TEST_FILE_NAME);
+      if (f.exists()) {
+        f.delete();
+      }
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
index 9bf83b9..21da8c0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
@@ -137,38 +137,38 @@
     }
   }
   
-  public void doTestLimitedIO(String str, int strLen) throws IOException {
+  public void doTestLimitedIO(String str, int len) throws IOException {
     DataOutputBuffer out = new DataOutputBuffer();
     DataInputBuffer in = new DataInputBuffer();
 
     out.reset();
     try {
-      Text.writeString(out, str, strLen);
+      Text.writeString(out, str, len);
       fail("expected writeString to fail when told to write a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
-    Text.writeString(out, str, strLen + 1);
+    Text.writeString(out, str, len + 1);
 
     // test that it reads correctly
     in.reset(out.getData(), out.getLength());
-    in.mark(strLen);
+    in.mark(len);
     String after;
     try {
-      after = Text.readString(in, strLen);
+      after = Text.readString(in, len);
       fail("expected readString to fail when told to read a string " +
           "that was too long!  The string was '" + str + "'");
     } catch (IOException e) {
     }
     in.reset();
-    after = Text.readString(in, strLen + 1);
+    after = Text.readString(in, len + 1);
     assertTrue(str.equals(after));
   }
   
   public void testLimitedIO() throws Exception {
-    doTestLimitedIO("abcd", 4);
-    doTestLimitedIO("", 0);
-    doTestLimitedIO("1", 1);
+    doTestLimitedIO("abcd", 3);
+    doTestLimitedIO("foo bar baz", 10);
+    doTestLimitedIO("1", 0);
   }
 
   public void testCompare() throws Exception {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index cc0c5c9..5d3d335 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -18,50 +18,55 @@
 
 package org.apache.hadoop.ipc;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.Closeable;
 import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslImpl;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslProtocol;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.MockitoUtil;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 import com.google.protobuf.DescriptorProtos;
 import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
 
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
 /** Unit tests for RPC. */
 @SuppressWarnings("deprecation")
 public class TestRPC {
@@ -250,7 +255,8 @@
     @Override
     public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
         InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-        SocketFactory factory, int rpcTimeout) throws IOException {
+        SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+        ) throws IOException {
       T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
               new Class[] { protocol }, new StoppedInvocationHandler());
       return new ProtocolProxy<T>(protocol, proxy, false);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
index e2388ad..9126810 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.security.token.delegation;
 
 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
@@ -387,4 +388,46 @@
     }
   }
 
+  private boolean testDelegationTokenIdentiferSerializationRoundTrip(Text owner,
+      Text renewer, Text realUser) throws IOException {
+    TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(
+        owner, renewer, realUser);
+    DataOutputBuffer out = new DataOutputBuffer();
+    dtid.writeImpl(out);
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(out.getData(), out.getLength());
+    try {
+      TestDelegationTokenIdentifier dtid2 =
+          new TestDelegationTokenIdentifier();
+      dtid2.readFields(in);
+      assertTrue(dtid.equals(dtid2));
+      return true;
+    } catch(IOException e){
+      return false;
+    }
+  }
+      
+  @Test
+  public void testSimpleDtidSerialization() throws IOException {
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text("realUser")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text(""), new Text("")));
+    assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(""), new Text("b"), new Text("")));
+  }
+  
+  @Test
+  public void testOverlongDtidSerialization() throws IOException {
+    byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
+    for (int i = 0; i < bigBuf.length; i++) {
+      bigBuf[i] = 0;
+    }
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text(bigBuf), new Text("renewer"), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text(bigBuf), new Text("realUser")));
+    assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+        new Text("owner"), new Text("renewer"), new Text(bigBuf)));
+  }
 }
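
The new delegation-token tests are built around a serialize/deserialize round trip: write the identifier into a DataOutputBuffer, read it back from a DataInputBuffer, and treat an IOException on the read side as the expected failure for over-long Text fields (longer than Text.DEFAULT_MAX_LEN). The same round-trip shape works for any Writable; a minimal generic sketch (the helper name roundTrip is illustrative, and it uses Writable.write rather than the identifier-specific writeImpl used in the test):

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Writable;

    public final class WritableRoundTrip {
      /** Serialize value into a buffer, then populate empty from those bytes. */
      static <T extends Writable> T roundTrip(T value, T empty) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        value.write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        empty.readFields(in);
        return empty;   // compare with equals() against the original
      }
    }
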
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
index 37a561e..7178382 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
@@ -15,15 +15,18 @@
 
 import com.google.common.base.Preconditions;
 
+import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.URL;
 import java.net.URLDecoder;
 import java.text.MessageFormat;
 import java.util.Enumeration;
+import java.util.jar.JarFile;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
 import java.util.zip.ZipEntry;
@@ -37,10 +40,37 @@
  */
 public class JarFinder {
 
-  private static void zipDir(File dir, String relativePath, ZipOutputStream zos)
+  private static void copyToZipStream(InputStream is, ZipEntry entry,
+                              ZipOutputStream zos) throws IOException {
+    zos.putNextEntry(entry);
+    byte[] arr = new byte[4096];
+    int read = is.read(arr);
+    while (read > -1) {
+      zos.write(arr, 0, read);
+      read = is.read(arr);
+    }
+    is.close();
+    zos.closeEntry();
+  }
+
+  public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
     throws IOException {
     Preconditions.checkNotNull(relativePath, "relativePath");
     Preconditions.checkNotNull(zos, "zos");
+
+    // by JAR spec, if there is a manifest, it must be the first entry in the
+    // ZIP.
+    File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
+    ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
+    if (!manifestFile.exists()) {
+      zos.putNextEntry(manifestEntry);
+      new Manifest().write(new BufferedOutputStream(zos));
+      zos.closeEntry();
+    } else {
+      InputStream is = new FileInputStream(manifestFile);
+      copyToZipStream(is, manifestEntry, zos);
+    }
+    zos.closeEntry();
     zipDir(dir, relativePath, zos, true);
     zos.close();
   }
@@ -62,17 +92,12 @@
           zipDir(file, relativePath + f.getName() + "/", zos, false);
         }
         else {
-          ZipEntry anEntry = new ZipEntry(relativePath + f.getName());
-          zos.putNextEntry(anEntry);
-          InputStream is = new FileInputStream(f);
-          byte[] arr = new byte[4096];
-          int read = is.read(arr);
-          while (read > -1) {
-            zos.write(arr, 0, read);
-            read = is.read(arr);
+          String path = relativePath + f.getName();
+          if (!path.equals(JarFile.MANIFEST_NAME)) {
+            ZipEntry anEntry = new ZipEntry(path);
+            InputStream is = new FileInputStream(f);
+            copyToZipStream(is, anEntry, zos);
           }
-          is.close();
-          zos.closeEntry();
         }
       }
     }
@@ -88,9 +113,8 @@
                                                    jarDir));
       }
     }
-    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile),
-                                              new Manifest());
-    zipDir(dir, "", zos);
+    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
+    jarDir(dir, "", zos);
   }
 
   /**
@@ -142,5 +166,4 @@
     }
     return null;
   }
-
 }
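
The reason the new jarDir writes META-INF/MANIFEST.MF before anything else is that JarInputStream only exposes a manifest that appears at the very start of the archive (the first entry, or the second if the first is the META-INF/ directory); a manifest buried later in the ZIP is treated as an ordinary entry. A small sketch of the check the updated tests rely on, assuming a jar path on the local file system:

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.jar.JarInputStream;
    import java.util.jar.Manifest;

    public final class ManifestFirstCheck {
      /** True if the manifest was written as a leading entry, as jarDir() guarantees. */
      static boolean hasLeadingManifest(String jarPath) throws IOException {
        JarInputStream jis = new JarInputStream(new FileInputStream(jarPath));
        try {
          Manifest m = jis.getManifest();
          return m != null;
        } finally {
          jis.close();
        }
      }
    }
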
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
index a311a9f..4997b7a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
@@ -22,21 +22,105 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.Properties;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
 
 public class TestJarFinder {
 
   @Test
-  public void testAppend() throws Exception {
+  public void testJar() throws Exception {
 
     //picking a class that is for sure in a JAR in the classpath
     String jar = JarFinder.getJar(LogFactory.class);
     Assert.assertTrue(new File(jar).exists());
+  }
 
+  private static void delete(File file) throws IOException {
+    if (file.getAbsolutePath().length() < 5) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Path [{0}] is too short, not deleting",
+                             file.getAbsolutePath()));
+    }
+    if (file.exists()) {
+      if (file.isDirectory()) {
+        File[] children = file.listFiles();
+        if (children != null) {
+          for (File child : children) {
+            delete(child);
+          }
+        }
+      }
+      if (!file.delete()) {
+        throw new RuntimeException(
+          MessageFormat.format("Could not delete path [{0}]",
+                               file.getAbsolutePath()));
+      }
+    }
+  }
+
+  @Test
+  public void testExpandedClasspath() throws Exception {
     //picking a class that is for sure in a directory in the classpath
     //in this case the JAR is created on the fly
-    jar = JarFinder.getJar(TestJarFinder.class);
+    String jar = JarFinder.getJar(TestJarFinder.class);
     Assert.assertTrue(new File(jar).exists());
   }
 
+  @Test
+  public void testExistingManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testExistingManifest");
+    delete(dir);
+    dir.mkdirs();
+
+    File metaInfDir = new File(dir, "META-INF");
+    metaInfDir.mkdirs();
+    File manifestFile = new File(metaInfDir, "MANIFEST.MF");
+    Manifest manifest = new Manifest();
+    OutputStream os = new FileOutputStream(manifestFile);
+    manifest.write(os);
+    os.close();
+
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
+
+  @Test
+  public void testNoManifest() throws Exception {
+    File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+                        TestJarFinder.class.getName() + "-testNoManifest");
+    delete(dir);
+    dir.mkdirs();
+    File propsFile = new File(dir, "props.properties");
+    Writer writer = new FileWriter(propsFile);
+    new Properties().store(writer, "");
+    writer.close();
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    JarOutputStream zos = new JarOutputStream(baos);
+    JarFinder.jarDir(dir, "", zos);
+    JarInputStream jis =
+      new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+    Assert.assertNotNull(jis.getManifest());
+    jis.close();
+  }
 }
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 7fc6e56..442cdcac 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -52,6 +52,11 @@
       <artifactId>hadoop-yarn-api</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-raid</artifactId>
+      <scope>provided</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -120,6 +125,7 @@
                       run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
+                      run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-raid/target/hadoop-hdfs-raid-${project.version}/* .
                       run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
                       run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .
                       echo
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 4a1a205..fa28ba3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -154,42 +154,34 @@
 
   public static final int HTTP_TEMPORARY_REDIRECT = 307;
 
-
-  /**
-   * Get operations.
-   */
-  public enum GetOpValues {
-    OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIRECTORY, GETCONTENTSUMMARY, GETFILECHECKSUM,
-    GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
-  }
-
-  /**
-   * Post operations.
-   */
-  public static enum PostOpValues {
-    APPEND
-  }
-
-  /**
-   * Put operations.
-   */
-  public static enum PutOpValues {
-    CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES,
-    RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN
-  }
-
-  /**
-   * Delete operations.
-   */
-  public static enum DeleteOpValues {
-    DELETE
-  }
-
   private static final String HTTP_GET = "GET";
   private static final String HTTP_PUT = "PUT";
   private static final String HTTP_POST = "POST";
   private static final String HTTP_DELETE = "DELETE";
 
+  public enum Operation {
+    OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
+    GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
+    GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
+    INSTRUMENTATION(HTTP_GET),
+    APPEND(HTTP_POST),
+    CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
+    SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
+    DELETE(HTTP_DELETE);
+
+    private String httpMethod;
+
+    Operation(String httpMethod) {
+      this.httpMethod = httpMethod;
+    }
+
+    public String getMethod() {
+      return httpMethod;
+    }
+
+  }
+
+
   private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private URI uri;
   private Path workingDir;
@@ -402,10 +394,12 @@
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.OPEN.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.OPEN.toString());
+    HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
+                                           f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
+    return new FSDataInputStream(
+      new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
   }
 
   /**
@@ -508,15 +502,18 @@
    * @see #setPermission(Path, FsPermission)
    */
   @Override
-  public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
-                                   short replication, long blockSize, Progressable progress) throws IOException {
+  public FSDataOutputStream create(Path f, FsPermission permission,
+                                   boolean overwrite, int bufferSize,
+                                   short replication, long blockSize,
+                                   Progressable progress) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.CREATE.toString());
+    params.put(OP_PARAM, Operation.CREATE.toString());
     params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
     params.put(REPLICATION_PARAM, Short.toString(replication));
     params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED);
+    return uploadData(Operation.CREATE.getMethod(), f, params, bufferSize,
+                      HttpURLConnection.HTTP_CREATED);
   }
 
 
@@ -532,10 +529,12 @@
    * @throws IOException
    */
   @Override
-  public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+  public FSDataOutputStream append(Path f, int bufferSize,
+                                   Progressable progress) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PostOpValues.APPEND.toString());
-    return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK);
+    params.put(OP_PARAM, Operation.APPEND.toString());
+    return uploadData(Operation.APPEND.getMethod(), f, params, bufferSize,
+                      HttpURLConnection.HTTP_OK);
   }
 
   /**
@@ -545,9 +544,10 @@
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.RENAME.toString());
+    params.put(OP_PARAM, Operation.RENAME.toString());
     params.put(DESTINATION_PARAM, dst.toString());
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+    HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
+                                           params, src, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(RENAME_JSON);
@@ -580,9 +580,10 @@
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, DeleteOpValues.DELETE.toString());
+    params.put(OP_PARAM, Operation.DELETE.toString());
     params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
-    HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true);
+    HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(DELETE_JSON);
@@ -601,8 +602,9 @@
   @Override
   public FileStatus[] listStatus(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.LISTSTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUSES_JSON);
@@ -647,9 +649,10 @@
   @Override
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.MKDIRS.toString());
+    params.put(OP_PARAM, Operation.MKDIRS.toString());
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true);
+    HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(MKDIRS_JSON);
@@ -668,8 +671,9 @@
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
+                                           params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     json = (JSONObject) json.get(FILE_STATUS_JSON);
@@ -684,9 +688,11 @@
   @Override
   public Path getHomeDirectory() {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETHOMEDIRECTORY.toString());
+    params.put(OP_PARAM, Operation.GETHOMEDIRECTORY.toString());
     try {
-      HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
+      HttpURLConnection conn =
+        getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
+                      new Path(getUri().toString(), "/"), false);
       validateResponse(conn, HttpURLConnection.HTTP_OK);
       JSONObject json = (JSONObject) jsonParse(conn);
       return new Path((String) json.get(HOME_DIR_JSON));
@@ -704,12 +710,14 @@
    * @param groupname If it is null, the original groupname remains unchanged.
    */
   @Override
-  public void setOwner(Path p, String username, String groupname) throws IOException {
+  public void setOwner(Path p, String username, String groupname)
+    throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETOWNER.toString());
+    params.put(OP_PARAM, Operation.SETOWNER.toString());
     params.put(OWNER_PARAM, username);
     params.put(GROUP_PARAM, groupname);
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
+                                           params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -722,9 +730,9 @@
   @Override
   public void setPermission(Path p, FsPermission permission) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString());
+    params.put(OP_PARAM, Operation.SETPERMISSION.toString());
     params.put(PERMISSION_PARAM, permissionToString(permission));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -742,10 +750,11 @@
   @Override
   public void setTimes(Path p, long mtime, long atime) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETTIMES.toString());
+    params.put(OP_PARAM, Operation.SETTIMES.toString());
     params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
     params.put(ACCESS_TIME_PARAM, Long.toString(atime));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+    HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
+                                           params, p, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
 
@@ -761,11 +770,13 @@
    * @throws IOException
    */
   @Override
-  public boolean setReplication(Path src, short replication) throws IOException {
+  public boolean setReplication(Path src, short replication)
+    throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString());
+    params.put(OP_PARAM, Operation.SETREPLICATION.toString());
     params.put(REPLICATION_PARAM, Short.toString(replication));
-    HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+    HttpURLConnection conn =
+      getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) jsonParse(conn);
     return (Boolean) json.get(SET_REPLICATION_JSON);
@@ -814,10 +825,12 @@
   @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
+    HttpURLConnection conn =
+      getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+    JSONObject json =
+      (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
     return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
                               (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
                               (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
@@ -830,10 +843,12 @@
   @Override
   public FileChecksum getFileChecksum(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
-    params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString());
-    HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+    params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
+    HttpURLConnection conn =
+      getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
     validateResponse(conn, HttpURLConnection.HTTP_OK);
-    final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
+    final JSONObject json =
+      (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
     return new FileChecksum() {
       @Override
       public String getAlgorithmName() {
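
The HttpFSFileSystem refactoring collapses the four per-verb enums (GetOpValues, PostOpValues, PutOpValues, DeleteOpValues) into a single Operation enum that carries its own HTTP method, so call sites ask the operation for its verb instead of hard-coding HTTP_GET or HTTP_PUT alongside it. A standalone illustration of the pattern (the names here are illustrative, not the HttpFS API):

    public final class OperationEnumSketch {
      /** Each operation knows the HTTP verb it must be sent with. */
      enum Op {
        OPEN("GET"), CREATE("PUT"), APPEND("POST"), DELETE("DELETE");

        private final String method;

        Op(String method) {
          this.method = method;
        }

        String getMethod() {
          return method;
        }
      }

      public static void main(String[] args) {
        // The verb travels with the operation, so an operation can no longer
        // be paired with the wrong HTTP method at a call site.
        for (Op op : Op.values()) {
          System.out.println(op + " -> " + op.getMethod());
        }
      }
    }
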
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
index 7e73666..abd382d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
@@ -30,7 +30,6 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -43,8 +42,8 @@
   private static final Set<String> UPLOAD_OPERATIONS = new HashSet<String>();
 
   static {
-    UPLOAD_OPERATIONS.add(HttpFSFileSystem.PostOpValues.APPEND.toString());
-    UPLOAD_OPERATIONS.add(HttpFSFileSystem.PutOpValues.CREATE.toString());
+    UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.APPEND.toString());
+    UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.CREATE.toString());
   }
 
   /**
@@ -82,7 +81,7 @@
     if (method.equals("PUT") || method.equals("POST")) {
       String op = httpReq.getParameter(HttpFSFileSystem.OP_PARAM);
       if (op != null && UPLOAD_OPERATIONS.contains(op.toUpperCase())) {
-        if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParams.DataParam.NAME))) {
+        if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParametersProvider.DataParam.NAME))) {
           String contentType = httpReq.getContentType();
           contentTypeOK =
             HttpFSFileSystem.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
index 26dff49..b999a72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.http.server;
 
+import com.sun.jersey.api.container.ContainerException;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.wsrs.ExceptionProvider;
 import org.slf4j.Logger;
@@ -59,6 +60,9 @@
     if (throwable instanceof FileSystemAccessException) {
       throwable = throwable.getCause();
     }
+    if (throwable instanceof ContainerException) {
+      throwable = throwable.getCause();
+    }
     if (throwable instanceof SecurityException) {
       status = Response.Status.UNAUTHORIZED;
     } else if (throwable instanceof FileNotFoundException) {
@@ -67,6 +71,8 @@
       status = Response.Status.INTERNAL_SERVER_ERROR;
     } else if (throwable instanceof UnsupportedOperationException) {
       status = Response.Status.BAD_REQUEST;
+    } else if (throwable instanceof IllegalArgumentException) {
+      status = Response.Status.BAD_REQUEST;
     } else {
       status = Response.Status.INTERNAL_SERVER_ERROR;
     }
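
HttpFSExceptionProvider now unwraps Jersey's ContainerException (which wraps exceptions thrown while the request entity is consumed) before classifying the cause, and maps IllegalArgumentException to 400 so bad request parameters no longer surface as server errors. The mapping itself is a plain instanceof chain; a standalone sketch using numeric status codes in place of the JAX-RS Response.Status constants:

    import java.io.FileNotFoundException;
    import java.io.IOException;

    public final class ExceptionStatusSketch {
      /** Map a cause to an HTTP status, mirroring the order of checks above. */
      static int statusFor(Throwable t) {
        if (t instanceof SecurityException) {
          return 401;   // UNAUTHORIZED
        } else if (t instanceof FileNotFoundException) {
          return 404;   // NOT_FOUND
        } else if (t instanceof IOException) {
          return 500;   // INTERNAL_SERVER_ERROR
        } else if (t instanceof UnsupportedOperationException) {
          return 400;   // BAD_REQUEST
        } else if (t instanceof IllegalArgumentException) {
          return 400;   // BAD_REQUEST
        } else {
          return 500;   // INTERNAL_SERVER_ERROR
        }
      }
    }
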
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
new file mode 100644
index 0000000..0ab10179
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
+import org.apache.hadoop.lib.wsrs.BooleanParam;
+import org.apache.hadoop.lib.wsrs.EnumParam;
+import org.apache.hadoop.lib.wsrs.LongParam;
+import org.apache.hadoop.lib.wsrs.Param;
+import org.apache.hadoop.lib.wsrs.ParametersProvider;
+import org.apache.hadoop.lib.wsrs.ShortParam;
+import org.apache.hadoop.lib.wsrs.StringParam;
+import org.apache.hadoop.lib.wsrs.UserProvider;
+import org.slf4j.MDC;
+
+import javax.ws.rs.ext.Provider;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ * HttpFS ParametersProvider.
+ */
+@Provider
+public class HttpFSParametersProvider extends ParametersProvider {
+
+  private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
+    new HashMap<Enum, Class<Param<?>>[]>();
+
+  static {
+    PARAMS_DEF.put(Operation.OPEN,
+      new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
+    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.LISTSTATUS,
+      new Class[]{DoAsParam.class, FilterParam.class});
+    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
+      new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.APPEND,
+      new Class[]{DoAsParam.class, DataParam.class});
+    PARAMS_DEF.put(Operation.CREATE,
+      new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
+                  ReplicationParam.class, BlockSizeParam.class, DataParam.class});
+    PARAMS_DEF.put(Operation.MKDIRS,
+      new Class[]{DoAsParam.class, PermissionParam.class});
+    PARAMS_DEF.put(Operation.RENAME,
+      new Class[]{DoAsParam.class, DestinationParam.class});
+    PARAMS_DEF.put(Operation.SETOWNER,
+      new Class[]{DoAsParam.class, OwnerParam.class, GroupParam.class});
+    PARAMS_DEF.put(Operation.SETPERMISSION,
+      new Class[]{DoAsParam.class, PermissionParam.class});
+    PARAMS_DEF.put(Operation.SETREPLICATION,
+      new Class[]{DoAsParam.class, ReplicationParam.class});
+    PARAMS_DEF.put(Operation.SETTIMES,
+      new Class[]{DoAsParam.class, ModifiedTimeParam.class,
+                  AccessTimeParam.class});
+    PARAMS_DEF.put(Operation.DELETE,
+      new Class[]{DoAsParam.class, RecursiveParam.class});
+  }
+
+  public HttpFSParametersProvider() {
+    super(HttpFSFileSystem.OP_PARAM, HttpFSFileSystem.Operation.class,
+          PARAMS_DEF);
+  }
+
+  /**
+   * Class for access-time parameter.
+   */
+  public static class AccessTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
+    /**
+     * Constructor.
+     */
+    public AccessTimeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for block-size parameter.
+   */
+  public static class BlockSizeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public BlockSizeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for data parameter.
+   */
+  public static class DataParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "data";
+
+    /**
+     * Constructor.
+     */
+    public DataParam() {
+      super(NAME, false);
+    }
+  }
+
+  /**
+   * Class for operation parameter.
+   */
+  public static class OperationParam extends EnumParam<HttpFSFileSystem.Operation> {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OP_PARAM;
+    /**
+     * Constructor.
+     */
+    public OperationParam(String operation) {
+      super(NAME, HttpFSFileSystem.Operation.class,
+            HttpFSFileSystem.Operation.valueOf(operation.toUpperCase()));
+    }
+  }
+
+  /**
+   * Class for delete's recursive parameter.
+   */
+  public static class RecursiveParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public RecursiveParam() {
+      super(NAME, false);
+    }
+  }
+
+  /**
+   * Class for do-as parameter.
+   */
+  public static class DoAsParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public DoAsParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+    /**
+     * Delegates to parent and then adds do-as user to
+     * MDC context for logging purposes.
+     *
+     *
+     * @param str parameter value.
+     *
+     * @return parsed parameter
+     */
+    @Override
+    public String parseParam(String str) {
+      String doAs = super.parseParam(str);
+      MDC.put(getName(), (doAs != null) ? doAs : "-");
+      return doAs;
+    }
+  }
+
+  /**
+   * Class for filter parameter.
+   */
+  public static class FilterParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "filter";
+
+    /**
+     * Constructor.
+     */
+    public FilterParam() {
+      super(NAME, null);
+    }
+
+  }
+
+  /**
+   * Class for group parameter.
+   */
+  public static class GroupParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public GroupParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for len parameter.
+   */
+  public static class LenParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "len";
+
+    /**
+     * Constructor.
+     */
+    public LenParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for modified-time parameter.
+   */
+  public static class ModifiedTimeParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public ModifiedTimeParam() {
+      super(NAME, -1l);
+    }
+  }
+
+  /**
+   * Class for offset parameter.
+   */
+  public static class OffsetParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = "offset";
+
+    /**
+     * Constructor.
+     */
+    public OffsetParam() {
+      super(NAME, 0l);
+    }
+  }
+
+  /**
+   * Class for overwrite parameter.
+   */
+  public static class OverwriteParam extends BooleanParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public OverwriteParam() {
+      super(NAME, true);
+    }
+  }
+
+  /**
+   * Class for owner parameter.
+   */
+  public static class OwnerParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public OwnerParam() {
+      super(NAME, null, UserProvider.USER_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for permission parameter.
+   */
+  public static class PermissionParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
+
+    /**
+     * Symbolic Unix permissions regular expression pattern.
+     */
+    private static final Pattern PERMISSION_PATTERN =
+      Pattern.compile(HttpFSFileSystem.DEFAULT_PERMISSION +
+                      "|[0-1]?[0-7][0-7][0-7]");
+
+    /**
+     * Constructor.
+     */
+    public PermissionParam() {
+      super(NAME, HttpFSFileSystem.DEFAULT_PERMISSION, PERMISSION_PATTERN);
+    }
+
+  }
+
+  /**
+   * Class for replication parameter.
+   */
+  public static class ReplicationParam extends ShortParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public ReplicationParam() {
+      super(NAME, (short) -1);
+    }
+  }
+
+  /**
+   * Class for to-path parameter.
+   */
+  public static class DestinationParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public DestinationParam() {
+      super(NAME, null);
+    }
+  }
+}
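
The heart of the new HttpFSParametersProvider is PARAMS_DEF: a registry mapping each Operation to the Param classes it accepts, which the ParametersProvider base class (not shown in this patch) can use to parse exactly the declared query-string parameters for a request. A minimal, self-contained sketch of the same registry-and-parse idea, with simplified stand-ins for the Param hierarchy (all names here are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public final class ParamRegistrySketch {
      /** Stand-in for Param<?>: a named parameter with a default and a parser. */
      abstract static class Param<T> {
        final String name;
        final T defaultValue;

        Param(String name, T defaultValue) {
          this.name = name;
          this.defaultValue = defaultValue;
        }

        abstract T parse(String raw);
      }

      static final class BooleanParam extends Param<Boolean> {
        BooleanParam(String name, Boolean defaultValue) {
          super(name, defaultValue);
        }

        @Override
        Boolean parse(String raw) {
          return raw == null ? defaultValue : Boolean.valueOf(raw);
        }
      }

      enum Op { DELETE }

      public static void main(String[] args) {
        // One entry per operation, listing the parameters it accepts,
        // in the spirit of PARAMS_DEF above.
        Map<Op, Param<?>[]> registry = new HashMap<Op, Param<?>[]>();
        registry.put(Op.DELETE,
            new Param<?>[] { new BooleanParam("recursive", Boolean.FALSE) });

        // Parse only the declared parameters for a DELETE request.
        for (Param<?> p : registry.get(Op.DELETE)) {
          System.out.println(p.name + " = " + p.parse("true"));
        }
      }
    }
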
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
deleted file mode 100644
index 3c7b5f7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
+++ /dev/null
@@ -1,551 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.lib.wsrs.BooleanParam;
-import org.apache.hadoop.lib.wsrs.EnumParam;
-import org.apache.hadoop.lib.wsrs.LongParam;
-import org.apache.hadoop.lib.wsrs.ShortParam;
-import org.apache.hadoop.lib.wsrs.StringParam;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.slf4j.MDC;
-
-import java.util.regex.Pattern;
-
-/**
- * HttpFS HTTP Parameters used by {@link HttpFSServer}.
- */
-public class HttpFSParams {
-
-  /**
-   * To avoid instantiation.
-   */
-  private HttpFSParams() {
-  }
-
-  /**
-   * Class for access-time parameter.
-   */
-  public static class AccessTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public AccessTimeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for block-size parameter.
-   */
-  public static class BlockSizeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public BlockSizeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for data parameter.
-   */
-  public static class DataParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "data";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "false";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DataParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for DELETE operation parameter.
-   */
-  public static class DeleteOpParam extends EnumParam<HttpFSFileSystem.DeleteOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DeleteOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.DeleteOpValues.class);
-    }
-  }
-
-  /**
-   * Class for delete's recursive parameter.
-   */
-  public static class DeleteRecursiveParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "false";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DeleteRecursiveParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for do-as parameter.
-   */
-  public static class DoAsParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public DoAsParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-    /**
-     * Delegates to parent and then adds do-as user to
-     * MDC context for logging purposes.
-     *
-     * @param name parameter name.
-     * @param str parameter value.
-     *
-     * @return parsed parameter
-     */
-    @Override
-    public String parseParam(String name, String str) {
-      String doAs = super.parseParam(name, str);
-      MDC.put(NAME, (doAs != null) ? doAs : "-");
-      return doAs;
-    }
-  }
-
-  /**
-   * Class for filter parameter.
-   */
-  public static class FilterParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "filter";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param expr parameter value.
-     */
-    public FilterParam(String expr) {
-      super(NAME, expr);
-    }
-
-  }
-
-  /**
-   * Class for path parameter.
-   */
-  public static class FsPathParam extends StringParam {
-
-    /**
-     * Constructor.
-     *
-     * @param path parameter value.
-     */
-    public FsPathParam(String path) {
-      super("path", path);
-    }
-
-    /**
-     * Makes the path absolute adding '/' to it.
-     * <p/>
-     * This is required because JAX-RS resolution of paths does not add
-     * the root '/'.
-     */
-    public void makeAbsolute() {
-      String path = value();
-      path = "/" + ((path != null) ? path : "");
-      setValue(path);
-    }
-
-  }
-
-  /**
-   * Class for GET operation parameter.
-   */
-  public static class GetOpParam extends EnumParam<HttpFSFileSystem.GetOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public GetOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.GetOpValues.class);
-    }
-  }
-
-  /**
-   * Class for group parameter.
-   */
-  public static class GroupParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public GroupParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for len parameter.
-   */
-  public static class LenParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "len";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public LenParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for modified-time parameter.
-   */
-  public static class ModifiedTimeParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public ModifiedTimeParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for offset parameter.
-   */
-  public static class OffsetParam extends LongParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = "offset";
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "0";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OffsetParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for overwrite parameter.
-   */
-  public static class OverwriteParam extends BooleanParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "true";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OverwriteParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for owner parameter.
-   */
-  public static class OwnerParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public OwnerParam(String str) {
-      super(NAME, str, UserProvider.USER_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for permission parameter.
-   */
-  public static class PermissionParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION;
-
-
-    /**
-     * Symbolic Unix permissions regular expression pattern.
-     */
-    private static final Pattern PERMISSION_PATTERN =
-      Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
-
-    /**
-     * Constructor.
-     *
-     * @param permission parameter value.
-     */
-    public PermissionParam(String permission) {
-      super(NAME, permission.toLowerCase(), PERMISSION_PATTERN);
-    }
-
-  }
-
-  /**
-   * Class for POST operation parameter.
-   */
-  public static class PostOpParam extends EnumParam<HttpFSFileSystem.PostOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public PostOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.PostOpValues.class);
-    }
-  }
-
-  /**
-   * Class for PUT operation parameter.
-   */
-  public static class PutOpParam extends EnumParam<HttpFSFileSystem.PutOpValues> {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public PutOpParam(String str) {
-      super(NAME, str, HttpFSFileSystem.PutOpValues.class);
-    }
-  }
-
-  /**
-   * Class for replication parameter.
-   */
-  public static class ReplicationParam extends ShortParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "-1";
-
-    /**
-     * Constructor.
-     *
-     * @param str parameter value.
-     */
-    public ReplicationParam(String str) {
-      super(NAME, str);
-    }
-  }
-
-  /**
-   * Class for to-path parameter.
-   */
-  public static class ToPathParam extends StringParam {
-
-    /**
-     * Parameter name.
-     */
-    public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
-
-    /**
-     * Default parameter value.
-     */
-    public static final String DEFAULT = "";
-
-    /**
-     * Constructor.
-     *
-     * @param path parameter value.
-     */
-    public ToPathParam(String path) {
-      super(NAME, path);
-    }
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index cf90485..22a173a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -21,26 +21,22 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
@@ -49,6 +45,7 @@
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
 import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.apache.hadoop.lib.wsrs.Parameters;
 import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
@@ -57,7 +54,6 @@
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
@@ -90,39 +86,6 @@
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
 
   /**
-   * Special binding for '/' as it is not handled by the wildcard binding.
-   *
-   * @param user principal making the request.
-   * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @GET
-  @Path("/")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response root(@Context Principal user,
-                       @QueryParam(GetOpParam.NAME) GetOpParam op,
-                       @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
-               new LenParam(LenParam.DEFAULT), filter, doAs,
-               new OverwriteParam(OverwriteParam.DEFAULT),
-               new BlockSizeParam(BlockSizeParam.DEFAULT),
-               new PermissionParam(PermissionParam.DEFAULT),
-               new ReplicationParam(ReplicationParam.DEFAULT));
-  }
-
-  /**
    * Resolves the effective user that will be used to request a FileSystemAccess filesystem.
    * <p/>
    * If the doAs-user is NULL or the same as the user, it returns the user.
@@ -207,145 +170,261 @@
     return fs;
   }
 
+  private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
+    if (!path.equals("/")) {
+      throw new UnsupportedOperationException(
+        MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
+                             op, path));
+    }
+  }
+
   /**
-   * Binding to handle all GET requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues}.
-   * <p/>
-   * The @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
-   * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}. It returns
-   * HttpFSServer instrumentation data. The specified path must be '/'.
+   * Special binding for '/' as it is not handled by the wildcard binding.
    *
-   * @param user principal making the request.
-   * @param path path for the GET request.
-   * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
-   * @param offset of the  file being fetch, used only with
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN} operations.
-   * @param len amounts of bytes, used only with @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}
-   * operations.
-   * @param filter Glob filter, default value is none. Used only if the
-   * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   * @param override default is true. Used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
+   * @param user the principal of the user making the request.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @GET
+  @Path("/")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response getRoot(@Context Principal user,
+                          @QueryParam(OperationParam.NAME) OperationParam op,
+                          @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    return get(user, "", op, params);
+  }
+
+  private String makeAbsolute(String path) {
+    return "/" + ((path != null) ? path : "");
+  }
+
+  /**
+   * Binding to handle GET requests, supported operations are those of {@link HttpFSFileSystem.Operation}.
+   *
+   * @param user the principal of the user making the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @GET
   @Path("{path:.*}")
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response get(@Context Principal user,
-                      @PathParam("path") @DefaultValue("") FsPathParam path,
-                      @QueryParam(GetOpParam.NAME) GetOpParam op,
-                      @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
-                      @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
-                      @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,
-
-                      //these params are only for createHandle operation acceptance purposes
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication
-  )
+                      @PathParam("path") String path,
+                      @QueryParam(OperationParam.NAME) OperationParam op,
+                      @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
-    } else {
-      path.makeAbsolute();
-      MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-      switch (op.value()) {
-        case OPEN: {
-          //Invoking the command directly using an unmanaged FileSystem that is released by the
-          //FileSystemReleaseFilter
-          FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
-          FileSystem fs = createFileSystem(user, doAs.value());
-          InputStream is = command.execute(fs);
-          AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
-          InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
-          response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
-          break;
-        }
-        case GETFILESTATUS: {
-          FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case LISTSTATUS: {
-          FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          if (filter.value() == null) {
-            AUDIT_LOG.info("[{}]", path);
-          } else {
-            AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
-          }
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETHOMEDIRECTORY: {
-          FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
-          JSONObject json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("");
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case INSTRUMENTATION: {
-          if (!path.value().equals("/")) {
-            throw new UnsupportedOperationException(
-              MessageFormat.format("Invalid path for {0}={1}, must be '/'",
-                                   GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
-          }
-          Groups groups = HttpFSServerWebApp.get().get(Groups.class);
-          List<String> userGroups = groups.getGroups(user.getName());
-          if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
-            throw new AccessControlException("User not in HttpFSServer admin group");
-          }
-          Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
-          Map snapshot = instrumentation.getSnapshot();
-          response = Response.ok(snapshot).build();
-          break;
-        }
-        case GETCONTENTSUMMARY: {
-          FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETFILECHECKSUM: {
-          FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
-          Map json = fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-          break;
-        }
-        case GETDELEGATIONTOKEN: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
-        }
-        case GETFILEBLOCKLOCATIONS: {
-          response = Response.status(Response.Status.BAD_REQUEST).build();
-          break;
-        }
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case OPEN: {
+        //Invoking the command directly using an unmanaged FileSystem that is
+        // released by the FileSystemReleaseFilter
+        FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+        FileSystem fs = createFileSystem(user, doAs);
+        InputStream is = command.execute(fs);
+        Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+        Long len = params.get(LenParam.NAME, LenParam.class);
+        AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+                       new Object[]{path, offset, len});
+        InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+        response =
+          Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
+        break;
       }
-      return response;
+      case GETFILESTATUS: {
+        FSOperations.FSFileStatus command =
+          new FSOperations.FSFileStatus(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case LISTSTATUS: {
+        String filter = params.get(FilterParam.NAME, FilterParam.class);
+        FSOperations.FSListStatus command = new FSOperations.FSListStatus(
+          path, filter);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] filter [{}]", path,
+                       (filter != null) ? filter : "-");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETHOMEDIRECTORY: {
+        enforceRootPath(op.value(), path);
+        FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("");
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case INSTRUMENTATION: {
+        enforceRootPath(op.value(), path);
+        Groups groups = HttpFSServerWebApp.get().get(Groups.class);
+        List<String> userGroups = groups.getGroups(user.getName());
+        if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
+          throw new AccessControlException(
+            "User not in HttpFSServer admin group");
+        }
+        Instrumentation instrumentation =
+          HttpFSServerWebApp.get().get(Instrumentation.class);
+        Map snapshot = instrumentation.getSnapshot();
+        response = Response.ok(snapshot).build();
+        break;
+      }
+      case GETCONTENTSUMMARY: {
+        FSOperations.FSContentSummary command =
+          new FSOperations.FSContentSummary(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILECHECKSUM: {
+        FSOperations.FSFileChecksum command =
+          new FSOperations.FSFileChecksum(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      case GETFILEBLOCKLOCATIONS: {
+        response = Response.status(Response.Status.BAD_REQUEST).build();
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP GET operation [{0}]",
+                               op.value()));
+      }
     }
+    return response;
+  }
+
+
+  /**
+   * Binding to handle DELETE requests.
+   *
+   * @param user the principal of the user making the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @DELETE
+  @Path("{path:.*}")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Response delete(@Context Principal user,
+                      @PathParam("path") String path,
+                      @QueryParam(OperationParam.NAME) OperationParam op,
+                      @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case DELETE: {
+        Boolean recursive =
+          params.get(RecursiveParam.NAME, RecursiveParam.class);
+        AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
+        FSOperations.FSDelete command =
+          new FSOperations.FSDelete(path, recursive);
+        JSONObject json = fsExecute(user, doAs, command);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
+                               op.value()));
+      }
+    }
+    return response;
+  }
+
+  /**
+   * Binding to handle POST requests.
+   *
+   * @param is the input stream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
+   *
+   * @return the request response.
+   *
+   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+   * handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
+   */
+  @POST
+  @Path("{path:.*}")
+  @Consumes({"*/*"})
+  @Produces({MediaType.APPLICATION_JSON})
+  public Response post(InputStream is,
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
+    throws IOException, FileSystemAccessException {
+    Response response;
+    path = makeAbsolute(path);
+    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+    switch (op.value()) {
+      case APPEND: {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
+          response = Response.temporaryRedirect(
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.APPEND)).build();
+        } else {
+          FSOperations.FSAppend command =
+            new FSOperations.FSAppend(is, path);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info("[{}]", path);
+          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
+        }
+        break;
+      }
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP POST operation [{0}]",
+                               op.value()));
+      }
+    }
+    return response;
   }
 
   /**
@@ -358,251 +437,138 @@
    */
   protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
     UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
-    uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
+    uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation).
       queryParam(DataParam.NAME, Boolean.TRUE);
     return uriBuilder.build(null);
   }
 
+
   /**
-   * Binding to handle all DELETE requests.
+   * Binding to handle PUT requests.
    *
-   * @param user principal making the request.
-   * @param path path for the DELETE request.
-   * @param op DELETE operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.DeleteOpValues#DELETE}.
-   * @param recursive indicates if the delete is recursive, default is <code>false</code>
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
+   * @param is the input stream for the request payload.
+   * @param user the principal of the user making the request.
+   * @param uriInfo the UriInfo of the request.
+   * @param path the path for operation.
+   * @param op the HttpFS operation of the request.
+   * @param params the HttpFS parameters of the request.
    *
    * @return the request response.
    *
    * @throws IOException thrown if an IO error occurred. Thrown exceptions are
    * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @DELETE
-  @Path("{path:.*}")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response delete(@Context Principal user,
-                         @PathParam("path") FsPathParam path,
-                         @QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
-                         @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
-                         DeleteRecursiveParam recursive,
-                         @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
-    }
-    switch (op.value()) {
-      case DELETE: {
-        path.makeAbsolute();
-        MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
-        AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
-        FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
-        break;
-      }
-    }
-    return response;
-  }
-
-
-  /**
-   * Binding to handle all PUT requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues}.
-   *
-   * @param is request input stream, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the PUT request.
-   * @param op PUT operation, no default value.
-   * @param toPath new path, used only for
-   * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#RENAME} operations.
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param owner owner to set, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param group group to set, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
-   * @param override default is true. Used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param blockSize block size to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
-   * @param permission permission to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
-   * @param replication replication factor to set, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
-   * @param modifiedTime modified time, in seconds since EPOC, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param accessTime accessed time, in seconds since EPOC, used only by
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
-   * @param hasData indicates if the append request is uploading data or not
-   * (just getting the handle).
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
+   * @throws FileSystemAccessException thrown if a FileSystemAccess related
+   * error occurred. Thrown exceptions are handled by
+   * {@link HttpFSExceptionProvider}.
    */
   @PUT
   @Path("{path:.*}")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(InputStream is,
-                      @Context Principal user,
-                      @Context UriInfo uriInfo,
-                      @PathParam("path") FsPathParam path,
-                      @QueryParam(PutOpParam.NAME) PutOpParam op,
-                      @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
-                      @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
-                      @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
-                      @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
-                      @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
-                      @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
-                      PermissionParam permission,
-                      @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
-                      ReplicationParam replication,
-                      @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
-                      ModifiedTimeParam modifiedTime,
-                      @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
-                      AccessTimeParam accessTime,
-                      @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                      @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+                       @Context Principal user,
+                       @Context UriInfo uriInfo,
+                       @PathParam("path") String path,
+                       @QueryParam(OperationParam.NAME) OperationParam op,
+                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
-    }
-    path.makeAbsolute();
+    Response response;
+    path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case CREATE: {
-        if (!hasData.value()) {
+        boolean hasData = params.get(DataParam.NAME, DataParam.class);
+        if (!hasData) {
           response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
+            createUploadRedirectionURL(uriInfo,
+              HttpFSFileSystem.Operation.CREATE)).build();
         } else {
-          FSOperations.FSCreate
-            command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
-                                                replication.value(), blockSize.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
-                         new Object[]{path, permission, override, replication, blockSize});
+          String permission = params.get(PermissionParam.NAME,
+                                         PermissionParam.class);
+          boolean override = params.get(OverwriteParam.NAME,
+                                        OverwriteParam.class);
+          short replication = params.get(ReplicationParam.NAME,
+                                         ReplicationParam.class);
+          long blockSize = params.get(BlockSizeParam.NAME,
+                                      BlockSizeParam.class);
+          FSOperations.FSCreate command =
+            new FSOperations.FSCreate(is, path, permission, override,
+                                      replication, blockSize);
+          fsExecute(user, doAs, command);
+          AUDIT_LOG.info(
+            "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
+            new Object[]{path, permission, override, replication, blockSize});
           response = Response.status(Response.Status.CREATED).build();
         }
         break;
       }
       case MKDIRS: {
-        FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSMkdirs command =
+          new FSOperations.FSMkdirs(path, permission);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] permission [{}]", path, permission);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case RENAME: {
-        FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
+        String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
+        FSOperations.FSRename command =
+          new FSOperations.FSRename(path, toPath);
+        JSONObject json = fsExecute(user, doAs, command);
         AUDIT_LOG.info("[{}] to [{}]", path, toPath);
         response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
         break;
       }
       case SETOWNER: {
-        FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
+        String owner = params.get(OwnerParam.NAME, OwnerParam.class);
+        String group = params.get(GroupParam.NAME, GroupParam.class);
+        FSOperations.FSSetOwner command =
+          new FSOperations.FSSetOwner(path, owner, group);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
         response = Response.ok().build();
         break;
       }
       case SETPERMISSION: {
-        FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
+        String permission = params.get(PermissionParam.NAME,
+                                       PermissionParam.class);
+        FSOperations.FSSetPermission command =
+          new FSOperations.FSSetPermission(path, permission);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, permission);
         response = Response.ok().build();
         break;
       }
       case SETREPLICATION: {
-        FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
-        JSONObject json = fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
+        short replication = params.get(ReplicationParam.NAME,
+                                       ReplicationParam.class);
+        FSOperations.FSSetReplication command =
+          new FSOperations.FSSetReplication(path, replication);
+        JSONObject json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to [{}]", path, replication);
         response = Response.ok(json).build();
         break;
       }
       case SETTIMES: {
-        FSOperations.FSSetTimes
-          command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
-        fsExecute(user, doAs.value(), command);
-        AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
+        long modifiedTime = params.get(ModifiedTimeParam.NAME,
+                                       ModifiedTimeParam.class);
+        long accessTime = params.get(AccessTimeParam.NAME,
+                                     AccessTimeParam.class);
+        FSOperations.FSSetTimes command =
+          new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
+                       modifiedTime + ":" + accessTime);
         response = Response.ok().build();
         break;
       }
-      case RENEWDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-      case CANCELDELEGATIONTOKEN: {
-        response = Response.status(Response.Status.BAD_REQUEST).build();
-        break;
-      }
-    }
-    return response;
-  }
-
-  /**
-   * Binding to handle all OPST requests, supported operations are
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues}.
-   *
-   * @param is request input stream, used only for
-   * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
-   * @param user principal making the request.
-   * @param uriInfo the request uriInfo.
-   * @param path path for the POST request.
-   * @param op POST operation, default is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND}.
-   * @param hasData indicates if the append request is uploading data or not (just getting the handle).
-   * @param doAs user being impersonated, defualt value is none. It can be used
-   * only if the current user is a HttpFSServer proxyuser.
-   *
-   * @return the request response.
-   *
-   * @throws IOException thrown if an IO error occurred. Thrown exceptions are
-   * handled by {@link HttpFSExceptionProvider}.
-   * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
-   * exceptions are handled by {@link HttpFSExceptionProvider}.
-   */
-  @POST
-  @Path("{path:.*}")
-  @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
-  public Response post(InputStream is,
-                       @Context Principal user,
-                       @Context UriInfo uriInfo,
-                       @PathParam("path") FsPathParam path,
-                       @QueryParam(PostOpParam.NAME) PostOpParam op,
-                       @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
-                       @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
-    throws IOException, FileSystemAccessException {
-    Response response = null;
-    if (op == null) {
-      throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
-    }
-    path.makeAbsolute();
-    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    switch (op.value()) {
-      case APPEND: {
-        if (!hasData.value()) {
-          response = Response.temporaryRedirect(
-            createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
-        } else {
-          FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
-          fsExecute(user, doAs.value(), command);
-          AUDIT_LOG.info("[{}]", path);
-          response = Response.ok().type(MediaType.APPLICATION_JSON).build();
-        }
-        break;
+      default: {
+        throw new IOException(
+          MessageFormat.format("Invalid HTTP PUT operation [{0}]",
+                               op.value()));
       }
     }
     return response;
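
Both the CREATE branch in put() and the APPEND branch in post() above keep the two-step upload handshake: a request that does not carry the data flag gets a temporary redirect whose target is built by createUploadRedirectionURL(), and only the redirected request streams the payload. A sketch of that URL rewriting, using only names from this patch (the host and path in the comments are placeholders, not values from the code):

  // First request:    <verb> http://<httpfs-host>/<path>?op=CREATE&...
  // Redirect target:  <verb> http://<httpfs-host>/<path>?op=CREATE&...&<DataParam.NAME>=true
  UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
  uriBuilder = uriBuilder
      .replaceQueryParam(OperationParam.NAME, HttpFSFileSystem.Operation.CREATE)
      .queryParam(DataParam.NAME, Boolean.TRUE);
  URI redirectTo = uriBuilder.build(null);
  // The resource method returns Response.temporaryRedirect(redirectTo).build();
  // the client repeats the call against redirectTo, this time with the file bytes.
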
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
index 7bc3a14..e4e6355 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
@@ -22,15 +22,14 @@
 
 public abstract class BooleanParam extends Param<Boolean> {
 
-  public BooleanParam(String name, String str) {
-    value = parseParam(name, str);
+  public BooleanParam(String name, Boolean defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Boolean parse(String str) throws Exception {
     if (str.equalsIgnoreCase("true")) {
       return true;
-    }
-    if (str.equalsIgnoreCase("false")) {
+    } else if (str.equalsIgnoreCase("false")) {
       return false;
     }
     throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
index aa9408f..96b46c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
@@ -20,8 +20,8 @@
 
 public abstract class ByteParam extends Param<Byte> {
 
-  public ByteParam(String name, String str) {
-    value = parseParam(name, str);
+  public ByteParam(String name, Byte defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Byte parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
index ff86406..f605bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
@@ -25,9 +25,9 @@
 public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
   Class<E> klass;
 
-  public EnumParam(String label, String str, Class<E> e) {
+  public EnumParam(String name, Class<E> e, E defaultValue) {
+    super(name, defaultValue);
     klass = e;
-    value = parseParam(label, str);
   }
 
   protected E parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
index 6eddaa2..7c0f081 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
@@ -20,8 +20,8 @@
 
 public abstract class IntegerParam extends Param<Integer> {
 
-  public IntegerParam(String name, String str) {
-    value = parseParam(name, str);
+  public IntegerParam(String name, Integer defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Integer parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
index 354a550..ec601bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
@@ -20,8 +20,8 @@
 
 public abstract class LongParam extends Param<Long> {
 
-  public LongParam(String name, String str) {
-    value = parseParam(name, str);
+  public LongParam(String name, Long defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Long parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
index 68a41d5..62af481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
@@ -23,32 +23,39 @@
 import java.text.MessageFormat;
 
 public abstract class Param<T> {
+  private String name;
   protected T value;
 
-  public T parseParam(String name, String str) {
-    Check.notNull(name, "name");
+  public Param(String name, T defaultValue) {
+    this.name = name;
+    this.value = defaultValue;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public T parseParam(String str) {
     try {
-      return (str != null && str.trim().length() > 0) ? parse(str) : null;
+      value = (str != null && str.trim().length() > 0) ? parse(str) : value;
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
                              name, str, getDomain()));
     }
+    return value;
   }
 
   public T value() {
     return value;
   }
 
-  protected void setValue(T value) {
-    this.value = value;
-  }
-
   protected abstract String getDomain();
 
   protected abstract T parse(String str) throws Exception;
 
   public String toString() {
-    return value.toString();
+    return (value != null) ? value.toString() : "NULL";
   }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
new file mode 100644
index 0000000..b5ec214
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.lib.wsrs;
+
+import java.util.Map;
+
+/**
+ * Class that contains all parsed JAX-RS parameters.
+ * <p/>
+ * Instances are created by the {@link ParametersProvider} class.
+ */
+public class Parameters {
+  private Map<String, Param<?>> params;
+
+  /**
+   * Constructor that receives the request parsed parameters.
+   *
+   * @param params the request parsed parameters.
+   */
+  public Parameters(Map<String, Param<?>> params) {
+    this.params = params;
+  }
+
+  /**
+   * Returns the value of a request parsed parameter.
+   *
+   * @param name parameter name.
+   * @param klass class of the parameter, used for value casting.
+   * @return the value of the parameter.
+   */
+  @SuppressWarnings("unchecked")
+  public <V, T extends Param<V>> V get(String name, Class<T> klass) {
+    return ((T)params.get(name)).value();
+  }
+  
+}
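
Parameters is just a typed wrapper over a name-to-Param map, so it can also be built by hand, for example in a unit test, without going through Jersey injection. A sketch fragment follows; OffsetParam stands in for any of the HttpFSParametersProvider param classes, and its no-arg constructor is the same assumption ParametersProvider (below) relies on:

  Map<String, Param<?>> map = new HashMap<String, Param<?>>();
  OffsetParam offset = new OffsetParam();          // starts at its default value
  offset.parseParam("128");                        // simulate ?offset=128 on the query string
  map.put(OffsetParam.NAME, offset);

  Parameters params = new Parameters(map);
  Long value = params.get(OffsetParam.NAME, OffsetParam.class);   // 128
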
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
new file mode 100644
index 0000000..3d41d99
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import com.sun.jersey.api.core.HttpContext;
+import com.sun.jersey.core.spi.component.ComponentContext;
+import com.sun.jersey.core.spi.component.ComponentScope;
+import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
+import com.sun.jersey.spi.inject.Injectable;
+import com.sun.jersey.spi.inject.InjectableProvider;
+
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import java.lang.reflect.Type;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Jersey provider that parses the request parameters based on the
+ * given parameter definition. 
+ */
+public class ParametersProvider
+  extends AbstractHttpContextInjectable<Parameters>
+  implements InjectableProvider<Context, Type> {
+
+  private String driverParam;
+  private Class<? extends Enum> enumClass;
+  private Map<Enum, Class<Param<?>>[]> paramsDef;
+
+  public ParametersProvider(String driverParam, Class<? extends Enum> enumClass,
+                            Map<Enum, Class<Param<?>>[]> paramsDef) {
+    this.driverParam = driverParam;
+    this.enumClass = enumClass;
+    this.paramsDef = paramsDef;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Parameters getValue(HttpContext httpContext) {
+    Map<String, Param<?>> map = new HashMap<String, Param<?>>();
+    MultivaluedMap<String, String> queryString =
+      httpContext.getRequest().getQueryParameters();
+    String str = queryString.getFirst(driverParam);
+    if (str == null) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Missing Operation parameter [{0}]",
+                             driverParam));
+    }
+    Enum op;
+    try {
+      op = Enum.valueOf(enumClass, str.toUpperCase());
+    } catch (IllegalArgumentException ex) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Invalid Operation [{0}]", str));
+    }
+    if (!paramsDef.containsKey(op)) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Unsupported Operation [{0}]", op));
+    }
+    for (Class<Param<?>> paramClass : paramsDef.get(op)) {
+      Param<?> param;
+      try {
+        param = paramClass.newInstance();
+      } catch (Exception ex) {
+        throw new UnsupportedOperationException(
+          MessageFormat.format(
+            "Param class [{0}] does not have default constructor",
+            paramClass.getName()));
+      }
+      try {
+        param.parseParam(queryString.getFirst(param.getName()));
+      }
+      catch (Exception ex) {
+        throw new IllegalArgumentException(ex.toString(), ex);
+      }
+      map.put(param.getName(), param);
+    }
+    return new Parameters(map);
+  }
+
+  @Override
+  public ComponentScope getScope() {
+    return ComponentScope.PerRequest;
+  }
+
+  @Override
+  public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
+    return (type.equals(Parameters.class)) ? this : null;
+  }
+}
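
ParametersProvider is generic: a concrete subclass supplies the driver parameter name, the operation enum class, and a map from each operation to the Param classes it accepts. HttpFSParametersProvider, which HttpFSServer imports above but which is not included in this section of the patch, would plug in roughly like this; the registration entries are a hypothetical sketch, not the actual class body:

  @SuppressWarnings("unchecked")
  public class HttpFSParametersProvider extends ParametersProvider {

    private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
      new HashMap<Enum, Class<Param<?>>[]>();

    static {
      // One entry per HttpFSFileSystem.Operation value; only the params listed
      // here are parsed (with their defaults) for that operation.
      PARAMS_DEF.put(HttpFSFileSystem.Operation.OPEN,
        new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
      PARAMS_DEF.put(HttpFSFileSystem.Operation.DELETE,
        new Class[]{DoAsParam.class, RecursiveParam.class});
      // ...
    }

    public HttpFSParametersProvider() {
      super(HttpFSFileSystem.OP_PARAM, HttpFSFileSystem.Operation.class,
            PARAMS_DEF);
    }
  }
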
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
index a3995ba..cc75a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
@@ -20,8 +20,8 @@
 
 public abstract class ShortParam extends Param<Short> {
 
-  public ShortParam(String name, String str) {
-    value = parseParam(name, str);
+  public ShortParam(String name, Short defaultValue) {
+    super(name, defaultValue);
   }
 
   protected Short parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
index 4b3a927..79e6336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
@@ -15,42 +15,38 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.lib.wsrs;
 
-import org.apache.hadoop.lib.util.Check;
-
 import java.text.MessageFormat;
 import java.util.regex.Pattern;
 
 public abstract class StringParam extends Param<String> {
   private Pattern pattern;
 
-  public StringParam(String name, String str) {
-    this(name, str, null);
+  public StringParam(String name, String defaultValue) {
+    this(name, defaultValue, null);
   }
 
-  public StringParam(String name, String str, Pattern pattern) {
+  public StringParam(String name, String defaultValue, Pattern pattern) {
+    super(name, defaultValue);
     this.pattern = pattern;
-    value = parseParam(name, str);
+    parseParam(defaultValue);
   }
 
-  public String parseParam(String name, String str) {
-    String ret = null;
-    Check.notNull(name, "name");
+  public String parseParam(String str) {
     try {
       if (str != null) {
         str = str.trim();
         if (str.length() > 0) {
-          return parse(str);
+          value = parse(str);
         }
       }
     } catch (Exception ex) {
       throw new IllegalArgumentException(
         MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
-                             name, str, getDomain()));
+                             getName(), str, getDomain()));
     }
-    return ret;
+    return value;
   }
 
   protected String parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
index a55d5e2..e2f8b84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
@@ -475,6 +475,7 @@
       ops[i] = new Object[]{Operation.values()[i]};
     }
     return Arrays.asList(ops);
+//    return Arrays.asList(new Object[][]{ new Object[]{Operation.CREATE}});
   }
 
   private Operation operation;
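
The test updates in this file and the next reflect the collapse of GetOpValues, PutOpValues, PostOpValues and DeleteOpValues into a single HttpFSFileSystem.Operation enum, so every test can be parameterized over one list of operations. The enum itself is defined outside this section; one plausible shape, shown purely to illustrate why a single enum can still drive per-verb dispatch, is:

  // Hypothetical sketch; the real HttpFSFileSystem.Operation is not in this hunk.
  enum HttpVerb { GET, PUT, POST, DELETE }

  public enum Operation {
    OPEN(HttpVerb.GET), GETFILESTATUS(HttpVerb.GET), LISTSTATUS(HttpVerb.GET),
    CREATE(HttpVerb.PUT), MKDIRS(HttpVerb.PUT), RENAME(HttpVerb.PUT),
    APPEND(HttpVerb.POST), DELETE(HttpVerb.DELETE);

    private final HttpVerb verb;

    Operation(HttpVerb verb) { this.verb = verb; }

    public HttpVerb getVerb() { return verb; }
  }
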
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
index 2596be9..9996e0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
@@ -31,34 +31,34 @@
 
   @Test
   public void putUpload() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "application/octet-stream", true, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
   }
 
   @Test
   public void postUpload() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
   }
 
   @Test
   public void putUploadWrong() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", false, false);
-    test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", true, true);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
   }
 
   @Test
   public void postUploadWrong() throws Exception {
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", false, false);
-    test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", true, true);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
+    test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
   }
 
   @Test
   public void getOther() throws Exception {
-    test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
+    test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
   }
 
   @Test
   public void putOther() throws Exception {
-    test("PUT", HttpFSFileSystem.PutOpValues.MKDIRS.toString(), "plain/text", false, false);
+    test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
   }
 
   private void test(String method, String operation, String contentType,
@@ -68,7 +68,7 @@
     Mockito.reset(request);
     Mockito.when(request.getMethod()).thenReturn(method);
     Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
-    Mockito.when(request.getParameter(HttpFSParams.DataParam.NAME)).
+    Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
       thenReturn(Boolean.toString(upload));
     Mockito.when(request.getContentType()).thenReturn(contentType);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
deleted file mode 100644
index b1b140d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestBooleanParam {
-
-  @Test
-  public void param() throws Exception {
-    BooleanParam param = new BooleanParam("p", "true") {
-    };
-    Assert.assertEquals(param.getDomain(), "a boolean");
-    Assert.assertEquals(param.value(), Boolean.TRUE);
-    Assert.assertEquals(param.toString(), "true");
-    param = new BooleanParam("p", "false") {
-    };
-    Assert.assertEquals(param.value(), Boolean.FALSE);
-    param = new BooleanParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new BooleanParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid() throws Exception {
-    new BooleanParam("p", "x") {
-    };
-  }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
deleted file mode 100644
index 6b1a5ef..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestByteParam {
-
-  @Test
-  public void param() throws Exception {
-    ByteParam param = new ByteParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a byte");
-    Assert.assertEquals(param.value(), new Byte((byte) 1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new ByteParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new ByteParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new ByteParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new ByteParam("p", "256") {
-    };
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java
deleted file mode 100644
index bb37f75..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestEnumParam {
-
-  public static enum ENUM {
-    FOO, BAR
-  }
-
-  @Test
-  public void param() throws Exception {
-    EnumParam<ENUM> param = new EnumParam<ENUM>("p", "FOO", ENUM.class) {
-    };
-    Assert.assertEquals(param.getDomain(), "FOO,BAR");
-    Assert.assertEquals(param.value(), ENUM.FOO);
-    Assert.assertEquals(param.toString(), "FOO");
-    param = new EnumParam<ENUM>("p", null, ENUM.class) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new EnumParam<ENUM>("p", "", ENUM.class) {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new EnumParam<ENUM>("p", "x", ENUM.class) {
-    };
-  }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
deleted file mode 100644
index 634dbe7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestIntegerParam {
-
-  @Test
-  public void param() throws Exception {
-    IntegerParam param = new IntegerParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "an integer");
-    Assert.assertEquals(param.value(), new Integer(1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new IntegerParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new IntegerParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new IntegerParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new IntegerParam("p", "" + Long.MAX_VALUE) {
-    };
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
deleted file mode 100644
index 1a7ddd8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestLongParam {
-
-  @Test
-  public void param() throws Exception {
-    LongParam param = new LongParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a long");
-    Assert.assertEquals(param.value(), new Long(1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new LongParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new LongParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new LongParam("p", "x") {
-    };
-  }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
new file mode 100644
index 0000000..ed79c86
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import junit.framework.Assert;
+import org.junit.Test;
+
+import java.util.regex.Pattern;
+
+public class TestParam {
+
+  private <T> void test(Param<T> param, String name,
+                   String domain, T defaultValue, T validValue,
+                   String invalidStrValue, String outOfRangeValue) throws Exception {
+
+    Assert.assertEquals(name, param.getName());
+    Assert.assertEquals(domain, param.getDomain());
+    Assert.assertEquals(defaultValue, param.value());
+    Assert.assertEquals(defaultValue, param.parseParam(""));
+    Assert.assertEquals(defaultValue, param.parseParam(null));
+    Assert.assertEquals(validValue, param.parseParam(validValue.toString()));
+    if (invalidStrValue != null) {
+      try {
+        param.parseParam(invalidStrValue);
+        Assert.fail();
+      } catch (IllegalArgumentException ex) {
+        //NOP
+      } catch (Exception ex) {
+        Assert.fail();
+      }
+    }
+    if (outOfRangeValue != null) {
+      try {
+        param.parseParam(outOfRangeValue);
+        Assert.fail();
+      } catch (IllegalArgumentException ex) {
+        //NOP
+      } catch (Exception ex) {
+        Assert.fail();
+      }
+    }
+  }
+
+  @Test
+  public void testBoolean() throws Exception {
+    Param<Boolean> param = new BooleanParam("b", false) {
+    };
+    test(param, "b", "a boolean", false, true, "x", null);
+  }
+
+  @Test
+  public void testByte() throws Exception {
+    Param<Byte> param = new ByteParam("B", (byte) 1) {
+    };
+    test(param, "B", "a byte", (byte) 1, (byte) 2, "x", "256");
+  }
+
+  @Test
+  public void testShort() throws Exception {
+    Param<Short> param = new ShortParam("S", (short) 1) {
+    };
+    test(param, "S", "a short", (short) 1, (short) 2, "x",
+         "" + ((int)Short.MAX_VALUE + 1));
+  }
+
+  @Test
+  public void testInteger() throws Exception {
+    Param<Integer> param = new IntegerParam("I", 1) {
+    };
+    test(param, "I", "an integer", 1, 2, "x", "" + ((long)Integer.MAX_VALUE + 1));
+  }
+
+  @Test
+  public void testLong() throws Exception {
+    Param<Long> param = new LongParam("L", 1L) {
+    };
+    test(param, "L", "a long", 1L, 2L, "x", null);
+  }
+
+  public static enum ENUM {
+    FOO, BAR
+  }
+
+  @Test
+  public void testEnum() throws Exception {
+    EnumParam<ENUM> param = new EnumParam<ENUM>("e", ENUM.class, ENUM.FOO) {
+    };
+    test(param, "e", "FOO,BAR", ENUM.FOO, ENUM.BAR, "x", null);
+  }
+
+  @Test
+  public void testString() throws Exception {
+    Param<String> param = new StringParam("s", "foo") {
+    };
+    test(param, "s", "a string", "foo", "bar", null, null);
+  }
+
+  @Test
+  public void testRegEx() throws Exception {
+    Param<String> param = new StringParam("r", "aa", Pattern.compile("..")) {
+    };
+    test(param, "r", "..", "aa", "bb", "c", null);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
deleted file mode 100644
index b37bddf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestShortParam {
-
-  @Test
-  public void param() throws Exception {
-    ShortParam param = new ShortParam("p", "1") {
-    };
-    Assert.assertEquals(param.getDomain(), "a short");
-    Assert.assertEquals(param.value(), new Short((short) 1));
-    Assert.assertEquals(param.toString(), "1");
-    param = new ShortParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new ShortParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid1() throws Exception {
-    new ShortParam("p", "x") {
-    };
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void invalid2() throws Exception {
-    new ShortParam("p", "" + Integer.MAX_VALUE) {
-    };
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
deleted file mode 100644
index feb489e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-import java.util.regex.Pattern;
-
-public class TestStringParam {
-
-  @Test
-  public void param() throws Exception {
-    StringParam param = new StringParam("p", "s") {
-    };
-    Assert.assertEquals(param.getDomain(), "a string");
-    Assert.assertEquals(param.value(), "s");
-    Assert.assertEquals(param.toString(), "s");
-    param = new StringParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-    param = new StringParam("p", "") {
-    };
-    Assert.assertEquals(param.value(), null);
-
-    param.setValue("S");
-    Assert.assertEquals(param.value(), "S");
-  }
-
-  @Test
-  public void paramRegEx() throws Exception {
-    StringParam param = new StringParam("p", "Aaa", Pattern.compile("A.*")) {
-    };
-    Assert.assertEquals(param.getDomain(), "A.*");
-    Assert.assertEquals(param.value(), "Aaa");
-    Assert.assertEquals(param.toString(), "Aaa");
-    param = new StringParam("p", null) {
-    };
-    Assert.assertEquals(param.value(), null);
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void paramInvalidRegEx() throws Exception {
-    new StringParam("p", "Baa", Pattern.compile("A.*")) {
-    };
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties
new file mode 100644
index 0000000..7517512
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties
@@ -0,0 +1,22 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+#log4j.appender.test=org.apache.log4j.varia.NullAppender
+#log4j.appender.test=org.apache.log4j.ConsoleAppender
+log4j.appender.test=org.apache.log4j.FileAppender
+log4j.appender.test.File=${test.dir}/test.log
+log4j.appender.test.Append=true
+log4j.appender.test.layout=org.apache.log4j.PatternLayout
+log4j.appender.test.layout.ConversionPattern=%d{ISO8601} %5p %20c{1}: %4L - %m%n
+log4j.rootLogger=ALL, test
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml
new file mode 100644
index 0000000..a7fa6eb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project-dist</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project-dist</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-hdfs-raid</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <name>Apache Hadoop HDFS Raid</name>
+  <description>Apache Hadoop HDFS Raid</description>
+
+
+  <properties>
+    <hadoop.component>raid</hadoop.component>
+    <is.hadoop.component>false</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-archives</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+
+    <plugins>
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-mrapp-generated-classpath</id>
+            <phase>generate-test-resources</phase>
+            <goals>
+              <goal>build-classpath</goal>
+            </goals>
+            <configuration>
+              <!--
+              This is needed to run the unit tests. It generates the classpath
+              that is required in the env of the launch container in the mini mr/yarn cluster.
+              -->
+              <outputFile>${project.build.directory}/test-classes/mrapp-generated-classpath</outputFile>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile combine.self="override"></excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <profiles>
+    <profile>
+      <id>docs</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-site-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>docs</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>site</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
+    <profile>
+      <id>dist</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-assemblies</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <id>dist</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+                <configuration>
+                  <finalName>${project.artifactId}-${project.version}</finalName>
+                  <appendAssemblyId>false</appendAssemblyId>
+                  <attach>false</attach>
+                  <descriptorRefs>
+                    <descriptorRef>hadoop-raid-dist</descriptorRef>
+                  </descriptorRefs>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
diff --git a/hadoop-mapreduce-project/src/contrib/raid/conf/raid.xml b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/conf/raid.xml
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/conf/raid.xml
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/conf/raid.xml
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/DistributedRaidFileSystem.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/RaidDFSUtil.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
similarity index 63%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
index a29a3ca..7eb6e16 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/datanode/RaidBlockSender.java
@@ -34,7 +34,9 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -56,8 +58,10 @@
   private DataInputStream checksumIn; // checksum datastream
   private DataChecksum checksum; // checksum stream
   private long offset; // starting position to read
+  /** Initial position to read */
+  private long initialOffset;
   private long endOffset; // ending position
-  private int bytesPerChecksum; // chunk size
+  private int chunkSize; // chunk size
   private int checksumSize; // checksum size
   private boolean corruptChecksumOk; // if need to verify checksum
   private boolean chunkOffsetOK; // if need to send chunk offset
@@ -74,6 +78,8 @@
    * not sure if there will be much more improvement.
    */
   private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024;
+  private static final int TRANSFERTO_BUFFER_SIZE = Math.max(
+      HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO);
   private volatile ChunkChecksum lastChunkChecksum = null;
 
   
@@ -125,12 +131,13 @@
        * is mostly corrupted. For now just truncate bytesPerchecksum to
        * blockLength.
        */        
-      bytesPerChecksum = checksum.getBytesPerChecksum();
-      if (bytesPerChecksum > 10*1024*1024 && bytesPerChecksum > replicaVisibleLength) {
+      int size = checksum.getBytesPerChecksum();
+      if (size > 10*1024*1024 && size > replicaVisibleLength) {
         checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(),
             Math.max((int)replicaVisibleLength, 10*1024*1024));
-        bytesPerChecksum = checksum.getBytesPerChecksum();        
+        size = checksum.getBytesPerChecksum();        
       }
+      chunkSize = size;
       checksumSize = checksum.getChecksumSize();
 
       if (length < 0) {
@@ -147,12 +154,12 @@
         throw new IOException(msg);
       }
       
-      offset = (startOffset - (startOffset % bytesPerChecksum));
+      offset = (startOffset - (startOffset % chunkSize));
       if (length >= 0) {
         // Make sure endOffset points to end of a checksumed chunk.
         long tmpLen = startOffset + length;
-        if (tmpLen % bytesPerChecksum != 0) {
-          tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum);
+        if (tmpLen % chunkSize != 0) {
+          tmpLen += (chunkSize - tmpLen % chunkSize);
         }
         if (tmpLen < endOffset) {
           // will use on-disk checksum here since the end is a stable chunk
@@ -162,7 +169,7 @@
 
       // seek to the right offsets
       if (offset > 0) {
-        long checksumSkip = (offset / bytesPerChecksum) * checksumSize;
+        long checksumSkip = (offset / chunkSize) * checksumSize;
         // note blockInStream is seeked when created below
         if (checksumSkip > 0) {
           // Should we use seek() for checksum file as well?
@@ -178,7 +185,7 @@
       throw ioe;
     }
   }
-
+  
   /**
    * close opened files.
    */
@@ -227,57 +234,85 @@
     // otherwise just return the same exception.
     return ioe;
   }
-
+  
   /**
-   * Sends upto maxChunks chunks of data.
-   * 
-   * When blockInPosition is >= 0, assumes 'out' is a 
-   * {@link SocketOutputStream} and tries 
-   * {@link SocketOutputStream#transferToFully(FileChannel, long, int)} to
-   * send data (and updates blockInPosition).
+   * @param datalen Length of data 
+   * @return number of chunks for data of given size
    */
-  private int sendChunks(ByteBuffer pkt, int maxChunks, OutputStream out) 
-                         throws IOException {
-    // Sends multiple chunks in one packet with a single write().
-
-    int len = (int) Math.min(endOffset - offset,
-                             (((long) bytesPerChecksum) * ((long) maxChunks)));
-    int numChunks = (len + bytesPerChecksum - 1)/bytesPerChecksum;
-    int packetLen = len + numChunks*checksumSize + 4;
-    boolean lastDataPacket = offset + len == endOffset && len > 0;
+  private int numberOfChunks(long datalen) {
+    return (int) ((datalen + chunkSize - 1)/chunkSize);
+  }
+  
+  /**
+   * Write packet header into {@code pkt}
+   */
+  private void writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
     pkt.clear();
-
-
-    PacketHeader header = new PacketHeader(
-      packetLen, offset, seqno, (len == 0), len);
+    PacketHeader header = new PacketHeader(packetLen, offset, seqno,
+        (dataLen == 0), dataLen, false);
     header.putInBuffer(pkt);
+  }
+  
+  /**
+   * Read checksum into given buffer
+   * @param buf buffer to read the checksum into
+   * @param checksumOffset offset at which to write the checksum into buf
+   * @param checksumLen length of checksum to write
+   * @throws IOException on error
+   */
+  private void readChecksum(byte[] buf, final int checksumOffset,
+      final int checksumLen) throws IOException {
+    if (checksumSize <= 0 && checksumIn == null) {
+      return;
+    }
+    try {
+      checksumIn.readFully(buf, checksumOffset, checksumLen);
+    } catch (IOException e) {
+      LOG.warn(" Could not read or failed to veirfy checksum for data"
+          + " at offset " + offset + " for block " + block, e);
+      IOUtils.closeStream(checksumIn);
+      checksumIn = null;
+      if (corruptChecksumOk) {
+        if (checksumOffset < checksumLen) {
+          // Just fill the array with zeros.
+          Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
+        }
+      } else {
+        throw e;
+      }
+    }
+  }
+  
+  /**
+   * Sends a packet with up to maxChunks chunks of data.
+   * 
+   * @param pkt buffer used for writing packet data
+   * @param maxChunks maximum number of chunks to send
+   * @param out stream to send data to
+   * @param transferTo use transferTo to send data
+   * @param throttler used for throttling data transfer bandwidth
+   */
+  private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
+      boolean transferTo, DataTransferThrottler throttler) throws IOException {
+    int dataLen = (int) Math.min(endOffset - offset,
+                             (chunkSize * (long) maxChunks));
+    
+    int numChunks = numberOfChunks(dataLen); // Number of chunks to be sent in the packet
+    int checksumDataLen = numChunks * checksumSize;
+    int packetLen = dataLen + checksumDataLen + 4;
+    boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
+
+    writePacketHeader(pkt, dataLen, packetLen);
 
     int checksumOff = pkt.position();
-    int checksumLen = numChunks * checksumSize;
     byte[] buf = pkt.array();
     
     if (checksumSize > 0 && checksumIn != null) {
-      try {
-        checksumIn.readFully(buf, checksumOff, checksumLen);
-      } catch (IOException e) {
-        LOG.warn(" Could not read or failed to veirfy checksum for data" +
-                 " at offset " + offset + " for block " + block + " got : "
-                 + StringUtils.stringifyException(e));
-        IOUtils.closeStream(checksumIn);
-        checksumIn = null;
-        if (corruptChecksumOk) {
-          if (checksumOff < checksumLen) {
-            // Just fill the array with zeros.
-            Arrays.fill(buf, checksumOff, checksumLen, (byte) 0);
-          }
-        } else {
-          throw e;
-        }
-      }
+      readChecksum(buf, checksumOff, checksumDataLen);
 
       // write in progress that we need to use to get last checksum
       if (lastDataPacket && lastChunkChecksum != null) {
-        int start = checksumOff + checksumLen - checksumSize;
+        int start = checksumOff + checksumDataLen - checksumSize;
         byte[] updatedChecksum = lastChunkChecksum.getChecksum();
         
         if (updatedChecksum != null) {
@@ -286,61 +321,85 @@
       }
     }
     
-    int dataOff = checksumOff + checksumLen;
-    
-    if (blockInPosition < 0) {
-      //normal transfer
-      IOUtils.readFully(blockIn, buf, dataOff, len);
+    int dataOff = checksumOff + checksumDataLen;
+    if (!transferTo) { // normal transfer
+      IOUtils.readFully(blockIn, buf, dataOff, dataLen);
 
       if (verifyChecksum) {
-        int dOff = dataOff;
-        int cOff = checksumOff;
-        int dLeft = len;
-
-        for (int i=0; i<numChunks; i++) {
-          checksum.reset();
-          int dLen = Math.min(dLeft, bytesPerChecksum);
-          checksum.update(buf, dOff, dLen);
-          if (!checksum.compare(buf, cOff)) {
-            long failedPos = offset + len -dLeft;
-            throw new ChecksumException("Checksum failed at " + 
-                                        failedPos, failedPos);
-          }
-          dLeft -= dLen;
-          dOff += dLen;
-          cOff += checksumSize;
-        }
+        verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
       }
-      //writing is done below (mainly to handle IOException)
     }
     
     try {
-      if (blockInPosition >= 0) {
-        //use transferTo(). Checks on out and blockIn are already done. 
-
+      if (transferTo) {
         SocketOutputStream sockOut = (SocketOutputStream)out;
-        //first write the packet
-        sockOut.write(buf, 0, dataOff);
+        sockOut.write(buf, 0, dataOff); // First write checksum
+        
         // no need to flush. since we know out is not a buffered stream. 
-
         sockOut.transferToFully(((FileInputStream)blockIn).getChannel(), 
-                                blockInPosition, len);
-
-        blockInPosition += len;
-      } else {
+                                blockInPosition, dataLen);
+        blockInPosition += dataLen;
+      } else { 
         // normal transfer
-        out.write(buf, 0, dataOff + len);
+        out.write(buf, 0, dataOff + dataLen);
       }
-      
     } catch (IOException e) {
-      /* exception while writing to the client (well, with transferTo(),
-       * it could also be while reading from the local file).
+      /* Exception while writing to the client. Connection closure from
+       * the other end is mostly the case and we do not care much about
+       * it. But other things can go wrong, especially in transferTo(),
+       * which we do not want to ignore.
+       *
+       * The message parsing below should not be considered as a good
+       * coding example. NEVER do it to drive program logic. NEVER.
+       * It was done here because NIO throws an IOException for EPIPE.
        */
+      String ioem = e.getMessage();
+      if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
+        LOG.error("BlockSender.sendChunks() exception: ", e);
+      }
       throw ioeToSocketException(e);
     }
 
-    return len;
+    if (throttler != null) { // rebalancing so throttle
+      throttler.throttle(packetLen);
+    }
+
+    return dataLen;
   }
+  
+  /**
+   * Compute checksum for chunks and verify the checksum that is read from
+   * the metadata file is correct.
+   * 
+   * @param buf buffer that has checksum and data
+   * @param dataOffset position where data is written in the buf
+   * @param datalen length of data
+   * @param numChunks number of chunks corresponding to data
+   * @param checksumOffset offset where checksum is written in the buf
+   * @throws ChecksumException on failed checksum verification
+   */
+  public void verifyChecksum(final byte[] buf, final int dataOffset,
+      final int datalen, final int numChunks, final int checksumOffset)
+      throws ChecksumException {
+    int dOff = dataOffset;
+    int cOff = checksumOffset;
+    int dLeft = datalen;
+
+    for (int i = 0; i < numChunks; i++) {
+      checksum.reset();
+      int dLen = Math.min(dLeft, chunkSize);
+      checksum.update(buf, dOff, dLen);
+      if (!checksum.compare(buf, cOff)) {
+        long failedPos = offset + datalen - dLeft;
+        throw new ChecksumException("Checksum failed at " + failedPos,
+            failedPos);
+      }
+      dLeft -= dLen;
+      dOff += dLen;
+      cOff += checksumSize;
+    }
+  }
+
 
   /**
    * sendBlock() is used to read block and its metadata and stream the data to
@@ -356,79 +415,61 @@
    */
   public long sendBlock(DataOutputStream out, OutputStream baseStream)
       throws IOException {
-    if( out == null ) {
+    if (out == null) {
       throw new IOException( "out stream is null" );
     }
-
-    long initialOffset = offset;
+    initialOffset = offset;
     long totalRead = 0;
     OutputStream streamForSendChunks = out;
     
     final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
     try {
-      try {
-        checksum.writeHeader(out);
-        if ( chunkOffsetOK ) {
-          out.writeLong( offset );
-        }
-        out.flush();
-      } catch (IOException e) { //socket error
-        throw ioeToSocketException(e);
-      }
-      
       int maxChunksPerPacket;
       int pktSize = PacketHeader.PKT_HEADER_LEN;
-      
-      if (transferToAllowed && !verifyChecksum && 
-          baseStream instanceof SocketOutputStream && 
-          blockIn instanceof FileInputStream) {
-        
+      boolean transferTo = transferToAllowed && !verifyChecksum
+          && baseStream instanceof SocketOutputStream
+          && blockIn instanceof FileInputStream;
+      if (transferTo) {
         FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
-        
-        // blockInPosition also indicates sendChunks() uses transferTo.
         blockInPosition = fileChannel.position();
         streamForSendChunks = baseStream;
+        maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
         
-        // assure a mininum buffer size.
-        maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, 
-                                       MIN_BUFFER_WITH_TRANSFERTO)
-                              + bytesPerChecksum - 1)/bytesPerChecksum;
-        
-        // allocate smaller buffer while using transferTo(). 
+        // Smaller packet size to only hold checksum when doing transferTo
         pktSize += checksumSize * maxChunksPerPacket;
       } else {
         maxChunksPerPacket = Math.max(1,
-            (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
-        pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
+            numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
+        // Packet size includes both checksum and data
+        pktSize += (chunkSize + checksumSize) * maxChunksPerPacket;
       }
 
       ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);
 
       while (endOffset > offset) {
-        long len = sendChunks(pktBuf, maxChunksPerPacket, 
-                              streamForSendChunks);
+        long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
+            transferTo, null);
         offset += len;
-        totalRead += len + ((len + bytesPerChecksum - 1)/bytesPerChecksum*
-                            checksumSize);
+        totalRead += len + (numberOfChunks(len) * checksumSize);
         seqno++;
       }
       try {
         // send an empty packet to mark the end of the block
-        sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);        
+        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
+            null);
         out.flush();
       } catch (IOException e) { //socket error
         throw ioeToSocketException(e);
       }
+      blockReadFully = true;
     } finally {
       if (clientTraceFmt != null) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
+        ClientTraceLog.info(String.format(clientTraceFmt, totalRead,
+            initialOffset, endTime - startTime));
       }
       close();
     }
-
-    blockReadFully = initialOffset == 0 && offset >= replicaVisibleLength;
-
     return totalRead;
   }
   
@@ -440,6 +481,13 @@
     public InputStream createStream(long offset) throws IOException; 
   }
   
+  /**
+   * @return the checksum type that will be used with this block transfer.
+   */
+  public DataChecksum getChecksum() {
+    return checksum;
+  }
+  
   private static class BlockInputStreamFactory implements InputStreamFactory {
     private final ExtendedBlock block;
     private final FsDatasetSpi<?> data;
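The RaidBlockSender changes above replace the inline chunk arithmetic with a numberOfChunks() helper and size the packet buffer differently for the transferTo and normal paths (checksums only versus checksums plus data). The standalone sketch below reproduces that sizing arithmetic; the constant values are assumptions standing in for PacketHeader.PKT_HEADER_LEN and HdfsConstants.IO_FILE_BUFFER_SIZE.

// Standalone sketch of the packet sizing arithmetic used by the refactored
// sender. Constants here are assumed values for illustration only.
public class PacketSizingSketch {
  static final int PKT_HEADER_LEN = 25;          // assumed header length
  static final int IO_FILE_BUFFER_SIZE = 4096;   // assumed io.file.buffer.size
  static final int MIN_BUFFER_WITH_TRANSFERTO = 64 * 1024;
  static final int TRANSFERTO_BUFFER_SIZE =
      Math.max(IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO);

  static int numberOfChunks(long datalen, int chunkSize) {
    return (int) ((datalen + chunkSize - 1) / chunkSize);
  }

  public static void main(String[] args) {
    int chunkSize = 512;   // bytes per checksum chunk
    int checksumSize = 4;  // CRC32 checksum size

    // transferTo path: the packet buffer only needs room for checksums,
    // the data itself goes straight from the file channel to the socket.
    int maxChunksTransferTo = numberOfChunks(TRANSFERTO_BUFFER_SIZE, chunkSize);
    int pktSizeTransferTo = PKT_HEADER_LEN + checksumSize * maxChunksTransferTo;

    // normal path: the packet buffer holds both data and checksums.
    int maxChunksNormal = Math.max(1, numberOfChunks(IO_FILE_BUFFER_SIZE, chunkSize));
    int pktSizeNormal = PKT_HEADER_LEN
        + (chunkSize + checksumSize) * maxChunksNormal;

    System.out.println("transferTo pkt size: " + pktSizeTransferTo);
    System.out.println("normal pkt size:     " + pktSizeNormal);
  }
}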
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
similarity index 97%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
index 531a0f2..9258696 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidUtil.java
@@ -50,7 +50,7 @@
       final boolean doAccessTime, final boolean needBlockToken
       ) throws FileNotFoundException, UnresolvedLinkException, IOException {
     return namesystem.getBlockLocations(src, offset, length,
-        doAccessTime, needBlockToken);
+        doAccessTime, needBlockToken, true);
   }
 }
 
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/BlockFixer.java
similarity index 97%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/BlockFixer.java
index 6e1d7f7..dd2fb96 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/BlockFixer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/BlockFixer.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.raid;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -44,14 +47,17 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.datatransfer.*;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -61,6 +67,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.RaidDFSUtil;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.net.NetUtils;
 
@@ -649,7 +656,7 @@
       mdOut.writeShort(BlockMetadataHeader.VERSION);
       
       // Create a summer and write out its header.
-      int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
+      int bytesPerChecksum = conf.getInt("dfs.bytes-per-checksum", 512);
       DataChecksum sum =
         DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
                                      bytesPerChecksum);
@@ -709,8 +716,8 @@
         blockContents.close();
         // Reopen
         blockContents = new FileInputStream(localBlockFile);
-        sendFixedBlock(datanode, blockContents, blockMetadata, block,
-                       blockSize);
+        sendFixedBlock(datanode, blockContents, blockMetadata, block, 
+            blockSize);
       } finally {
         if (blockContents != null) {
           blockContents.close();
@@ -780,9 +787,11 @@
                               });
         
         DatanodeInfo[] nodes = new DatanodeInfo[]{datanode};
+        DataChecksum checksum = blockSender.getChecksum();
         new Sender(out).writeBlock(block.getBlock(), block.getBlockToken(), "",
             nodes, null, BlockConstructionStage.PIPELINE_SETUP_CREATE,
-            1, 0L, blockSize, 0L, DataChecksum.newDataChecksum(metadataIn));
+            1, 0L, blockSize, 0L, DataChecksum.newDataChecksum(
+                checksum.getChecksumType(), checksum.getBytesPerChecksum()));
         blockSender.sendBlock(out, baseStream);
         
         LOG.info("Sent block " + block.getBlock() + " to " + datanode.getName());
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ConfigManager.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ConfigManager.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ConfigManager.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Decoder.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Decoder.java
index c38885c..6506008 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Decoder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Decoder.java
@@ -166,9 +166,9 @@
    *
    * @param srcFs The filesystem containing the source file.
    * @param srcPath The damaged source file.
-   * @param parityPath The filesystem containing the parity file. This could be
+   * @param parityFs The filesystem containing the parity file. This could be
    *        different from fs in case the parity file is part of a HAR archive.
-   * @param parityFile The parity file.
+   * @param parityPath The parity file.
    * @param blockSize The block size of the file.
    * @param blockOffset Known location of error in the source file. There could
    *        be additional errors in the source file that are discovered during
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DirectoryTraversal.java
similarity index 96%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DirectoryTraversal.java
index 4c955df..c1ff9bc 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DirectoryTraversal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DirectoryTraversal.java
@@ -112,6 +112,8 @@
 
   public List<FileStatus> getFilteredFiles(FileFilter filter, int limit) {
     List<FileStatus> filtered = new ArrayList<FileStatus>();
+    if (limit == 0) 
+      return filtered;
 
     // We need this semaphore to block when the number of running workitems
     // is equal to the number of threads. FixedThreadPool limits the number
@@ -120,20 +122,26 @@
     Semaphore slots = new Semaphore(numThreads);
 
     while (true) {
-      synchronized(filtered) {
-        if (filtered.size() >= limit) break;
-      }
       FilterFileWorkItem work = null;
       try {
+        slots.acquire();
+        synchronized(filtered) {
+          if (filtered.size() >= limit) {
+            slots.release();
+            break;
+          }
+        }
         Node next = getNextDirectoryNode();
         if (next == null) {
+          slots.release();
           break;
         }
         work = new FilterFileWorkItem(filter, next, filtered, slots);
-        slots.acquire();
       } catch (InterruptedException ie) {
+        slots.release();
         break;
       } catch (IOException e) {
+        slots.release();
         break;
       }
       executor.execute(work);
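The DirectoryTraversal fix above acquires the semaphore permit before checking the result limit or building a work item, and releases it on every early-exit path, so the thread-pool bound and the limit check can no longer drift apart. A minimal standalone sketch of that bounded-producer pattern follows, with illustrative names rather than the real FilterFileWorkItem machinery.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Sketch of the bounded-concurrency pattern used by the fixed
// getFilteredFiles(): take a permit before checking the limit or creating a
// work item, and release it on every path that does not hand it to a worker.
public class BoundedTraversalSketch {
  public static void main(String[] args) throws InterruptedException {
    final int numThreads = 4;
    final int limit = 10;
    final List<String> filtered = new ArrayList<String>();
    final Semaphore slots = new Semaphore(numThreads);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    for (int dir = 0; dir < 100; dir++) {
      slots.acquire();                       // block while all workers are busy
      synchronized (filtered) {
        if (filtered.size() >= limit) {      // enough results already collected
          slots.release();
          break;
        }
      }
      final int d = dir;
      executor.execute(new Runnable() {
        public void run() {
          try {
            synchronized (filtered) {        // pretend this directory matched
              filtered.add("dir-" + d);
            }
          } finally {
            slots.release();                 // worker returns its permit
          }
        }
      });
    }

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);
    synchronized (filtered) {
      System.out.println("collected " + filtered.size() + " entries");
    }
  }
}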
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistBlockFixer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistBlockFixer.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistBlockFixer.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaid.java
similarity index 99%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaid.java
index 81a3198..5c6e5cc 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaid.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaid.java
@@ -277,6 +277,7 @@
     */
    public boolean checkComplete() throws IOException {
      JobID jobID = runningJob.getJobID();
+     LOG.info("Checking job " + jobID);
      try {
       if (runningJob.isComplete()) {
          // delete job directory
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaidNode.java
similarity index 96%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaidNode.java
index d2219ae..43be2a7 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/DistRaidNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/DistRaidNode.java
@@ -52,7 +52,7 @@
   }
 
   /**
-   * {@inheritDocs}
+   * {@inheritDoc}
    */
   @Override
   public void join() {
@@ -65,7 +65,7 @@
   }
   
   /**
-   * {@inheritDocs}
+   * {@inheritDoc}
    */
   @Override
   public void stop() {
@@ -79,7 +79,7 @@
 
 
   /**
-   * {@inheritDocs}
+   * {@inheritDoc}
    */
   @Override
   void raidFiles(PolicyInfo info, List<FileStatus> paths) throws IOException {
@@ -95,7 +95,7 @@
   }
 
   /**
-   * {@inheritDocs}
+   * {@inheritDoc}
    */
   @Override
   int getRunningJobsForPolicy(String policyName) {
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Encoder.java
similarity index 99%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Encoder.java
index f74b3a0..b87bad4 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/Encoder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/Encoder.java
@@ -331,7 +331,6 @@
    * The implementation of generating parity data for a stripe.
    *
    * @param blocks The streams to blocks in the stripe.
-   * @param srcFile The source file.
    * @param stripeStartOffset The start offset of the stripe
    * @param blockSize The maximum size of a block.
    * @param outs output streams to the parity blocks.
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ErasureCode.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ErasureCode.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ErasureCode.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/GaloisField.java
similarity index 85%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/GaloisField.java
index 78b7af1b..a10f5d7 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/GaloisField.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/GaloisField.java
@@ -208,7 +208,7 @@
    * @param len consider x and y only from 0...len-1
    */
   public void solveVandermondeSystem(int[] x, int[] y, int len) {
-    assert(x.length <= len && y.length <= len);
+    assert(y.length <= len);
     for (int i = 0; i < len - 1; i++) {
       for (int j = len - 1; j > i; j--) {
         y[j] = y[j] ^ mulTable[x[i]][y[j - 1]];
@@ -302,4 +302,49 @@
     }
     return result;
   }
+
+  /**
+   * Perform Gaussian elimination on the given matrix. The matrix has to be a
+   * fat matrix (number of columns > number of rows).
+   */
+  public void gaussianElimination(int[][] matrix) {
+    assert(matrix != null && matrix.length > 0 && matrix[0].length > 0
+           && matrix.length < matrix[0].length);
+    int height = matrix.length;
+    int width = matrix[0].length;
+    for (int i = 0; i < height; i++) {
+      boolean pivotFound = false;
+      // scan the column for a nonzero pivot and swap it to the diagonal
+      for (int j = i; j < height; j++) {
+        if (matrix[j][i] != 0) {
+          int[] tmp = matrix[i];
+          matrix[i] = matrix[j];
+          matrix[j] = tmp;
+          pivotFound = true;
+          break;
+        }
+      }
+      if (!pivotFound) {
+        continue;
+      }
+      int pivot = matrix[i][i];
+      for (int j = i; j < width; j++) {
+        matrix[i][j] = divide(matrix[i][j], pivot);
+      }
+      for (int j = i + 1; j < height; j++) {
+        int lead = matrix[j][i];
+        for (int k = i; k < width; k++) {
+          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
+        }
+      }
+    }
+    for (int i = height - 1; i >=0; i--) {
+      for (int j = 0; j < i; j++) {
+        int lead = matrix[j][i];
+        for (int k = i; k < width; k++) {
+          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
+        }
+      }
+    }
+  }
 }
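
The new gaussianElimination reduces an augmented matrix over the field to reduced row-echelon form; ReedSolomonCode (added below) builds a syndrome matrix and uses this routine to recover the coefficients of an error-locator polynomial. A rough usage sketch, assuming the no-argument GaloisField.getInstance() that the codec below also uses; the matrix values are made up for illustration:

    import org.apache.hadoop.raid.GaloisField;

    public class GaussianEliminationExample {
      public static void main(String[] args) {
        GaloisField gf = GaloisField.getInstance();
        // 2x3 augmented system [A | b] over the field; values are illustrative only
        int[][] m = {
          { 3,  7, 11 },
          { 5, 13, 17 },
        };
        gf.gaussianElimination(m);
        // When the system is non-singular, the left 2x2 block is now the identity
        // and the last column holds the solution of A x = b over the field.
        System.out.println("x0=" + m[0][2] + " x1=" + m[1][2]);
      }
    }
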
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/HarIndex.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/HarIndex.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/HarIndex.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/JobMonitor.java
similarity index 93%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/JobMonitor.java
index e01fcba..cda295c 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/JobMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/JobMonitor.java
@@ -44,12 +44,13 @@
   volatile boolean running = true;
 
   private Map<String, List<DistRaid>> jobs;
+  public static final String JOBMONITOR_INTERVAL_KEY = "raid.jobmonitor.interval";
   private long jobMonitorInterval;
   private volatile long jobsMonitored = 0;
   private volatile long jobsSucceeded = 0;
 
   public JobMonitor(Configuration conf) {
-    jobMonitorInterval = conf.getLong("raid.jobmonitor.interval", 60000);
+    jobMonitorInterval = conf.getLong(JOBMONITOR_INTERVAL_KEY, 60000);
     jobs = new java.util.HashMap<String, List<DistRaid>>();
   }
 
@@ -112,6 +113,7 @@
           } catch (IOException ioe) {
             // If there was an error, consider the job finished.
             addJob(finishedJobs, key, job);
+            LOG.error("JobMonitor exception", ioe);
           }
         }
       }
@@ -159,6 +161,17 @@
   public long jobsSucceeded() {
     return this.jobsSucceeded;
   }
+  
+  // For test code
+  int runningJobsCount() {
+    int total = 0;
+    synchronized(jobs) {
+      for (String key: jobs.keySet()) {
+        total += jobs.get(key).size();
+      }
+    }
+    return total;
+  }
 
   private static void addJob(Map<String, List<DistRaid>> jobsMap,
                               String jobName, DistRaid job) {
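
Exposing the polling interval as JOBMONITOR_INTERVAL_KEY lets tests shorten the monitor's sleep instead of hard-coding the "raid.jobmonitor.interval" string. A hedged fragment, as a test in the org.apache.hadoop.raid package might use it (TestRaidNode below does essentially this):

    Configuration conf = new Configuration();
    // poll running DistRaid jobs every 20 seconds instead of the 60-second default
    conf.setLong(JobMonitor.JOBMONITOR_INTERVAL_KEY, 20000L);
    JobMonitor monitor = new JobMonitor(conf);
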
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/LocalBlockFixer.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalBlockFixer.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/LocalBlockFixer.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/LocalRaidNode.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/LocalRaidNode.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/LocalRaidNode.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ParityInputStream.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ParityInputStream.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ParityInputStream.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidConfigurationException.java
similarity index 93%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidConfigurationException.java
index 7335999..891863a 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidConfigurationException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidConfigurationException.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.raid;
 
 /**
- * Thrown when the config file for {@link CronNode} is malformed.  
+ * Thrown when the config file for {@link RaidNode} is malformed.
  */
 public class RaidConfigurationException extends Exception {
   private static final long serialVersionUID = 4046516718965587999L;
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidFilter.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidFilter.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidFilter.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidNode.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidNode.java
index 48329d3..dc19649 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidNode.java
@@ -80,6 +80,8 @@
   }
   public static final Log LOG = LogFactory.getLog( "org.apache.hadoop.raid.RaidNode");
   public static final long SLEEP_TIME = 10000L; // 10 seconds
+  public static final String TRIGGER_MONITOR_SLEEP_TIME_KEY = 
+      "hdfs.raid.trigger.monitor.sleep.time";
   public static final int DEFAULT_PORT = 60000;
   // Default stripe length = 5, parity length for RS code = 3
   public static final int DEFAULT_STRIPE_LENGTH = 5;
@@ -126,6 +128,7 @@
 
   /** Deamon thread to trigger policies */
   Daemon triggerThread = null;
+  public static long triggerMonitorSleepTime = SLEEP_TIME;
 
   /** Deamon thread to delete obsolete parity files */
   PurgeMonitor purgeMonitor = null;
@@ -299,6 +302,10 @@
     this.blockFixer = BlockFixer.createBlockFixer(conf);
     this.blockFixerThread = new Daemon(this.blockFixer);
     this.blockFixerThread.start();
+    // read the trigger monitor sleep time from the configuration
+    RaidNode.triggerMonitorSleepTime = conf.getLong(
+        TRIGGER_MONITOR_SLEEP_TIME_KEY, 
+        SLEEP_TIME);
 
     // start the deamon thread to fire polcies appropriately
     this.triggerThread = new Daemon(new TriggerMonitor());
@@ -503,7 +510,7 @@
         }
       }
       while (running) {
-        Thread.sleep(SLEEP_TIME);
+        Thread.sleep(RaidNode.triggerMonitorSleepTime);
 
         boolean reloaded = configMgr.reloadConfigsIfNecessary();
         if (reloaded) {
@@ -542,7 +549,7 @@
 
           // Apply the action on accepted paths
           LOG.info("Triggering Policy Action " + info.getName() +
-                   " " + info.getSrcPath());
+                   " " + info.getSrcPath() + " raid " + filteredPaths.size() + " files");
           try {
             raidFiles(info, filteredPaths);
           } catch (Exception e) {
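
The trigger monitor's sleep time is likewise read from hdfs.raid.trigger.monitor.sleep.time at startup, so tests can make the policy scan loop run faster. A hedged fragment (inside a test method that declares throws Exception), mirroring how TestRaidNode below configures it:

    Configuration localConf = new Configuration(conf);
    // rescan RAID policies every 3 seconds instead of the 10-second default
    localConf.setLong(RaidNode.TRIGGER_MONITOR_SLEEP_TIME_KEY, 3000L);
    RaidNode cnode = RaidNode.createRaidNode(null, localConf);
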
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidShell.java
similarity index 95%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidShell.java
index 479043c..5581210 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidShell.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -296,9 +297,22 @@
     for (int i = startindex; i < argv.length; i = i + 2) {
       String path = argv[i];
       long corruptOffset = Long.parseLong(argv[i+1]);
-      LOG.debug("RaidShell recoverFile for " + path + " corruptOffset " + corruptOffset);
-      paths[j] = new Path(raidnode.recoverFile(path, corruptOffset));
-      LOG.debug("Raidshell created recovery file " + paths[j]);
+      LOG.info("RaidShell recoverFile for " + path + " corruptOffset " + corruptOffset);
+      Path recovered = new Path("/tmp/recovered." + System.currentTimeMillis());
+      FileSystem fs = recovered.getFileSystem(conf);
+      DistributedFileSystem dfs = (DistributedFileSystem)fs;
+      Configuration raidConf = new Configuration(conf);
+      raidConf.set("fs.hdfs.impl",
+                     "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
+      raidConf.set("fs.raid.underlyingfs.impl",
+                     "org.apache.hadoop.hdfs.DistributedFileSystem");
+      raidConf.setBoolean("fs.hdfs.impl.disable.cache", true);
+      java.net.URI dfsUri = dfs.getUri();
+      FileSystem raidFs = FileSystem.get(dfsUri, raidConf);
+      FileUtil.copy(raidFs, new Path(path), fs, recovered, false, conf);
+
+      paths[j] = recovered;
+      LOG.info("Raidshell created recovery file " + paths[j]);
       j++;
     }
     return paths;
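
recoverFile now reads the damaged file back through DistributedRaidFileSystem, which falls back to parity data when it hits a corrupt block, and copies the reconstructed bytes to a temporary path. The essential wiring is the layered filesystem configuration, condensed here from the hunk above:

    Configuration raidConf = new Configuration(conf);
    // route hdfs:// URIs through the RAID-aware filesystem, backed by plain HDFS
    raidConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    raidConf.set("fs.raid.underlyingfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    raidConf.setBoolean("fs.hdfs.impl.disable.cache", true);  // force a fresh instance with these settings
    FileSystem raidFs = FileSystem.get(dfs.getUri(), raidConf);
    // a plain copy through raidFs yields the recovered contents
    FileUtil.copy(raidFs, new Path(path), fs, recovered, false, conf);
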
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidUtils.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/RaidUtils.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/RaidUtils.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonCode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonCode.java
new file mode 100644
index 0000000..0a5d91b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonCode.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.raid;
+import java.util.Set;
+
+
+public class ReedSolomonCode implements ErasureCode {
+
+  private final int stripeSize;
+  private final int paritySize;
+  private final int[] generatingPolynomial;
+  private final int PRIMITIVE_ROOT = 2;
+  private final int[] primitivePower;
+  private final GaloisField GF = GaloisField.getInstance();
+  private int[] errSignature;
+  private final int[] paritySymbolLocations;
+  private final int[] dataBuff;
+
+  public ReedSolomonCode(int stripeSize, int paritySize) {
+    assert(stripeSize + paritySize < GF.getFieldSize());
+    this.stripeSize = stripeSize;
+    this.paritySize = paritySize;
+    this.errSignature = new int[paritySize];
+    this.paritySymbolLocations = new int[paritySize];
+    this.dataBuff = new int[paritySize + stripeSize];
+    for (int i = 0; i < paritySize; i++) {
+      paritySymbolLocations[i] = i;
+    }
+
+    this.primitivePower = new int[stripeSize + paritySize];
+    // compute powers of the primitive root
+    for (int i = 0; i < stripeSize + paritySize; i++) {
+      primitivePower[i] = GF.power(PRIMITIVE_ROOT, i);
+    }
+    // compute generating polynomial
+    int[] gen = {1};
+    int[] poly = new int[2];
+    for (int i = 0; i < paritySize; i++) {
+      poly[0] = primitivePower[i];
+      poly[1] = 1;
+      gen = GF.multiply(gen, poly);
+    }
+    // generating polynomial has all generating roots
+    generatingPolynomial = gen;
+  }
+
+  @Override
+  public void encode(int[] message, int[] parity) {
+    assert(message.length == stripeSize && parity.length == paritySize);
+    for (int i = 0; i < paritySize; i++) {
+      dataBuff[i] = 0;
+    }
+    for (int i = 0; i < stripeSize; i++) {
+      dataBuff[i + paritySize] = message[i];
+    }
+    GF.remainder(dataBuff, generatingPolynomial);
+    for (int i = 0; i < paritySize; i++) {
+      parity[i] = dataBuff[i];
+    }
+  }
+
+  @Override
+  public void decode(int[] data, int[] erasedLocation, int[] erasedValue) {
+    if (erasedLocation.length == 0) {
+      return;
+    }
+    assert(erasedLocation.length == erasedValue.length);
+    for (int i = 0; i < erasedLocation.length; i++) {
+      data[erasedLocation[i]] = 0;
+    }
+    for (int i = 0; i < erasedLocation.length; i++) {
+      errSignature[i] = primitivePower[erasedLocation[i]];
+      erasedValue[i] = GF.substitute(data, primitivePower[i]);
+    }
+    GF.solveVandermondeSystem(errSignature, erasedValue, erasedLocation.length);
+  }
+
+  @Override
+  public int stripeSize() {
+    return this.stripeSize;
+  }
+
+  @Override
+  public int paritySize() {
+    return this.paritySize;
+  }
+
+  @Override
+  public int symbolSize() {
+    return (int) Math.round(Math.log(GF.getFieldSize()) / Math.log(2));
+  }
+
+  /**
+   * Given parity symbols followed by message symbols, return the locations of
+   * symbols that are corrupted. Can resolve up to (parity length / 2) error
+   * locations.
+   * @param data The message and parity. The parity should be placed in the
+   *             first part of the array. In each integer, the relevant symbol
+   *             occupies the least significant bits.
+   *             The number of elements in data is stripeSize() + paritySize().
+   *             <b>Note that data may be changed after calling this method.</b>
+   * @param errorLocations The set in which to put the detected error locations
+   * @return true if the error locations could be resolved, false otherwise.
+   */
+  public boolean computeErrorLocations(int[] data,
+      Set<Integer> errorLocations) {
+    assert(data.length == paritySize + stripeSize && errorLocations != null);
+    errorLocations.clear();
+    int maxError = paritySize / 2;
+    int[][] syndromeMatrix = new int[maxError][];
+    for (int i = 0; i < syndromeMatrix.length; ++i) {
+      syndromeMatrix[i] = new int[maxError + 1];
+    }
+    int[] syndrome = new int[paritySize];
+
+    if (computeSyndrome(data, syndrome)) {
+      // Parity check OK. No error location added.
+      return true;
+    }
+    for (int i = 0; i < maxError; ++i) {
+      for (int j = 0; j < maxError + 1; ++j) {
+        syndromeMatrix[i][j] = syndrome[i + j];
+      }
+    }
+    GF.gaussianElimination(syndromeMatrix);
+    int[] polynomial = new int[maxError + 1];
+    polynomial[0] = 1;
+    for (int i = 0; i < maxError; ++i) {
+      polynomial[i + 1] = syndromeMatrix[maxError - 1 - i][maxError];
+    }
+    for (int i = 0; i < paritySize + stripeSize; ++i) {
+      int possibleRoot = GF.divide(1, primitivePower[i]);
+      if (GF.substitute(polynomial, possibleRoot) == 0) {
+        errorLocations.add(i);
+      }
+    }
+    // Now recover with error locations and check the syndrome again
+    int[] locations = new int[errorLocations.size()];
+    int k = 0;
+    for (int loc : errorLocations) {
+      locations[k++] = loc;
+    }
+    int [] erasedValue = new int[locations.length];
+    decode(data, locations, erasedValue);
+    for (int i = 0; i < locations.length; ++i) {
+      data[locations[i]] = erasedValue[i];
+    }
+    return computeSyndrome(data, syndrome);
+  }
+
+  /**
+   * Compute the syndrome of the input [parity, message]
+   * @param data [parity, message]
+   * @param syndrome The syndromes (checksums) of the data
+   * @return true if all syndromes are zero
+   */
+  private boolean computeSyndrome(int[] data, int [] syndrome) {
+    boolean corruptionFound = false;
+    for (int i = 0; i < paritySize; i++) {
+      syndrome[i] = GF.substitute(data, primitivePower[i]);
+      if (syndrome[i] != 0) {
+        corruptionFound = true;
+      }
+    }
+    return !corruptionFound;
+  }
+}
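
A minimal end-to-end sketch of the new codec; the stripe and parity sizes, the symbol values, and the ReedSolomonExample class name are illustrative only:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.raid.ReedSolomonCode;

    public class ReedSolomonExample {
      public static void main(String[] args) {
        ReedSolomonCode codec = new ReedSolomonCode(10, 4);   // 10 data symbols, 4 parity symbols
        int[] message = new int[10];
        int[] parity = new int[4];
        for (int i = 0; i < message.length; i++) {
          message[i] = i + 1;                                 // symbols must fit in the field (0..255)
        }
        codec.encode(message, parity);

        // the codec expects a single array laid out as [parity | message]
        int[] data = new int[codec.paritySize() + codec.stripeSize()];
        System.arraycopy(parity, 0, data, 0, parity.length);
        System.arraycopy(message, 0, data, parity.length, message.length);

        data[6] ^= 0x5a;                                      // corrupt one symbol

        // with 4 parity symbols, up to 2 error locations can be identified
        Set<Integer> errorLocations = new HashSet<Integer>();
        boolean resolved = codec.computeErrorLocations(data, errorLocations);
        System.out.println("resolved=" + resolved + " locations=" + errorLocations);
      }
    }
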
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonDecoder.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/ReedSolomonEncoder.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/XORDecoder.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/XORDecoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/XORDecoder.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/XOREncoder.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/XOREncoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/XOREncoder.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/PolicyInfo.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/PolicyList.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/PolicyList.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/PolicyList.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/java/org/apache/hadoop/raid/protocol/RaidProtocol.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/bin/start-raidnode-remote.sh b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/start-raidnode-remote.sh
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/bin/start-raidnode-remote.sh
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/start-raidnode-remote.sh
diff --git a/hadoop-mapreduce-project/src/contrib/raid/bin/start-raidnode.sh b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/start-raidnode.sh
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/bin/start-raidnode.sh
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/start-raidnode.sh
diff --git a/hadoop-mapreduce-project/src/contrib/raid/bin/stop-raidnode-remote.sh b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/stop-raidnode-remote.sh
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/bin/stop-raidnode-remote.sh
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/stop-raidnode-remote.sh
diff --git a/hadoop-mapreduce-project/src/contrib/raid/bin/stop-raidnode.sh b/hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/stop-raidnode.sh
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/bin/stop-raidnode.sh
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/main/sbin/stop-raidnode.sh
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java
index ffdb4ed..36aab52 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/TestRaidDfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/TestRaidDfs.java
@@ -47,8 +47,8 @@
 
 public class TestRaidDfs extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
-  final static String LOG_DIR = "/raidlog";
+      "target/test-data")).getAbsolutePath();
+  final static String LOG_DIR = "target/raidlog";
   final static long RELOAD_INTERVAL = 1000;
   final static Log LOG = LogFactory.getLog("org.apache.hadoop.raid.TestRaidDfs");
   final static int NUM_DATANODES = 3;
@@ -414,6 +414,7 @@
     LOG.info(" Newcrc " + newcrc.getValue() + " old crc " + crc);
     if (newcrc.getValue() != crc) {
       LOG.info("CRC mismatch of file " + name + ": " + newcrc + " vs. " + crc);
+      return false;
     }
     return true;
   }
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyRaid.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRaidTestUtil.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java
similarity index 94%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java
index 10a7212..8986bea 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixer.java
@@ -26,12 +26,14 @@
 import java.util.Random;
 import java.util.zip.CRC32;
 
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -39,6 +41,8 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -53,9 +57,11 @@
   final static Log LOG = LogFactory.getLog(
                             "org.apache.hadoop.raid.TestBlockFixer");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR, 
       "test-raid.xml").getAbsolutePath();
+  public static final String DistBlockFixer_JAR =
+      JarFinder.getJar(DistBlockFixer.class);
   final static long RELOAD_INTERVAL = 1000;
   final static int NUM_DATANODES = 3;
   Configuration conf;
@@ -546,6 +552,8 @@
 
     conf.setBoolean("dfs.permissions", false);
 
+    conf.set("mapreduce.framework.name", "yarn");
+
     dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
     dfs.waitActive();
     fileSys = dfs.getFileSystem();
@@ -553,11 +561,28 @@
 
     FileSystem.setDefaultUri(conf, namenode);
     mr = new MiniMRCluster(4, namenode, 3);
-    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+    JobConf jobConf = mr.createJobConf();
+    jobTrackerName = "localhost:" + jobConf.get(JTConfig.JT_IPC_ADDRESS);
     hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
 
     FileSystem.setDefaultUri(conf, namenode);
     conf.set("mapred.job.tracker", jobTrackerName);
+    conf.set("mapreduce.framework.name", "yarn");
+    String rmAddress = jobConf.get("yarn.resourcemanager.address");
+    if (rmAddress != null) {
+      conf.set("yarn.resourcemanager.address", rmAddress);
+    }
+    String schedulerAddress =
+      jobConf.get("yarn.resourcemanager.scheduler.address");
+    if (schedulerAddress != null) {
+      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
+    }
+    String jobHistoryAddress = 
+        jobConf.get("mapreduce.jobhistory.address");
+    if (jobHistoryAddress != null) {
+      conf.set("mapreduce.jobhistory.address", jobHistoryAddress);
+    }
+    conf.set(JobContext.JAR, TestBlockFixer.DistBlockFixer_JAR);
     
     FileWriter fileWriter = new FileWriter(CONFIG_FILE);
     fileWriter.write("<?xml version=\"1.0\"?>\n");
@@ -609,10 +634,11 @@
     if (dfs != null) { dfs.shutdown(); }
   }
 
-  private long getCRC(FileSystem fs, Path p) throws IOException {
+  public static long getCRC(FileSystem fs, Path p) throws IOException {
     CRC32 crc = new CRC32();
     FSDataInputStream stm = fs.open(p);
-    for (int b = 0; b > 0; b = stm.read()) {
+    int b;
+    while ((b = stm.read()) >= 0) {
       crc.update(b);
     }
     stm.close();
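
The original getCRC loop, for (int b = 0; b > 0; b = stm.read()), never executed because b starts at 0, so the checksum was always computed over zero bytes; the rewritten loop reads until end of file. A self-contained version of the corrected helper, using a hypothetical CrcUtil class name for illustration:

    import java.io.IOException;
    import java.util.zip.CRC32;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CrcUtil {
      public static long getCRC(FileSystem fs, Path p) throws IOException {
        CRC32 crc = new CRC32();
        FSDataInputStream stm = fs.open(p);
        try {
          int b;
          while ((b = stm.read()) >= 0) {   // read() returns -1 at end of file
            crc.update(b);
          }
        } finally {
          stm.close();
        }
        return crc.getValue();
      }
    }
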
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerBlockFixDist.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerDistConcurrency.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerGeneratedBlockDist.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestBlockFixerParityBlockFixDist.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java
index 93658a5..4ab1107 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestDirectoryTraversal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestDirectoryTraversal.java
@@ -40,7 +40,7 @@
   final static Log LOG = LogFactory.getLog(
                             "org.apache.hadoop.raid.TestDirectoryTraversal");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
 
   MiniDFSCluster dfs = null;
   FileSystem fs = null;
@@ -211,7 +211,7 @@
 
   private void mySetup() throws IOException {
     conf = new Configuration();
-    dfs = new MiniDFSCluster(conf, 6, true, null);
+    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     dfs.waitActive();
     fs = dfs.getFileSystem();
   }
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java
similarity index 81%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java
index d1d3f60..6d60b57 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestErasureCodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestErasureCodes.java
@@ -169,6 +169,57 @@
     assertTrue("Decode failed", java.util.Arrays.equals(copy, message[0]));
   }
 
+  public void testComputeErrorLocations() {
+    for (int i = 0; i < TEST_TIMES; ++i) {
+      verifyErrorLocations(10, 4, 1);
+      verifyErrorLocations(10, 4, 2);
+    }
+  }
+
+  public void verifyErrorLocations(int stripeSize, int paritySize, int errors) {
+    int[] message = new int[stripeSize];
+    int[] parity = new int[paritySize];
+    Set<Integer> errorLocations = new HashSet<Integer>();
+    for (int i = 0; i < message.length; ++i) {
+      message[i] = RAND.nextInt(256);
+    }
+    while (errorLocations.size() < errors) {
+      int loc = RAND.nextInt(stripeSize + paritySize);
+      errorLocations.add(loc);
+    }
+    ReedSolomonCode codec = new ReedSolomonCode(stripeSize, paritySize);
+    codec.encode(message, parity);
+    int[] data = combineArrays(parity, message);
+    for (Integer i : errorLocations) {
+      data[i] = randError(data[i]);
+    }
+    Set<Integer> recoveredLocations = new HashSet<Integer>();
+    boolean resolved = codec.computeErrorLocations(data, recoveredLocations);
+    if (resolved) {
+      assertEquals(errorLocations, recoveredLocations);
+    }
+  }
+
+  private int randError(int actual) {
+    while (true) {
+      int r = RAND.nextInt(256);
+      if (r != actual) {
+        return r;
+      }
+    }
+  }
+
+  private int[] combineArrays(int[] array1, int[] array2) {
+    int[] result = new int[array1.length + array2.length];
+    for (int i = 0; i < array1.length; ++i) {
+      result[i] = array1[i];
+    }
+    for (int i = 0; i < array2.length; ++i) {
+      result[i + array1.length] = array2[i];
+    }
+    return result;
+  }
+
   private int[] randomErasedLocation(int erasedLen, int dataLen) {
     int[] erasedLocations = new int[erasedLen];
     for (int i = 0; i < erasedLen; i++) {
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestGaloisField.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestGaloisField.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java
similarity index 100%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestHarIndexParser.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java
index 4b1fe67..037c682 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java
@@ -36,7 +36,7 @@
 
 public class TestRaidFilter extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
   final static Log LOG =
     LogFactory.getLog("org.apache.hadoop.raid.TestRaidFilter");
 
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java
similarity index 92%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java
index 3439804..d3aeab7 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidHar.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java
@@ -22,6 +22,7 @@
 import java.io.FileNotFoundException;
 import java.util.Random;
 
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,6 +35,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
 
 /**
@@ -41,7 +43,7 @@
  */
 public class TestRaidHar extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+     "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR, 
       "test-raid.xml").getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
@@ -96,11 +98,27 @@
     fileSys = dfs.getFileSystem();
     namenode = fileSys.getUri().toString();
     mr = new MiniMRCluster(taskTrackers, namenode, 3);
-    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+    JobConf jobConf = mr.createJobConf();
+    jobTrackerName = "localhost:" + jobConf.get(JTConfig.JT_IPC_ADDRESS);
     hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
 
     FileSystem.setDefaultUri(conf, namenode);
     conf.set("mapred.job.tracker", jobTrackerName);
+    conf.set("mapreduce.framework.name", "yarn");
+    String rmAddress = jobConf.get("yarn.resourcemanager.address");
+    if (rmAddress != null) {
+      conf.set("yarn.resourcemanager.address", rmAddress);
+    }
+    String schedulerAddress =
+      jobConf.get("yarn.resourcemanager.scheduler.address");
+    if (schedulerAddress != null) {
+      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
+    }
+    String jobHistoryAddress = 
+        jobConf.get("mapreduce.jobhistory.address");
+    if (jobHistoryAddress != null) {
+      conf.set("mapreduce.jobhistory.address", jobHistoryAddress);
+    }
   }
     
   /**
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java
similarity index 88%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java
index 355d9ad..5bdee75 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java
@@ -37,9 +37,13 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.raid.protocol.PolicyInfo;
 import org.apache.hadoop.raid.protocol.PolicyList;
+import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType;
 
 /**
@@ -49,7 +53,8 @@
   */
 public class TestRaidNode extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
+  public static final String DistRaid_JAR = JarFinder.getJar(DistRaid.class);
   final static String CONFIG_FILE = new File(TEST_DIR, 
       "test-raid.xml").getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
@@ -76,6 +81,8 @@
     conf.setBoolean("raid.config.reload", true);
     conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
     conf.setBoolean("dfs.permissions.enabled", true);
+    conf.setLong(JobMonitor.JOBMONITOR_INTERVAL_KEY, 20000);
+    conf.setLong(RaidNode.TRIGGER_MONITOR_SLEEP_TIME_KEY, 3000L);
 
     // scan all policies once every 5 second
     conf.setLong("raid.policy.rescan.interval", 5000);
@@ -103,11 +110,27 @@
     namenode = fileSys.getUri().toString();
     final int taskTrackers = 4;
     mr = new MiniMRCluster(taskTrackers, namenode, 3);
-    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+    JobConf jobConf = mr.createJobConf();
+    jobTrackerName = "localhost:" + jobConf.get(JTConfig.JT_IPC_ADDRESS);
     hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
 
     FileSystem.setDefaultUri(conf, namenode);
     conf.set("mapred.job.tracker", jobTrackerName);
+    conf.set("mapreduce.framework.name", "yarn");
+    String rmAddress = jobConf.get("yarn.resourcemanager.address");
+    if (rmAddress != null) {
+      conf.set("yarn.resourcemanager.address", rmAddress);
+    }
+    String schedulerAddress =
+      jobConf.get("yarn.resourcemanager.scheduler.address");
+    if (schedulerAddress != null) {
+      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
+    }
+    String jobHistoryAddress = 
+        jobConf.get("mapreduce.jobhistory.address");
+    if (jobHistoryAddress != null) {
+      conf.set("mapreduce.jobhistory.address", jobHistoryAddress);
+    }
   }
 
   class ConfigBuilder {
@@ -238,9 +261,9 @@
     LOG.info("Test testPathFilter started.");
 
     long blockSizes    []  = {1024L};
-    long stripeLengths []  = {1, 2, 5, 6, 10, 11, 12};
-    long targetReplication = 1;
-    long metaReplication   = 1;
+    int stripeLengths []  = {5, 6, 10, 11, 12};
+    int targetReplication = 1;
+    int metaReplication   = 1;
     int  numBlock          = 11;
     int  iter = 0;
 
@@ -284,7 +307,8 @@
       LOG.info("doTestPathFilter created test files for iteration " + iter);
 
       // create an instance of the RaidNode
-      cnode = RaidNode.createRaidNode(null, conf);
+      Configuration localConf = new Configuration(conf);
+      cnode = RaidNode.createRaidNode(null, localConf);
       FileStatus[] listPaths = null;
 
       // wait till file is raided
@@ -314,7 +338,6 @@
       }
       // assertEquals(listPaths.length, 1); // all files raided
       LOG.info("doTestPathFilter all files found in Raid.");
-      Thread.sleep(20000); // Without this wait, unit test crashes
 
       // check for error at beginning of file
       shell = new RaidShell(conf);
@@ -466,16 +489,23 @@
     LOG.info("doCheckPolicy completed:");
   }
 
-  private void createTestFiles(String path, String destpath) throws IOException {
+  static public void createTestFiles(FileSystem fileSys, 
+      String path, String destpath, int nfile,
+      int nblock) throws IOException {
+    createTestFiles(fileSys, path, destpath, nfile, nblock, (short)1);
+  }
+
+  static void createTestFiles(FileSystem fileSys, String path, String destpath, int nfile,
+      int nblock, short repl) throws IOException {
     long blockSize         = 1024L;
     Path dir = new Path(path);
     Path destPath = new Path(destpath);
     fileSys.delete(dir, true);
     fileSys.delete(destPath, true);
    
-    for(int i = 0 ; i < 10; i++){
+    for(int i = 0 ; i < nfile; i++){
       Path file = new Path(path + "file" + i);
-      createOldFile(fileSys, file, 1, 7, blockSize);
+      createOldFile(fileSys, file, repl, nblock, blockSize);
     }
   }
 
@@ -499,12 +529,15 @@
 
     RaidNode cnode = null;
     try {
-      createTestFiles("/user/dhruba/raidtest/", "/destraid/user/dhruba/raidtest");
-      createTestFiles("/user/dhruba/raidtest2/", "/destraid/user/dhruba/raidtest2");
+      createTestFiles(fileSys, "/user/dhruba/raidtest/",
+          "/destraid/user/dhruba/raidtest", 5, 7);
+      createTestFiles(fileSys, "/user/dhruba/raidtest2/",
+          "/destraid/user/dhruba/raidtest2", 5, 7);
       LOG.info("Test testDistRaid created test files");
 
       Configuration localConf = new Configuration(conf);
       localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
+      localConf.set(JobContext.JAR, TestRaidNode.DistRaid_JAR);
       cnode = RaidNode.createRaidNode(null, localConf);
       // Verify the policies are parsed correctly
       for (PolicyList policyList : cnode.getAllPolicies()) {
@@ -540,15 +573,13 @@
              System.currentTimeMillis() - start < MAX_WAITTIME) {
         Thread.sleep(1000);
       }
-      assertEquals(dcnode.jobMonitor.jobsMonitored(), 2);
-
+      
       start = System.currentTimeMillis();
       while (dcnode.jobMonitor.jobsSucceeded() < 2 &&
              System.currentTimeMillis() - start < MAX_WAITTIME) {
         Thread.sleep(1000);
       }
-      assertEquals(dcnode.jobMonitor.jobsSucceeded(), 2);
-
+      assertEquals(dcnode.jobMonitor.jobsSucceeded(), dcnode.jobMonitor.jobsMonitored());
       LOG.info("Test testDistRaid successful.");
       
     } catch (Exception e) {
@@ -647,24 +678,19 @@
 
     RaidNode cnode = null;
     try {
-      createTestFiles(
-        "/user/dhruba/raidtest/1/", "/destraid/user/dhruba/raidtest/1");
-      createTestFiles(
-        "/user/dhruba/raidtest/2/", "/destraid/user/dhruba/raidtest/2");
-      createTestFiles(
-        "/user/dhruba/raidtest/3/", "/destraid/user/dhruba/raidtest/3");
-      createTestFiles(
-        "/user/dhruba/raidtest/4/", "/destraid/user/dhruba/raidtest/4");
+      for(int i = 0; i < 4; i++){
+        Path file = new Path("/user/dhruba/raidtest/dir" + i + "/file" + i);
+        createOldFile(fileSys, file, 1, 7, 1024L);
+      }
+
       LOG.info("Test testSuspendTraversal created test files");
 
       Configuration localConf = new Configuration(conf);
-      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
-      localConf.setInt("raid.distraid.max.files", 3);
+      localConf.setInt("raid.distraid.max.jobs", 2);
+      localConf.setInt("raid.distraid.max.files", 2);
       localConf.setInt("raid.directorytraversal.threads", 1);
-      // This is too dependent on the implementation of getFilteredFiles().
-      // It relies on the threading behavior where two directories are traversed
-      // before returning because the list of files is modified in a separate
-      // thread from the one that decides if there are enough files.
+      localConf.set(JobContext.JAR, TestRaidNode.DistRaid_JAR);
+      // 4 test files: 2 jobs with 2 files each.
       final int numJobsExpected = 2;
       cnode = RaidNode.createRaidNode(null, localConf);
 
@@ -677,10 +703,20 @@
       start = System.currentTimeMillis();
       while (dcnode.jobMonitor.jobsSucceeded() < numJobsExpected &&
              System.currentTimeMillis() - start < MAX_WAITTIME) {
+        LOG.info("Waiting for num jobs succeeded " + dcnode.jobMonitor.jobsSucceeded() + 
+         " to reach " + numJobsExpected);
+        Thread.sleep(3000);
+      }
+      // Wait for any running jobs to finish.
+      start = System.currentTimeMillis();
+      while (dcnode.jobMonitor.runningJobsCount() > 0 &&
+             System.currentTimeMillis() - start < MAX_WAITTIME) {
+        LOG.info("Waiting for zero running jobs: " +
+             dcnode.jobMonitor.runningJobsCount());
         Thread.sleep(1000);
       }
-      assertEquals(dcnode.jobMonitor.jobsMonitored(), numJobsExpected);
-      assertEquals(dcnode.jobMonitor.jobsSucceeded(), numJobsExpected);
+      assertEquals(numJobsExpected, dcnode.jobMonitor.jobsMonitored());
+      assertEquals(numJobsExpected, dcnode.jobMonitor.jobsSucceeded());
 
       LOG.info("Test testSuspendTraversal successful.");
 
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java
similarity index 94%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java
index accef0b..ca6dc4a 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.raid.protocol.PolicyList;
 import org.apache.hadoop.hdfs.TestRaidDfs;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.raid.protocol.PolicyInfo;
 
 /**
@@ -58,7 +59,7 @@
  */
 public class TestRaidPurge extends TestCase {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR, 
       "test-raid.xml").getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
@@ -113,11 +114,27 @@
     fileSys = dfs.getFileSystem();
     namenode = fileSys.getUri().toString();
     mr = new MiniMRCluster(taskTrackers, namenode, 3);
-    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+    JobConf jobConf = mr.createJobConf();
+    jobTrackerName = "localhost:" + jobConf.get(JTConfig.JT_IPC_ADDRESS);
     hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
 
     FileSystem.setDefaultUri(conf, namenode);
     conf.set("mapred.job.tracker", jobTrackerName);
+    conf.set("mapreduce.framework.name", "yarn");
+    String rmAddress = jobConf.get("yarn.resourcemanager.address");
+    if (rmAddress != null) {
+      conf.set("yarn.resourcemanager.address", rmAddress);
+    }
+    String schedulerAddress =
+      jobConf.get("yarn.resourcemanager.scheduler.address");
+    if (schedulerAddress != null) {
+      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
+    }
+    String jobHistoryAddress = 
+        jobConf.get("mapreduce.jobhistory.address");
+    if (jobHistoryAddress != null) {
+      conf.set("mapreduce.jobhistory.address", jobHistoryAddress);
+    }
   }
     
   /**
@@ -235,6 +252,7 @@
 
       // create an instance of the RaidNode
       Configuration localConf = new Configuration(conf);
+      
       localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
       cnode = RaidNode.createRaidNode(null, localConf);
       FileStatus[] listPaths = null;
@@ -299,7 +317,7 @@
     createClusters(true);
     mySetup(1, 1, 5, harDelay);
     Path dir = new Path("/user/dhruba/raidtest/");
-    Path destPath = new Path("/destraid/user/dhruba/raidtest");
+    Path destPath = new Path("/raid/user/dhruba/raidtest");
     Path file1 = new Path(dir + "/file");
     RaidNode cnode = null;
     try {
@@ -308,7 +326,6 @@
 
       // create an instance of the RaidNode
       Configuration localConf = new Configuration(conf);
-      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
       cnode = RaidNode.createRaidNode(null, localConf);
 
       // Wait till har is created.
@@ -334,14 +351,7 @@
       boolean found = false;
       FileStatus[] listPaths = null;
       while (!found || listPaths == null || listPaths.length > 1) {
-        try {
-          listPaths = fileSys.listStatus(destPath);
-        } catch (FileNotFoundException e) {
-          // If the parent directory is deleted because the har is deleted
-          // and the parent is empty, try again.
-          Thread.sleep(1000);
-          continue;
-        }
+        listPaths = fileSys.listStatus(destPath);
         if (listPaths != null) {
           for (FileStatus s: listPaths) {
             LOG.info("testPurgeHar waiting for parity file to be recreated" +
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java
index 9375e40..c283ce9 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java
@@ -47,7 +47,7 @@
   final static Log LOG = LogFactory.getLog(
                             "org.apache.hadoop.raid.TestRaidShell");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+      "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR,
       "test-raid.xml").getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
@@ -249,7 +249,8 @@
   private long getCRC(FileSystem fs, Path p) throws IOException {
     CRC32 crc = new CRC32();
     FSDataInputStream stm = fs.open(p);
-    for (int b = 0; b > 0; b = stm.read()) {
+    int b;
+    while ((b = stm.read()) >= 0) {
       crc.update(b);
     }
     stm.close();
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java
index 2841621..fad14ea 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidShellFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -51,8 +51,8 @@
     LogFactory.getLog("org.apache.hadoop.raid.TestRaidShellFsck");
   final static String TEST_DIR = 
     new File(System.
-             getProperty("test.build.data", "build/contrib/raid/test/data")).
-    getAbsolutePath();
+             getProperty("test.build.data", "target/test-data")).getAbsolutePath();
+
   final static String CONFIG_FILE = new File(TEST_DIR, "test-raid.xml").
     getAbsolutePath();
   final static long RELOAD_INTERVAL = 1000;
@@ -262,7 +262,7 @@
               }
               
             } else {
-              // case without HAR
+              // case without HAR
               for (FileStatus f : listPaths) {
                 Path found = new Path(f.getPath().toUri().getPath());
                 if (parityFilePath.equals(found)) {
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java
index 31704af..5f47cee 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonDecoder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java
@@ -42,7 +42,7 @@
   final static Log LOG = LogFactory.getLog(
                             "org.apache.hadoop.raid.TestReedSolomonDecoder");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+        "target/test-data")).getAbsolutePath();
   final static int NUM_DATANODES = 3;
 
   Configuration conf;
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java
similarity index 98%
rename from hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java
rename to hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java
index 9815591..bd12014 100644
--- a/hadoop-mapreduce-project/src/contrib/raid/src/test/org/apache/hadoop/raid/TestReedSolomonEncoder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java
@@ -49,7 +49,7 @@
   final static Log LOG = LogFactory.getLog(
                             "org.apache.hadoop.raid.TestReedSolomonEncoder");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
-      "build/contrib/raid/test/data")).getAbsolutePath();
+        "target/test-data")).getAbsolutePath();
   final static int NUM_DATANODES = 3;
 
   Configuration conf;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cf0fb48..a708333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -13,11 +13,6 @@
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
-    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
-
-    HDFS-3042. Automatic failover support for NameNode HA (todd)
-    (see dedicated section below for subtask breakdown)
-
   IMPROVEMENTS
 
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -90,14 +85,20 @@
     HDFS-3476. Correct the default used in TestDFSClientRetries.busyTest()
     after HDFS-3462 (harsh)
 
+    HDFS-3040. TestMulitipleNNDataBlockScanner is misspelled. (Madhukara Phatak
+    via atm)
+
+    HDFS-3049. During the normal NN startup process, fall back on a different
+    edit log if we see one that is corrupt (Colin Patrick McCabe via todd)
+
+    HDFS-3478. Test quotas with Long.Max_Value. (Sujay Rau via eli)
+
+    HDFS-3498. Support replica removal in BlockPlacementPolicy and make
+    BlockPlacementPolicyDefault extensible for reusing code in subclasses.
+    (Junping Du via szetszwo)
+
   OPTIMIZATIONS
 
-    HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
-    (Henry Robinson via todd)
-
-    HDFS-3110. Use directRead API to reduce the number of buffer copies in
-    libhdfs (Henry Robinson via todd)
-
   BUG FIXES
 
     HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
@@ -145,11 +146,6 @@
     factor is reduced after sync follwed by closing that file. (Ashish Singhi 
     via umamahesh)
 
-    HDFS-3235. MiniDFSClusterManager doesn't correctly support -format option.
-    (Henry Robinson via atm)
-
-    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
-
     HDFS-3265. PowerPc Build error. (Kumar Ravi via mattf)
 
     HDFS-2312. FSNamesystem javadoc incorrectly says its for DNs. (harsh)
@@ -162,28 +158,24 @@
     HDFS-3462. TestDFSClientRetries.busyTest() should restore default
     xceiver count in the config. (Madhukara Phatak via harsh)
 
-  BREAKDOWN OF HDFS-3042 SUBTASKS
+    HDFS-3550. Fix raid javadoc warnings. (Jason Lowe via daryn)
 
-    HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
-    
-    HDFS-3200. Scope all ZKFC configurations by nameservice (todd)
-    
-    HDFS-3223. add zkfc to hadoop-daemon.sh script (todd)
-    
-    HDFS-3261. TestHASafeMode fails on HDFS-3042 branch (todd)
-    
-    HDFS-3159. Document NN auto-failover setup and configuration (todd)
-    
-    HDFS-3412. Fix findbugs warnings in auto-HA branch (todd)
-    
-    HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
+    HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
 
-Release 2.0.1-alpha - UNRELEASED
-  
+Branch-2 ( Unreleased changes )
+ 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
+    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
+
+    HDFS-3042. Automatic failover support for NameNode HA (todd)
+    (see dedicated section below for breakdown of subtasks)
+
+    HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
+    the given HDFS is healthy. (szetszwo)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -238,11 +230,37 @@
     HDFS-1013. Miscellaneous improvements to HTML markup for web UIs
     (Eugene Koontz via todd)
 
+    HDFS-3052. Change INodeFile and INodeFileUnderConstruction to package
+    private.  (szetszwo)
+
+    HDFS-3520. Add transfer rate logging to TransferFsImage. (eli)
+
+    HDFS-3504. Support configurable retry policy in DFSClient for RPC
+    connections and RPC calls, and add MultipleLinearRandomRetry, a new retry
+    policy.  (szetszwo)
+
+    HDFS-3372. offlineEditsViewer should be able to read a binary
+    edits file with recovery mode. (Colin Patrick McCabe via eli)
+
+    HDFS-3516. Check content-type in WebHdfsFileSystem.  (szetszwo)
+
+    HDFS-3535. Audit logging should log denied accesses. (Andy Isaacson via eli)
+
+    HDFS-3481. Refactor HttpFS handling of JAX-RS query string parameters (tucu)
+
+    HDFS-3572. Cleanup code which inits SPNEGO in HttpServer (todd)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
     segments. (Colin Patrick McCabe via todd)
 
+    HDFS-2834. Add a ByteBuffer-based read API to DFSInputStream.
+    (Henry Robinson via todd)
+
+    HDFS-3110. Use directRead API to reduce the number of buffer copies in
+    libhdfs (Henry Robinson via todd)
+
   BUG FIXES
 
     HDFS-3385. The last block of INodeFileUnderConstruction is not
@@ -307,6 +325,69 @@
     HDFS-3266. DFSTestUtil#waitCorruptReplicas doesn't sleep between checks.
     (Madhukara Phatak via atm)
 
+    HDFS-3505. DirectoryScanner does not join all threads in shutdown.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3485. DataTransferThrottler will over-throttle when currentTimeMillis
+    jumps (Andy Isaacson via todd)
+
+    HDFS-2914. HA: Standby should not enter safemode when resources are low.
+    (Vinay via atm)
+
+    HDFS-3235. MiniDFSClusterManager doesn't correctly support -format option.
+    (Henry Robinson via atm)
+
+    HDFS-3514. Add missing TestParallelLocalRead. (Henry Robinson via atm)
+
+    HDFS-3243. TestParallelRead timing out on jenkins. (Henry Robinson via todd)
+
+    HDFS-3490. DatanodeWebHdfsMethods throws NullPointerException if
+    NamenodeRpcAddressParam is not set.  (szetszwo)
+
+    HDFS-2797. Fix misuses of InputStream#skip in the edit log code.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3517. TestStartup should bind ephemeral ports. (eli)
+
+    HDFS-3522. If a namenode is in safemode, it should throw SafeModeException
+    when getBlockLocations has zero locations.  (Brandon Li via szetszwo)
+    
+    HDFS-3408. BKJM : Namenode format fails, if there is no BK root. (Rakesh R via umamahesh)
+
+    HDFS-3389. Document the BKJM usage in Namenode HA. (umamahesh and Ivan Kelly via umamahesh)
+
+    HDFS-3531. EditLogFileOutputStream#preallocate should check for
+    incomplete writes. (Colin Patrick McCabe via eli)
+
+    HDFS-766. Error message not clear for set space quota out of boundary
+    values. (Jon Zuanich via atm)
+
+    HDFS-3480. Multiple SLF4J binding warning. (Vinay via eli)
+
+    HDFS-3524. Update TestFileLengthOnClusterRestart for HDFS-3522.  (Brandon
+    Li via szetszwo)
+
+  BREAKDOWN OF HDFS-3042 SUBTASKS
+
+    HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
+    
+    HDFS-3200. Scope all ZKFC configurations by nameservice (todd)
+    
+    HDFS-3223. add zkfc to hadoop-daemon.sh script (todd)
+    
+    HDFS-3261. TestHASafeMode fails on HDFS-3042 branch (todd)
+    
+    HDFS-3159. Document NN auto-failover setup and configuration (todd)
+    
+    HDFS-3412. Fix findbugs warnings in auto-HA branch (todd)
+    
+    HDFS-3432. TestDFSZKFailoverController tries to fail over too early (todd)
+
+    HDFS-3428. Move DelegationTokenRenewer to common (tucu)
+
+    HDFS-3551. WebHDFS CREATE should use client location for HTTP redirection.
+    (szetszwo)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index a0878df..1c342de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -107,21 +107,12 @@
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <version>3.4.2</version>
-      <exclusions>
-        <exclusion>
-          <!-- otherwise seems to drag in junit 3.8.1 via jline -->
-          <groupId>junit</groupId>
-          <artifactId>junit</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jdmk</groupId>
-          <artifactId>jmxtools</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jmx</groupId>
-          <artifactId>jmxri</artifactId>
-        </exclusion>
-      </exclusions>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -438,76 +429,22 @@
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
                 <configuration>
                   <target>
-                    <copy toDir="${project.build.directory}/native">
-                      <fileset dir="${basedir}/src/main/native"/>
-                    </copy>
-                    <mkdir dir="${project.build.directory}/native/m4"/>
+                    <mkdir dir="${project.build.directory}/native"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" 
+                        failonerror="true">
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
                   </target>
                 </configuration>
               </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>compile</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
-                <configuration>
-                  <!-- autoreconf settings -->
-                  <workDir>${project.build.directory}/native</workDir>
-                  <arguments>
-                    <argument>-i</argument>
-                    <argument>-f</argument>
-                  </arguments>
-
-                  <!-- configure settings -->
-                  <configureEnvironment>
-                    <property>
-                      <name>ac_cv_func_malloc_0_nonnull</name>
-                      <value>yes</value>
-                    </property>
-                    <property>
-                      <name>JVM_ARCH</name>
-                      <value>${sun.arch.data.model}</value>
-                    </property>
-                  </configureEnvironment>
-                  <configureOptions>
-                  </configureOptions>
-                  <configureWorkDir>${project.build.directory}/native</configureWorkDir>
-                  <prefix>/usr/local</prefix>
-
-                  <!-- make settings -->
-                  <installEnvironment>
-                    <property>
-                      <name>ac_cv_func_malloc_0_nonnull</name>
-                      <value>yes</value>
-                    </property>
-                    <property>
-                      <name>JVM_ARCH</name>
-                      <value>${sun.arch.data.model}</value>
-                    </property>
-                  </installEnvironment>
-
-                  <!-- configure & make settings -->
-                  <destDir>${project.build.directory}/native/target</destDir>
-
-                </configuration>
-              </execution>
-              
               <!-- TODO wire here native testcases
               <execution>
                 <id>test</id>
@@ -564,7 +501,7 @@
             <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>kdc</id>
                 <phase>compile</phase>
                 <goals>
                   <goal>run</goal>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
new file mode 100644
index 0000000..cafbcfb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -0,0 +1,126 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+
+# Default to release builds
+set(CMAKE_BUILD_TYPE Release)
+
+# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
+# This variable is set by maven.
+if (JVM_ARCH_DATA_MODEL EQUAL 32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+    set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+        set(CMAKE_SYSTEM_PROCESSOR "i686")
+    endif ()
+endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+
+# Compile a library with both shared and static variants
+function(add_dual_library LIBNAME)
+    add_library(${LIBNAME} SHARED ${ARGN})
+    add_library(${LIBNAME}_static STATIC ${ARGN})
+    set_target_properties(${LIBNAME}_static PROPERTIES OUTPUT_NAME ${LIBNAME})
+endfunction(add_dual_library)
+
+# Link both a static and a dynamic target against some libraries
+function(target_link_dual_libraries LIBNAME)
+    target_link_libraries(${LIBNAME} ${ARGN})
+    target_link_libraries(${LIBNAME}_static ${ARGN})
+endfunction(target_link_dual_libraries)
+
+function(output_directory TGT DIR)
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+endfunction(output_directory TGT DIR)
+
+function(dual_output_directory TGT DIR)
+    output_directory(${TGT} "${DIR}")
+    output_directory(${TGT}_static "${DIR}")
+endfunction(dual_output_directory TGT DIR)
+
+# Flatten a list into a string.
+function(FLATTEN_LIST INPUT SEPARATOR OUTPUT)
+  string (REPLACE ";" "${SEPARATOR}" _TMPS "${INPUT}")
+  set (${OUTPUT} "${_TMPS}" PARENT_SCOPE)
+endfunction()
+
+find_package(JNI REQUIRED)
+if (NOT GENERATED_JAVAH)
+    # Must identify where the generated headers have been placed
+    MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
+endif (NOT GENERATED_JAVAH)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2 -D_GNU_SOURCE")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
+
+include_directories(
+    ${GENERATED_JAVAH}
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_BINARY_DIR}
+    ${JNI_INCLUDE_DIRS}
+    main/native/
+)
+
+set(_FUSE_DFS_VERSION 0.1.0)
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+add_dual_library(hdfs
+    main/native/hdfs.c
+    main/native/hdfsJniHelper.c
+)
+target_link_dual_libraries(hdfs
+    ${JAVA_JVM_LIBRARY}
+)
+dual_output_directory(hdfs target/usr/local/lib)
+set(LIBHDFS_VERSION "0.0.0")
+set_target_properties(hdfs PROPERTIES
+    SOVERSION ${LIBHDFS_VERSION})
+
+add_executable(hdfs_test
+    main/native/hdfs_test.c
+)
+target_link_libraries(hdfs_test
+    hdfs
+    ${JAVA_JVM_LIBRARY}
+)
+output_directory(hdfs_test target/usr/local/bin)
+
+add_executable(hdfs_read
+    main/native/hdfs_read.c
+)
+target_link_libraries(hdfs_read
+    hdfs
+    ${JAVA_JVM_LIBRARY}
+)
+output_directory(hdfs_read target/usr/local/bin)
+
+add_executable(hdfs_write
+    main/native/hdfs_write.c
+)
+target_link_libraries(hdfs_write
+    hdfs
+    ${JAVA_JVM_LIBRARY}
+)
+output_directory(hdfs_write target/usr/local/bin)
+
+add_subdirectory(contrib/fuse-dfs/src)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
new file mode 100644
index 0000000..5c4c501
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake
@@ -0,0 +1,6 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#cmakedefine _FUSE_DFS_VERSION "@_FUSE_DFS_VERSION@"
+
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 5317a0f..d6f1963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -28,6 +28,7 @@
 import org.apache.bookkeeper.client.BKException;
 import org.apache.bookkeeper.client.BookKeeper;
 import org.apache.bookkeeper.client.LedgerHandle;
+import org.apache.bookkeeper.util.ZkUtils;
 
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.ZooKeeper;
@@ -36,6 +37,7 @@
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.AsyncCallback.StringCallback;
 
 import java.util.Collection;
 import java.util.Collections;
@@ -124,6 +126,12 @@
 
   private static final String BKJM_EDIT_INPROGRESS = "inprogress_";
 
+  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH
+    = "dfs.namenode.bookkeeperjournal.zk.availablebookies";
+
+  public static final String BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT
+    = "/ledgers/available";
+
   private ZooKeeper zkc;
   private final Configuration conf;
   private final BookKeeper bkc;
@@ -196,7 +204,7 @@
         zkc.create(ledgerPath, new byte[] {'0'},
             Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
       }
-
+      prepareBookKeeperEnv();
       bkc = new BookKeeper(new ClientConfiguration(),
                            zkc);
     } catch (KeeperException e) {
@@ -211,6 +219,50 @@
   }
 
   /**
+   * Pre-create the BookKeeper metadata path in ZooKeeper if it does not already exist.
+   */
+  private void prepareBookKeeperEnv() throws IOException {
+    // create the bookie available path in ZooKeeper if it doesn't exist
+    final String zkAvailablePath = conf.get(BKJM_ZK_LEDGERS_AVAILABLE_PATH,
+        BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
+    final CountDownLatch zkPathLatch = new CountDownLatch(1);
+
+    StringCallback callback = new StringCallback() {
+      @Override
+      public void processResult(int rc, String path, Object ctx, String name) {
+        if (KeeperException.Code.OK.intValue() == rc
+            || KeeperException.Code.NODEEXISTS.intValue() == rc) {
+          LOG.info("Successfully created bookie available path : "
+              + zkAvailablePath);
+          zkPathLatch.countDown();
+        } else {
+          KeeperException.Code code = KeeperException.Code.get(rc);
+          LOG.error("Error : "
+              + KeeperException.create(code, path).getMessage()
+              + ", failed to create bookie available path : "
+              + zkAvailablePath);
+        }
+      }
+    };
+    ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
+        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
+
+    try {
+      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) {
+        throw new IOException("Couldn't create bookie available path :"
+            + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
+            + " millis");
+      }
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new IOException(
+          "Interrupted when creating the bookie available path : "
+              + zkAvailablePath, e);
+    }
+  }
+
+  /**
    * Start a new log segment in a BookKeeper ledger.
    * First ensure that we have the write lock for this journal.
    * Then create a ledger and stream based on that ledger.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
new file mode 100644
index 0000000..df788a2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.contrib.bkjournal;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.bookkeeper.util.LocalBookKeeper;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZKUtil;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.server.NIOServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestBookKeeperConfiguration {
+  private static final Log LOG = LogFactory
+      .getLog(TestBookKeeperConfiguration.class);
+  private static final int ZK_SESSION_TIMEOUT = 5000;
+  private static final String HOSTPORT = "127.0.0.1:2181";
+  private static final int CONNECTION_TIMEOUT = 30000;
+  private static NIOServerCnxnFactory serverFactory;
+  private static ZooKeeperServer zks;
+  private static ZooKeeper zkc;
+  private static int ZooKeeperDefaultPort = 2181;
+  private static File ZkTmpDir;
+  private BookKeeperJournalManager bkjm;
+  private static final String BK_ROOT_PATH = "/ledgers";
+
+  private static ZooKeeper connectZooKeeper(String ensemble)
+      throws IOException, KeeperException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+
+    ZooKeeper zkc = new ZooKeeper(HOSTPORT, ZK_SESSION_TIMEOUT, new Watcher() {
+      public void process(WatchedEvent event) {
+        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
+          latch.countDown();
+        }
+      }
+    });
+    if (!latch.await(ZK_SESSION_TIMEOUT, TimeUnit.MILLISECONDS)) {
+      throw new IOException("Zookeeper took too long to connect");
+    }
+    return zkc;
+  }
+
+  @BeforeClass
+  public static void setupZooKeeper() throws Exception {
+    // create a ZooKeeper server (dataDir, dataLogDir, port)
+    LOG.info("Starting ZK server");
+    ZkTmpDir = File.createTempFile("zookeeper", "test");
+    ZkTmpDir.delete();
+    ZkTmpDir.mkdir();
+
+    try {
+      zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort);
+      serverFactory = new NIOServerCnxnFactory();
+      serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), 10);
+      serverFactory.startup(zks);
+    } catch (Exception e) {
+      LOG.error("Exception while instantiating ZooKeeper", e);
+    }
+
+    boolean b = LocalBookKeeper.waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT);
+    LOG.debug("ZooKeeper server up: " + b);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    zkc = connectZooKeeper(HOSTPORT);
+    try {
+      ZKUtil.deleteRecursive(zkc, BK_ROOT_PATH);
+    } catch (KeeperException.NoNodeException e) {
+      LOG.debug("Ignoring no node exception on cleanup", e);
+    } catch (Exception e) {
+      LOG.error("Exception when deleting bookie root path in zk", e);
+    }
+  }
+
+  @After
+  public void teardown() throws Exception {
+    if (null != zkc) {
+      zkc.close();
+    }
+    if (null != bkjm) {
+      bkjm.close();
+    }
+  }
+
+  @AfterClass
+  public static void teardownZooKeeper() throws Exception {
+    if (null != zkc) {
+      zkc.close();
+    }
+  }
+
+  /**
+   * Verify that BKJM creates the bookie available path configured via
+   * 'dfs.namenode.bookkeeperjournal.zk.availablebookies'.
+   */
+  @Test
+  public void testWithConfiguringBKAvailablePath() throws Exception {
+    // set Bookie available path in the configuration
+    String bkAvailablePath 
+      = BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT;
+    Configuration conf = new Configuration();
+    conf.setStrings(BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH,
+        bkAvailablePath);
+    Assert.assertNull(bkAvailablePath + " already exists", zkc.exists(
+        bkAvailablePath, false));
+    bkjm = new BookKeeperJournalManager(conf, URI.create("bookkeeper://"
+        + HOSTPORT + "/hdfsjournal-WithBKPath"));
+    Assert.assertNotNull("Bookie available path : " + bkAvailablePath
+        + " doesn't exists", zkc.exists(bkAvailablePath, false));
+  }
+
+  /**
+   * Verify that BKJM creates the default bookie available path when
+   * 'dfs.namenode.bookkeeperjournal.zk.availablebookies' is not configured.
+   */
+  @Test
+  public void testDefaultBKAvailablePath() throws Exception {
+    Configuration conf = new Configuration();
+    Assert.assertNull(BK_ROOT_PATH + " already exists", zkc.exists(
+        BK_ROOT_PATH, false));
+    new BookKeeperJournalManager(conf, URI.create("bookkeeper://" + HOSTPORT
+        + "/hdfsjournal-DefaultBKPath"));
+    Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH
+        + " doesn't exists", zkc.exists(BK_ROOT_PATH, false));
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/Makefile.am b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/Makefile.am
deleted file mode 100644
index f4d6c57..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/Makefile.am
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-@GLOBAL_HEADER_MK@
-
-@PRODUCT_MK@
-
-SUBDIRS = . src
-
-clean:
-	rm -rf autom4te.cache config.guess config.log config.status config.sub configure depcomp src/.deps install-sh Makefile.in src/Makefile.in src/Makefile missing Makefile src/fuse_dfs.o src/fuse_dfs aclocal.m4
-
-
-@GLOBAL_FOOTER_MK@
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/acinclude.m4 b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/acinclude.m4
deleted file mode 100644
index b77a6ef..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/acinclude.m4
+++ /dev/null
@@ -1,270 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AC_DEFUN([FUSE_DFS_INITIALIZE],
-[
-AM_INIT_AUTOMAKE([ foreign 1.9.5 no-define ])
-if test "x$1" = "xlocalinstall"; then
-wdir=`pwd`
-# To use $wdir undef quote.
-#
-##########
-AC_PREFIX_DEFAULT([`pwd`/install])
-echo
-fi
-AC_PROG_CC
-AC_PROG_CXX
-AC_PROG_RANLIB(RANLIB, ranlib)
-AC_PATH_PROGS(BASH, bash)
-AC_PATH_PROGS(PERL, perl)
-AC_PATH_PROGS(PYTHON, python)
-AC_PATH_PROGS(AR, ar)
-AC_PATH_PROGS(ANT, ant)
-PRODUCT_MK=""
-])
-
-AC_DEFUN([FUSE_DFS_WITH_EXTERNAL_PATH],
-[
-cdir=`pwd`
-AC_MSG_CHECKING([Checking EXTERNAL_PATH set to])
-AC_ARG_WITH([externalpath],
-  [ --with-externalpath=DIR User specified path to external fuse dfs components.],
-  [
-    if test "x${EXTERNAL_PATH}" != "x"; then
-       echo ""
-       echo "ERROR: You have already set EXTERNAL_PATH in your environment"
-       echo "Cannot override it using --with-externalpath. Unset EXTERNAL_PATH to use this option"
-       exit 1
-    fi
-    EXTERNAL_PATH=$withval
-  ],
-  [
-    if test "x${EXTERNAL_PATH}" = "x"; then
-       EXTERNAL_PATH=$1
-    fi
-  ]
-)
-if test "x${EXTERNAL_PATH}" = "x"; then
-   export EXTERNAL_PATH="$cdir/external"
-   GLOBAL_HEADER_MK="include ${EXTERNAL_PATH}/global_header.mk"
-   GLOBAL_FOOTER_MK="include ${EXTERNAL_PATH}/global_footer.mk"
-else
-   export EXTERNAL_PATH
-   GLOBAL_HEADER_MK="include ${EXTERNAL_PATH}/global_header.mk"
-   GLOBAL_FOOTER_MK="include ${EXTERNAL_PATH}/global_footer.mk"
-fi
-AC_MSG_RESULT($EXTERNAL_PATH)
-if test ! -d ${EXTERNAL_PATH}; then
-       echo ""
-       echo "ERROR: EXTERNAL_PATH set to an nonexistent directory ${EXTERNAL_PATH}"
-       exit 1
-fi
-AC_SUBST(EXTERNAL_PATH)
-AC_SUBST(GLOBAL_HEADER_MK)
-AC_SUBST(GLOBAL_FOOTER_MK)
-])
-
-# Set option to enable shared mode. Set DEBUG and OPT for use in Makefile.am.
-AC_DEFUN([FUSE_DFS_ENABLE_DEFAULT_OPT_BUILD],
-[
-AC_MSG_CHECKING([whether to enable optimized build])
-AC_ARG_ENABLE([opt],
-  [  --disable-opt     Set up debug mode.],
-  [
-     ENABLED_OPT=$enableval
-  ],
-  [
-     ENABLED_OPT="yes"
-  ]
-)
-if test "$ENABLED_OPT" = "yes"
-then
-     CFLAGS="-Wall -O3"
-     CXXFLAGS="-Wall -O3"
-else
-     CFLAGS="-Wall -g"
-     CXXFLAGS="-Wall -g"
-fi
-AC_MSG_RESULT($ENABLED_OPT)
-AM_CONDITIONAL([OPT], [test "$ENABLED_OPT" = yes])
-AM_CONDITIONAL([DEBUG], [test "$ENABLED_OPT" = no])
-])
-
-# Set option to enable debug mode. Set DEBUG and OPT for use in Makefile.am.
-AC_DEFUN([FUSE_DFS_ENABLE_DEFAULT_DEBUG_BUILD],
-[
-AC_MSG_CHECKING([whether to enable debug build])
-AC_ARG_ENABLE([debug],
-  [  --disable-debug     Set up opt mode.],
-  [
-     ENABLED_DEBUG=$enableval
-  ],
-  [
-     ENABLED_DEBUG="yes"
-  ]
-)
-if test "$ENABLED_DEBUG" = "yes"
-then
-     CFLAGS="-Wall -g"
-     CXXFLAGS="-Wall -g"
-else
-     CFLAGS="-Wall -O3"
-     CXXFLAGS="-Wall -O3"
-fi
-AC_MSG_RESULT($ENABLED_DEBUG)
-AM_CONDITIONAL([DEBUG], [test "$ENABLED_DEBUG" = yes])
-AM_CONDITIONAL([OPT], [test "$ENABLED_DEBUG" = no])
-])
-
-# Set option to enable static libs.
-AC_DEFUN([FUSE_DFS_ENABLE_DEFAULT_STATIC],
-[
-SHARED=""
-STATIC=""
-AC_MSG_CHECKING([whether to enable static mode])
-AC_ARG_ENABLE([static],
-  [  --disable-static     Set up shared mode.],
-  [
-     ENABLED_STATIC=$enableval
-  ],
-  [
-     ENABLED_STATIC="yes"
-  ]
-)
-if test "$ENABLED_STATIC" = "yes"
-then
-     LTYPE=".a"
-else
-     LTYPE=".so"
-     SHARED_CXXFLAGS="-fPIC"
-     SHARED_CFLAGS="-fPIC"
-     SHARED_LDFLAGS="-shared -fPIC"
-     AC_SUBST(SHARED_CXXFLAGS)
-     AC_SUBST(SHARED_CFLAGS)
-     AC_SUBST(SHARED_LDFLAGS)
-fi
-AC_MSG_RESULT($ENABLED_STATIC)
-AC_SUBST(LTYPE)
-AM_CONDITIONAL([STATIC], [test "$ENABLED_STATIC" = yes])
-AM_CONDITIONAL([SHARED], [test "$ENABLED_STATIC" = no])
-])
-
-# Set option to enable shared libs.
-AC_DEFUN([FUSE_DFS_ENABLE_DEFAULT_SHARED],
-[
-SHARED=""
-STATIC=""
-AC_MSG_CHECKING([whether to enable shared mode])
-AC_ARG_ENABLE([shared],
-  [  --disable-shared     Set up static mode.],
-  [
-    ENABLED_SHARED=$enableval
-  ],
-  [
-     ENABLED_SHARED="yes"
-  ]
-)
-if test "$ENABLED_SHARED" = "yes"
-then
-     LTYPE=".so"
-     SHARED_CXXFLAGS="-fPIC"
-     SHARED_CFLAGS="-fPIC"
-     SHARED_LDFLAGS="-shared -fPIC"
-     AC_SUBST(SHARED_CXXFLAGS)
-     AC_SUBST(SHARED_CFLAGS)
-     AC_SUBST(SHARED_LDFLAGS)
-else
-     LTYPE=".a"
-fi
-AC_MSG_RESULT($ENABLED_SHARED)
-AC_SUBST(LTYPE)
-AM_CONDITIONAL([SHARED], [test "$ENABLED_SHARED" = yes])
-AM_CONDITIONAL([STATIC], [test "$ENABLED_SHARED" = no])
-])
-
-# Generates define flags and conditionals as specified by user.
-# This gets enabled *only* if user selects --enable-<FEATURE> otion.
-AC_DEFUN([FUSE_DFS_ENABLE_FEATURE],
-[
-ENABLE=""
-flag="$1"
-value="$3"
-AC_MSG_CHECKING([whether to enable $1])
-AC_ARG_ENABLE([$2],
-  [  --enable-$2     Enable $2.],
-  [
-     ENABLE=$enableval
-  ],
-  [
-     ENABLE="no"
-  ]
-)
-AM_CONDITIONAL([$1], [test "$ENABLE" = yes])
-if test "$ENABLE" = "yes"
-then
-   if test "x${value}" = "x"
-   then
-       AC_DEFINE([$1])
-   else
-       AC_DEFINE_UNQUOTED([$1], [$value])
-   fi
-fi
-AC_MSG_RESULT($ENABLE)
-])
-
-
-# can also use eval $2=$withval;AC_SUBST($2)
-AC_DEFUN([FUSE_DFS_WITH_PATH],
-[
-USRFLAG=""
-USRFLAG=$1
-AC_MSG_CHECKING([Checking $1 set to])
-AC_ARG_WITH([$2],
-  [ --with-$2=DIR User specified path.],
-  [
-    LOC=$withval
-    eval $USRFLAG=$withval
-  ],
-  [
-    LOC=$3
-    eval $USRFLAG=$3
-  ]
-)
-AC_SUBST([$1])
-AC_MSG_RESULT($LOC)
-])
-
-AC_DEFUN([FUSE_DFS_SET_FLAG_VALUE],
-[
-SETFLAG=""
-AC_MSG_CHECKING([Checking $1 set to])
-SETFLAG=$1
-eval $SETFLAG=\"$2\"
-AC_SUBST([$SETFLAG])
-AC_MSG_RESULT($2)
-])
-
-# NOTES
-# if using if else bourne stmt you must have more than a macro in it.
-# EX1 is not correct. EX2 is correct
-# EX1: if test "$XX" = "yes"; then
-#        AC_SUBST(xx)
-#      fi
-# EX2: if test "$XX" = "yes"; then
-#        xx="foo"
-#        AC_SUBST(xx)
-#      fi
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/configure.ac b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/configure.ac
deleted file mode 100644
index 1062ec3..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/configure.ac
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Autoconf input file
-# $Id$
-# AC - autoconf
-#########################################################################
-# Section 1:
-# DO NOT TOUCH EXCEPT TO CHANGE Product-Name and Rev# IN AC_INIT
-
-AC_PREREQ(2.52)
-AC_INIT([fuse_dfs], [0.1.0])
-#AC_CONFIG_AUX_DIR([/usr/share/automake-1.9])
-# To install locally
-AC_CANONICAL_TARGET()
-FUSE_DFS_INITIALIZE([localinstall])
-AC_PREFIX_DEFAULT([`pwd`])
-
-
-
-#case $target in
-#*64*intel)
-#     OS_ARCH=intel64 ;;
-#*64*amd* | *64*unknown*)
-#     OS_ARCH=amd64 ;;
-#$esac
-#AC_SUBST(OS_ARCH)
- DEFS=""
-AC_SUBST([DEFS])
-
-# Need GNU source for multiple hashtables from glibc
-AC_GNU_SOURCE
-
-AC_FUNC_GETGROUPS
-AC_TYPE_GETGROUPS
-
-AC_PROG_CC
-AC_SYS_LARGEFILE
-
-############################################################################
-# Section 2:
-# User Configurable system defaults. Change With CAUTION!
-
-# User can include custom makefile rules. Uncomment and update only <name> in PRODUCT_MK.
-# Include where appropriate in any Makefile.am as @PRODUCT_MK@
-
-# Default path to external components and shared build tools
-# To point to other locations set  environment variable EXTERNAL_PATH.
-# DO NOT change default. Changing default value requires changing bootstrap.sh.
-FUSE_DFS_WITH_EXTERNAL_PATH([`pwd`])
-
-# Pre-defined macro to set optimized build mode. Configure with --disable-opt option to turn off optimization. Default CXXFLAGS set to '-Wall -O3'. In debug mode  CXXFLAGS is  '-Wall -g'
-# FUSE_DFSENABLE_DEFAULT_DEBUG_BUILD
-FUSE_DFS_ENABLE_DEFAULT_OPT_BUILD
-
-# Predefined macro to set static library mode. Configure with --disable-static option to turn off static lib mode.
-# FUSE_DFS_ENABLE_DEFAULT_SHARED
-FUSE_DFS_ENABLE_DEFAULT_STATIC
-
-AC_CONFIG_FILES(Makefile src/Makefile)
-
-
-############################################################################
-# Section 4:
-# DO NOT TOUCH.
-
-AC_SUBST(PRODUCT_MK)
-AC_OUTPUT
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
deleted file mode 100644
index db27034..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/pom.xml
+++ /dev/null
@@ -1,164 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-
-
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-SNAPSHOT</version>
-    <relativePath>../../../../../hadoop-project</relativePath>
-  </parent>
-  <groupId>org.apache.hadoop.contrib</groupId>
-  <artifactId>hadoop-hdfs-fuse</artifactId>
-  <version>3.0.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <name>Apache Hadoop HDFS Fuse</name>
-  <description>Apache Hadoop HDFS Fuse</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <!-- workaround for filtered/unfiltered resources in same directory -->
-        <!-- remove when maven-eclipse-plugin 2.9 is available -->
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-eclipse-plugin</artifactId>
-        <version>2.6</version>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <threadCount>1</threadCount>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>javadoc</goal>
-            </goals>
-            <phase>site</phase>
-            <configuration>
-              <linksource>true</linksource>
-              <quiet>true</quiet>
-              <verbose>false</verbose>
-              <source>${maven.compile.source}</source>
-              <charset>${maven.compile.encoding}</charset>
-              <groups>
-                <group>
-                  <title>HttpFs API</title>
-                  <packages>*</packages>
-                </group>
-              </groups>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <executions>
-          <execution>
-            <configuration>
-              <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-            </configuration>
-            <goals>
-              <goal>dependencies</goal>
-            </goals>
-            <phase>site</phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>fuse</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>prepare-compile-native</id>
-                <phase>generate-sources</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <copy toDir="${project.build.directory}/fuse-dfs">
-                      <fileset dir="${basedir}"/>
-                    </copy>
-                  </target>
-                </configuration>
-              </execution>
-              <execution>
-                <id>compile-fuse</id>
-                <phase>compile</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <target>
-                    <ant antfile="${project.build.directory}/fuse-dfs/build.xml"
-                         dir="${project.build.directory}/fuse-dfs">
-                      <target name="compile"/>
-                    </ant>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-       </plugins>
-      </build>
-    </profile>
-  </profiles>
-</project>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/CMakeLists.txt
new file mode 100644
index 0000000..fb3c580
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/CMakeLists.txt
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Find Linux FUSE
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    find_package(PkgConfig REQUIRED)
+    pkg_check_modules(FUSE fuse)
+    IF(FUSE_FOUND)
+        FLATTEN_LIST("${FUSE_CFLAGS}" " " FUSE_CFLAGS)
+        FLATTEN_LIST("${FUSE_LDFLAGS}" " " FUSE_LDFLAGS)
+        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${FUSE_CFLAGS}")
+        set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} ${FUSE_LDFLAGS}")
+        MESSAGE(STATUS "Building Linux FUSE client.")
+        include_directories(${FUSE_INCLUDE_DIRS})
+    ELSE(FUSE_FOUND)
+        MESSAGE(STATUS "Failed to find Linux FUSE libraries or include files.  Will not build FUSE client.")
+    ENDIF(FUSE_FOUND)
+ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+    MESSAGE(STATUS "Non-Linux system detected.  Will not build FUSE client.")
+ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+
+IF(FUSE_FOUND)
+    add_executable(fuse_dfs
+        fuse_dfs.c
+        fuse_options.c 
+        fuse_connect.c 
+        fuse_impls_access.c 
+        fuse_impls_chmod.c  
+        fuse_impls_chown.c  
+        fuse_impls_create.c  
+        fuse_impls_flush.c 
+        fuse_impls_getattr.c  
+        fuse_impls_mkdir.c  
+        fuse_impls_mknod.c  
+        fuse_impls_open.c 
+        fuse_impls_read.c 
+        fuse_impls_readdir.c 
+        fuse_impls_release.c 
+        fuse_impls_rename.c 
+        fuse_impls_rmdir.c 
+        fuse_impls_statfs.c 
+        fuse_impls_symlink.c 
+        fuse_impls_truncate.c 
+        fuse_impls_unlink.c 
+        fuse_impls_utimens.c  
+        fuse_impls_write.c
+        fuse_init.c 
+        fuse_stat_struct.c 
+        fuse_trash.c 
+        fuse_users.c 
+    )
+    target_link_libraries(fuse_dfs
+        ${FUSE_LIBRARIES}
+        ${JAVA_JVM_LIBRARY}
+        hdfs
+        m
+    )
+ENDIF(FUSE_FOUND)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
deleted file mode 100644
index 706297f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-bin_PROGRAMS = fuse_dfs
-fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c  fuse_impls_chown.c  fuse_impls_create.c  fuse_impls_flush.c fuse_impls_getattr.c  fuse_impls_mkdir.c  fuse_impls_mknod.c  fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c  fuse_impls_unlink.c fuse_impls_write.c
-AM_CFLAGS= -Wall -g
-AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server
-fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h
index 56ed9cb..4554dbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/fuse_dfs.h
@@ -31,13 +31,9 @@
 #include <fuse.h>
 #include <fuse/fuse_opt.h>
 
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#ifdef HAVE_SETXATTR
 #include <sys/xattr.h>
-#endif
+
+#include "config.h"
 
 //
 // Check if a path is in the mount option supplied protected paths.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
index 43764ca..c8e0c62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
@@ -152,7 +152,7 @@
 <tr><td><code>dfs.web.authentication.kerberos.principal</code></td>
 <td>The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPENGO specification.
+    HTTP SPNEGO specification.
 </td></tr>
 <tr><td><code>dfs.web.authentication.kerberos.keytab</code></td>
 <td>The Kerberos keytab file with the credentials for the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d132db7..721dda5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -38,6 +38,10 @@
   public static final int     DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
   public static final String  DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
   public static final int     DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.client.retry.policy.enabled";
+  public static final boolean DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false; 
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.client.retry.policy.spec";
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
   public static final String  DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
   public static final String  DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
   public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
@@ -329,10 +333,10 @@
   public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
   public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
   public static final String  DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
-  public static final String  DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
+  public static final String  DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
   public static final String  DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
-  public static final String  DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
+  public static final String  DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1de353b..8c0ed10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -96,7 +96,7 @@
    */
   @Override
   public String getScheme() {
-    return "hdfs";
+    return HdfsConstants.HDFS_URI_SCHEME;
   }
 
   @Deprecated
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 989fc12..cd6601b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -46,7 +47,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 27eddeb..cc6517d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -47,10 +47,12 @@
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -66,6 +68,7 @@
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
 import com.google.common.base.Preconditions;
+import com.google.protobuf.ServiceException;
 
 /**
  * Create proxy objects to communicate with a remote NN. All remote access to an
@@ -240,12 +243,106 @@
     return new NamenodeProtocolTranslatorPB(proxy);
   }
   
+  /**
+   * Return the default retry policy used in RPC.
+   * 
+   * If dfs.client.retry.policy.enabled == false, use TRY_ONCE_THEN_FAIL.
+   * 
+   * Otherwise, first unwrap ServiceException if possible, and then 
+   * (1) use multipleLinearRandomRetry for
+   *     - SafeModeException, or
+   *     - IOException other than RemoteException, or
+   *     - ServiceException; and
+   * (2) use TRY_ONCE_THEN_FAIL for
+   *     - non-SafeMode RemoteException, or
+   *     - non-IOException.
+   *     
+   * Note that dfs.client.retry.max < 0 is not allowed.
+   */
+  private static RetryPolicy getDefaultRpcRetryPolicy(Configuration conf) {
+    final RetryPolicy multipleLinearRandomRetry = getMultipleLinearRandomRetry(conf);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
+    }
+    if (multipleLinearRandomRetry == null) {
+      //no retry
+      return RetryPolicies.TRY_ONCE_THEN_FAIL;
+    } else {
+      return new RetryPolicy() {
+        @Override
+        public RetryAction shouldRetry(Exception e, int retries, int failovers,
+            boolean isMethodIdempotent) throws Exception {
+          if (e instanceof ServiceException) {
+            //unwrap ServiceException
+            final Throwable cause = e.getCause();
+            if (cause != null && cause instanceof Exception) {
+              e = (Exception)cause;
+            }
+          }
+
+          //see (1) and (2) in the javadoc of this method.
+          final RetryPolicy p;
+          if (e instanceof RemoteException) {
+            final RemoteException re = (RemoteException)e;
+            p = SafeModeException.class.getName().equals(re.getClassName())?
+                multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
+          } else if (e instanceof IOException || e instanceof ServiceException) {
+            p = multipleLinearRandomRetry;
+          } else { //non-IOException
+            p = RetryPolicies.TRY_ONCE_THEN_FAIL;
+          }
+
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("RETRY " + retries + ") policy="
+                + p.getClass().getSimpleName() + ", exception=" + e);
+          }
+          LOG.info("RETRY " + retries + ") policy="
+              + p.getClass().getSimpleName() + ", exception=" + e);
+          return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
+        }
+      };
+    }
+  }
+
+  /**
+   * Return the MultipleLinearRandomRetry policy specified in the conf,
+   * or null if the feature is disabled.
+   * If the policy is specified in the conf but the policy cannot be parsed,
+   * the default policy is returned.
+   * 
+   * Conf property: N pairs of sleep-time and number-of-retries
+   *   dfs.client.retry.policy.spec = "s1,n1,s2,n2,..."
+   */
+  private static RetryPolicy getMultipleLinearRandomRetry(Configuration conf) {
+    final boolean enabled = conf.getBoolean(
+        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
+        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT);
+    if (!enabled) {
+      return null;
+    }
+
+    final String policy = conf.get(
+        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
+        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+
+    final RetryPolicy r = RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(policy);
+    return r != null? r: RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(
+        DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT);
+  }
+
   private static ClientProtocol createNNProxyWithClientProtocol(
       InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
       boolean withRetries) throws IOException {
-    ClientNamenodeProtocolPB proxy = (ClientNamenodeProtocolPB) NameNodeProxies
-        .createNameNodeProxy(address, conf, ugi, ClientNamenodeProtocolPB.class, 0);
+    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
+
+    final RetryPolicy defaultPolicy = getDefaultRpcRetryPolicy(conf);
+    final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
+    ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
+        ClientNamenodeProtocolPB.class, version, address, ugi, conf,
+        NetUtils.getDefaultSocketFactory(conf), 0, defaultPolicy).getProxy();
+
     if (withRetries) { // create the proxy with retries
+
       RetryPolicy createPolicy = RetryPolicies
           .retryUpToMaximumCountWithFixedSleep(5,
               HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
@@ -258,17 +355,21 @@
       Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
                  = new HashMap<Class<? extends Exception>, RetryPolicy>();
       exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-          .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+          .retryByRemoteException(defaultPolicy,
               remoteExceptionToPolicyMap));
       RetryPolicy methodPolicy = RetryPolicies.retryByException(
-          RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+          defaultPolicy, exceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap 
                  = new HashMap<String, RetryPolicy>();
     
       methodNameToPolicyMap.put("create", methodPolicy);
     
-      proxy = (ClientNamenodeProtocolPB) RetryProxy
-          .create(ClientNamenodeProtocolPB.class, proxy, methodNameToPolicyMap);
+      proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
+          ClientNamenodeProtocolPB.class,
+          new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
+              ClientNamenodeProtocolPB.class, proxy),
+          methodNameToPolicyMap,
+          defaultPolicy);
     }
     return new ClientNamenodeProtocolTranslatorPB(proxy);
   }
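
The retry behaviour introduced above is driven entirely by two client-side properties, so a minimal configuration sketch may be useful. Only the property names and the default spec come from this patch; the surrounding Configuration usage is ordinary Hadoop client code, and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;

public class RetryPolicyConfigExample {
  public static Configuration withClientRpcRetries() {
    Configuration conf = new Configuration();
    // Off by default; turning it on makes createNNProxyWithClientProtocol use
    // the MultipleLinearRandomRetry policy instead of TRY_ONCE_THEN_FAIL.
    conf.setBoolean("dfs.client.retry.policy.enabled", true);
    // Pairs of (sleep-time in ms, number of retries): retry up to 6 times
    // sleeping ~10s each, then up to 10 more times sleeping ~60s each.
    conf.set("dfs.client.retry.policy.spec", "10000,6,60000,10");
    return conf;
  }
}
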
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
new file mode 100644
index 0000000..9133cf3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * The public utility API for HDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsUtils {
+  private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
+
+  /**
+   * Is the HDFS healthy?
+   * HDFS is considered as healthy if it is up and not in safemode.
+   *
+   * @param uri the HDFS URI.  Note that the URI path is ignored.
+   * @return true if HDFS is healthy; false, otherwise.
+   */
+  public static boolean isHealthy(URI uri) {
+    //check scheme
+    final String scheme = uri.getScheme();
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
+      throw new IllegalArgumentException("The scheme is not "
+          + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
+    }
+    
+    final Configuration conf = new Configuration();
+    //disable FileSystem cache
+    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
+    //disable client retry for rpc connection and rpc calls
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, false);
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+
+    DistributedFileSystem fs = null;
+    try {
+      fs = (DistributedFileSystem)FileSystem.get(uri, conf);
+      final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
+      }
+
+      fs.close();
+      fs = null;
+      return !safemode;
+    } catch(IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Got an exception for uri=" + uri, e);
+      }
+      return false;
+    } finally {
+      IOUtils.cleanup(LOG, fs);
+    }
+  }
+}
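
A usage sketch for the new utility follows; the NameNode address is a placeholder, everything else is the API introduced in this file.

import java.net.URI;
import org.apache.hadoop.hdfs.client.HdfsUtils;

public class HealthCheckExample {
  public static void main(String[] args) throws Exception {
    // Placeholder address; the path component of the URI is ignored.
    URI uri = new URI("hdfs://namenode.example.com:8020/");
    // true only if the NameNode is reachable and not in safe mode;
    // retries are disabled internally so the probe fails fast.
    boolean healthy = HdfsUtils.isHealthy(uri);
    System.out.println("HDFS healthy? " + healthy);
  }
}
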
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index db7eaed..0fd1e7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -199,10 +199,10 @@
     this.xceiverCount = xceiverCount; 
   }
 
-  /** rack name */
+  /** network location */
   public synchronized String getNetworkLocation() {return location;}
     
-  /** Sets the rack name */
+  /** Sets the network location */
   public synchronized void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index be86b53..0739aab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -30,8 +30,9 @@
  * the block are stored.
  */
 @InterfaceAudience.Private
-public class BlockInfo extends Block implements
-    LightWeightGSet.LinkedElement {
+public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
+  public static final BlockInfo[] EMPTY_ARRAY = {}; 
+
   private BlockCollection bc;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 527a997..7e4a485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2259,30 +2259,14 @@
     BlockCollection bc = getBlockCollection(b);
     final Map<String, List<DatanodeDescriptor>> rackMap
         = new HashMap<String, List<DatanodeDescriptor>>();
-    for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
-        iter.hasNext(); ) {
-      final DatanodeDescriptor node = iter.next();
-      final String rackName = node.getNetworkLocation();
-      List<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
-      if (datanodeList == null) {
-        datanodeList = new ArrayList<DatanodeDescriptor>();
-        rackMap.put(rackName, datanodeList);
-      }
-      datanodeList.add(node);
-    }
+    final List<DatanodeDescriptor> moreThanOne = new ArrayList<DatanodeDescriptor>();
+    final List<DatanodeDescriptor> exactlyOne = new ArrayList<DatanodeDescriptor>();
     
     // split nodes into two sets
-    // priSet contains nodes on rack with more than one replica
-    // remains contains the remaining nodes
-    final List<DatanodeDescriptor> priSet = new ArrayList<DatanodeDescriptor>();
-    final List<DatanodeDescriptor> remains = new ArrayList<DatanodeDescriptor>();
-    for(List<DatanodeDescriptor> datanodeList : rackMap.values()) {
-      if (datanodeList.size() == 1 ) {
-        remains.add(datanodeList.get(0));
-      } else {
-        priSet.addAll(datanodeList);
-      }
-    }
+    // moreThanOne contains nodes on rack with more than one replica
+    // exactlyOne contains the remaining nodes
+    replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne,
+        exactlyOne);
     
     // pick one node to delete that favors the delete hint
     // otherwise pick one with least space from priSet if it is not empty
@@ -2292,30 +2276,18 @@
       // check if we can delete delNodeHint
       final DatanodeInfo cur;
       if (firstOne && delNodeHint !=null && nonExcess.contains(delNodeHint)
-          && (priSet.contains(delNodeHint)
-              || (addedNode != null && !priSet.contains(addedNode))) ) {
+          && (moreThanOne.contains(delNodeHint)
+              || (addedNode != null && !moreThanOne.contains(addedNode))) ) {
         cur = delNodeHint;
       } else { // regular excessive replica removal
         cur = replicator.chooseReplicaToDelete(bc, b, replication,
-            priSet, remains);
+            moreThanOne, exactlyOne);
       }
       firstOne = false;
 
-      // adjust rackmap, priSet, and remains
-      String rack = cur.getNetworkLocation();
-      final List<DatanodeDescriptor> datanodes = rackMap.get(rack);
-      datanodes.remove(cur);
-      if (datanodes.isEmpty()) {
-        rackMap.remove(rack);
-      }
-      if (priSet.remove(cur)) {
-        if (datanodes.size() == 1) {
-          priSet.remove(datanodes.get(0));
-          remains.add(datanodes.get(0));
-        }
-      } else {
-        remains.remove(cur);
-      }
+      // adjust rackmap, moreThanOne, and exactlyOne
+      replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne,
+          exactlyOne, cur);
 
       nonExcess.remove(cur);
       addToExcessReplicate(cur, b);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index e1efae5..e331746 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -21,12 +21,14 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
 import org.apache.hadoop.net.NetworkTopology;
@@ -241,5 +243,80 @@
                         excludedNodes,
                         blocksize);
   }
+  
+  /**
+   * Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
+   *
+   * @param rackMap a map from rack name to the replica nodes on that rack
+   * @param moreThanOne the list of replica nodes on racks that hold more
+   *        than one replica
+   * @param exactlyOne the list of replica nodes on racks with only one replica
+   * @param cur the replica being removed
+   */
+  public void adjustSetsWithChosenReplica(final Map<String, 
+      List<DatanodeDescriptor>> rackMap,
+      final List<DatanodeDescriptor> moreThanOne,
+      final List<DatanodeDescriptor> exactlyOne, final DatanodeInfo cur) {
+    
+    String rack = getRack(cur);
+    final List<DatanodeDescriptor> datanodes = rackMap.get(rack);
+    datanodes.remove(cur);
+    if (datanodes.isEmpty()) {
+      rackMap.remove(rack);
+    }
+    if (moreThanOne.remove(cur)) {
+      if (datanodes.size() == 1) {
+        moreThanOne.remove(datanodes.get(0));
+        exactlyOne.add(datanodes.get(0));
+      }
+    } else {
+      exactlyOne.remove(cur);
+    }
+  }
+
+  /**
+   * Get rack string from a data node
+   * @param datanode
+   * @return rack of data node
+   */
+  protected String getRack(final DatanodeInfo datanode) {
+    return datanode.getNetworkLocation();
+  }
+  
+  /**
+   * Split data nodes into two sets, one set includes nodes on rack with
+   * more than one replica, the other set contains the remaining nodes.
+   * 
+   * @param dataNodes
+   * @param rackMap a map from rack to datanodes
+   * @param moreThanOne contains nodes on rack with more than one replica
+   * @param exactlyOne contains the remaining nodes, i.e. those on racks with only one replica
+   */
+  public void splitNodesWithRack(
+      Collection<DatanodeDescriptor> dataNodes,
+      final Map<String, List<DatanodeDescriptor>> rackMap,
+      final List<DatanodeDescriptor> moreThanOne,
+      final List<DatanodeDescriptor> exactlyOne) {
+    for(DatanodeDescriptor node : dataNodes) {
+      final String rackName = getRack(node);
+      List<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
+      if (datanodeList == null) {
+        datanodeList = new ArrayList<DatanodeDescriptor>();
+        rackMap.put(rackName, datanodeList);
+      }
+      datanodeList.add(node);
+    }
+    
+    // split nodes into two sets
+    for(List<DatanodeDescriptor> datanodeList : rackMap.values()) {
+      if (datanodeList.size() == 1) {
+        // exactlyOne contains nodes on rack with only one replica
+        exactlyOne.add(datanodeList.get(0));
+      } else {
+        // moreThanOne contains nodes on rack with more than one replica
+        moreThanOne.addAll(datanodeList);
+      }
+    }
+  }
 
 }
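
To illustrate what splitNodesWithRack computes, here is a small self-contained sketch that uses plain host and rack strings in place of DatanodeDescriptor objects; the grouping logic is the same, only the types are simplified for the example.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RackSplitExample {
  public static void main(String[] args) {
    Map<String, String> hostToRack = new LinkedHashMap<String, String>();
    hostToRack.put("dn1", "/rack1");
    hostToRack.put("dn2", "/rack1");
    hostToRack.put("dn3", "/rack2");

    // Group replica hosts by rack, as splitNodesWithRack does.
    Map<String, List<String>> rackMap = new HashMap<String, List<String>>();
    for (Map.Entry<String, String> e : hostToRack.entrySet()) {
      List<String> nodes = rackMap.get(e.getValue());
      if (nodes == null) {
        nodes = new ArrayList<String>();
        rackMap.put(e.getValue(), nodes);
      }
      nodes.add(e.getKey());
    }

    List<String> moreThanOne = new ArrayList<String>(); // nodes on racks with >1 replica
    List<String> exactlyOne = new ArrayList<String>();  // nodes on racks with exactly 1 replica
    for (List<String> nodes : rackMap.values()) {
      if (nodes.size() == 1) {
        exactlyOne.add(nodes.get(0));
      } else {
        moreThanOne.addAll(nodes);
      }
    }
    // Prints: moreThanOne=[dn1, dn2], exactlyOne=[dn3]
    System.out.println("moreThanOne=" + moreThanOne + ", exactlyOne=" + exactlyOne);
  }
}

Deleting excess replicas from the moreThanOne set first is what preserves the number of racks a block is spread across.
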
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 6995a2e..350f863 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -56,15 +56,15 @@
     "For more information, please enable DEBUG log level on "
     + ((Log4JLogger)LOG).getLogger().getName();
 
-  private boolean considerLoad; 
+  protected boolean considerLoad; 
   private boolean preferLocalNode = true;
-  private NetworkTopology clusterMap;
+  protected NetworkTopology clusterMap;
   private FSClusterStats stats;
-  private long heartbeatInterval;   // interval for DataNode heartbeats
+  protected long heartbeatInterval;   // interval for DataNode heartbeats
   /**
    * A miss of that many heartbeats is tolerated for replica deletion policy.
    */
-  private int tolerateHeartbeatMultiplier;
+  protected int tolerateHeartbeatMultiplier;
 
   BlockPlacementPolicyDefault(Configuration conf,  FSClusterStats stats,
                            NetworkTopology clusterMap) {
@@ -88,7 +88,7 @@
         DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT);
   }
 
-  private ThreadLocal<StringBuilder> threadLocalBuilder =
+  protected ThreadLocal<StringBuilder> threadLocalBuilder =
     new ThreadLocal<StringBuilder>() {
     @Override
     protected StringBuilder initialValue() {
@@ -229,7 +229,7 @@
    * choose a node on the same rack
    * @return the chosen node
    */
-  private DatanodeDescriptor chooseLocalNode(
+  protected DatanodeDescriptor chooseLocalNode(
                                              DatanodeDescriptor localMachine,
                                              HashMap<Node, Node> excludedNodes,
                                              long blocksize,
@@ -263,7 +263,7 @@
    * in the cluster.
    * @return the chosen node
    */
-  private DatanodeDescriptor chooseLocalRack(
+  protected DatanodeDescriptor chooseLocalRack(
                                              DatanodeDescriptor localMachine,
                                              HashMap<Node, Node> excludedNodes,
                                              long blocksize,
@@ -316,7 +316,7 @@
    * from the local rack
    */
     
-  private void chooseRemoteRack(int numOfReplicas,
+  protected void chooseRemoteRack(int numOfReplicas,
                                 DatanodeDescriptor localMachine,
                                 HashMap<Node, Node> excludedNodes,
                                 long blocksize,
@@ -338,7 +338,7 @@
   /* Randomly choose one target from <i>nodes</i>.
    * @return the chosen node
    */
-  private DatanodeDescriptor chooseRandom(
+  protected DatanodeDescriptor chooseRandom(
                                           String nodes,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
@@ -382,7 +382,7 @@
     
   /* Randomly choose <i>numOfReplicas</i> targets from <i>nodes</i>.
    */
-  private void chooseRandom(int numOfReplicas,
+  protected void chooseRandom(int numOfReplicas,
                             String nodes,
                             HashMap<Node, Node> excludedNodes,
                             long blocksize,
@@ -438,7 +438,7 @@
                         this.considerLoad, results);
   }
     
-  private boolean isGoodTarget(DatanodeDescriptor node,
+  protected boolean isGoodTarget(DatanodeDescriptor node,
                                long blockSize, int maxTargetPerLoc,
                                boolean considerLoad,
                                List<DatanodeDescriptor> results) {
@@ -574,8 +574,7 @@
 
     // pick replica from the first Set. If first is empty, then pick replicas
     // from second set.
-    Iterator<DatanodeDescriptor> iter =
-          first.isEmpty() ? second.iterator() : first.iterator();
+    Iterator<DatanodeDescriptor> iter = pickupReplicaSet(first, second);
 
     // Pick the node with the oldest heartbeat or with the least free space,
     // if all heartbeats are within the tolerable heartbeat interval
@@ -594,6 +593,20 @@
     }
     return oldestHeartbeatNode != null ? oldestHeartbeatNode : minSpaceNode;
   }
+
+  /**
+   * Pick the set of replica nodes from which an over-replicated replica
+   * should be deleted.  The first set contains nodes on racks holding more
+   * than one replica; the second set contains the remaining nodes.
+   * Prefer the first set; fall back to the second only if the first is empty.
+   */
+  protected Iterator<DatanodeDescriptor> pickupReplicaSet(
+      Collection<DatanodeDescriptor> first,
+      Collection<DatanodeDescriptor> second) {
+    Iterator<DatanodeDescriptor> iter =
+        first.isEmpty() ? second.iterator() : first.iterator();
+    return iter;
+  }
   
   @VisibleForTesting
   void setPreferLocalNode(boolean prefer) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 2aee0eb..474feb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -66,6 +67,8 @@
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.HostsFileReader;
@@ -108,7 +111,7 @@
       = new TreeMap<String, DatanodeDescriptor>();
 
   /** Cluster network topology */
-  private final NetworkTopology networktopology = new NetworkTopology();
+  private final NetworkTopology networktopology;
 
   /** Host names to datanode descriptors mapping. */
   private final Host2NodesMap host2DatanodeMap = new Host2NodesMap();
@@ -134,6 +137,12 @@
       ) throws IOException {
     this.namesystem = namesystem;
     this.blockManager = blockManager;
+    
+    Class<? extends NetworkTopology> networkTopologyClass =
+        conf.getClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
+            NetworkTopology.class, NetworkTopology.class);
+    networktopology = (NetworkTopology) ReflectionUtils.newInstance(
+        networkTopologyClass, conf);
 
     this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
 
@@ -206,13 +215,22 @@
   public void sortLocatedBlocks(final String targethost,
       final List<LocatedBlock> locatedblocks) {
     //sort the blocks
-    final DatanodeDescriptor client = getDatanodeByHost(targethost);
+    // The client host may not be a registered datanode (for example when the
+    // node manager and the datanode are separated), so resolve a generic Node.
+    Node client = getDatanodeByHost(targethost);
+    if (client == null) {
+      List<String> hosts = new ArrayList<String> (1);
+      hosts.add(targethost);
+      String rName = dnsToSwitchMapping.resolve(hosts).get(0);
+      if (rName != null)
+        client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost);
+    }
     for (LocatedBlock b : locatedblocks) {
       networktopology.pseudoSortByDistance(client, b.getLocations());
       
       // Move decommissioned datanodes to the bottom
       Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
-    }    
+    }
   }
 
   CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
@@ -1035,4 +1053,8 @@
     }
   }
 
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + ": " + host2DatanodeMap;
+  }
 }
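
The sortLocatedBlocks change lets a reading client that is not itself a datanode still be located in the topology: its host is resolved through the configured DNSToSwitchMapping and wrapped in a NodeBase. A small sketch of that wrapping, with a made-up rack name standing in for whatever the mapping would return:

import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class ClientNodeExample {
  public static void main(String[] args) {
    String host = "client.example.com";   // placeholder client host
    String rack = "/rack1";               // placeholder resolved rack
    Node client = new NodeBase(rack + NodeBase.PATH_SEPARATOR_STR + host);
    // Prints "/rack1 client.example.com"; this Node can then be handed to
    // NetworkTopology.pseudoSortByDistance just like a DatanodeDescriptor.
    System.out.println(client.getNetworkLocation() + " " + client.getName());
  }
}
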
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
index 68ea1f1..082816d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -156,4 +158,14 @@
       hostmapLock.readLock().unlock();
     }
   }
+  
+  @Override
+  public String toString() {
+    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
+        .append("[");
+    for(Map.Entry<String, DatanodeDescriptor[]> e : map.entrySet()) {
+      b.append("\n  " + e.getKey() + " => " + Arrays.asList(e.getValue()));
+    }
+    return b.append("\n]").toString();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index 1f4e974..7c8c301 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -34,7 +34,9 @@
   private final static Log LOG = LogFactory.getLog(Util.class.getName());
 
   /**
-   * Current system time.
+   * Current system time.  Do not use this to calculate a duration or interval
+   * to sleep, because it will be broken by settimeofday.  Instead, use
+   * monotonicNow.
    * @return current time in msec.
    */
   public static long now() {
@@ -42,6 +44,19 @@
   }
   
   /**
+   * Current time from some arbitrary time base in the past, counting in
+   * milliseconds, and not affected by settimeofday or similar system clock
+   * changes.  This is appropriate to use when computing how much longer to
+   * wait for an interval to expire.
+   * @return a monotonic clock that counts in milliseconds.
+   */
+  public static long monotonicNow() {
+    final long NANOSECONDS_PER_MILLISECOND = 1000000;
+
+    return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
+  }
+
+  /**
    * Interprets the passed string as a URI. In case of error it 
    * assumes the specified string is a file.
    *
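
A short sketch of the intended use of the new monotonicNow() helper: measuring an elapsed interval that wall-clock adjustments cannot distort. The class name is illustrative; the helper itself is the one defined above.

import org.apache.hadoop.hdfs.server.common.Util;

public class MonotonicTimerExample {
  public static void main(String[] args) throws InterruptedException {
    final long start = Util.monotonicNow();
    Thread.sleep(50); // stand-in for real work
    final long elapsed = Util.monotonicNow() - start;
    // Correct even if settimeofday/NTP jumps the system clock meanwhile.
    System.out.println("elapsed ~" + elapsed + " ms");
  }
}
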
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index eac68d9..3a14335 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -302,6 +302,22 @@
     shouldRun = false;
     if (masterThread != null) masterThread.shutdown();
     if (reportCompileThreadPool != null) reportCompileThreadPool.shutdown();
+    if (masterThread != null) {
+      try {
+        masterThread.awaitTermination(1, TimeUnit.MINUTES);
+      } catch (InterruptedException e) {
+        LOG.error("interrupted while waiting for masterThread to " +
+          "terminate", e);
+      }
+    }
+    if (reportCompileThreadPool != null) {
+      try {
+        reportCompileThreadPool.awaitTermination(1, TimeUnit.MINUTES);
+      } catch (InterruptedException e) {
+        LOG.error("interrupted while waiting for reportCompileThreadPool to " +
+          "terminate", e);
+      }
+    }
     if (!retainDiffs) clear();
   }
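
The addition above follows the usual ExecutorService shutdown idiom: request shutdown, then wait a bounded time for in-flight tasks to finish. A generic sketch of that pattern, using only the JDK and nothing DirectoryScanner-specific:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownExample {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    pool.submit(new Runnable() {
      @Override
      public void run() {
        // pretend this is a report-compilation task
      }
    });
    pool.shutdown();          // stop accepting new tasks
    try {
      // give running tasks up to one minute to complete
      if (!pool.awaitTermination(1, TimeUnit.MINUTES)) {
        System.err.println("pool did not terminate within 1 minute");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}
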
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 32b5c60..eb4afe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -98,6 +98,10 @@
       LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
           + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
     }
+    if (nnRpcAddr == null) {
+      throw new IllegalArgumentException(NamenodeRpcAddressParam.NAME
+          + " is not specified.");
+    }
 
     //clear content type
     response.setContentType(null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index e6ddf5b..b224c86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -165,7 +165,8 @@
             LOG.warn("skipping " + skipAmt + " bytes at the end " +
               "of edit log  '" + getName() + "': reached txid " + txId +
               " out of " + lastTxId);
-            tracker.skip(skipAmt);
+            tracker.clearLimit();
+            IOUtils.skipFully(tracker, skipAmt);
           }
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index dd8102e..08a560c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -206,10 +206,10 @@
             + fc.size());
       }
       fill.position(0);
-      int written = fc.write(fill, position);
+      IOUtils.writeFully(fc, fill, position);
       if(FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
-            " written " + written + " bytes " + " at offset " + position);
+            " written " + fill.capacity() + " bytes " + " at offset " + position);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index d423928..f5664a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -277,7 +277,7 @@
           preferredBlockSize, modificationTime, clientName, 
           clientMachine, null);
     } else {
-      newNode = new INodeFile(permissions, 0, replication,
+      newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
                               modificationTime, atime, preferredBlockSize);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 5ebe5b2..8e12dcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1236,18 +1236,6 @@
         throw e;
       }
     }
-    // This code will go away as soon as RedundantEditLogInputStream is
-    // introduced. (HDFS-3049)
-    try {
-      if (!streams.isEmpty()) {
-        streams.get(0).skipUntil(fromTxId);
-      }
-    } catch (IOException e) {
-      // We don't want to throw an exception from here, because that would make
-      // recovery impossible even if the user requested it.  An exception will
-      // be thrown later, when we don't read the starting txid we expect.
-      LOG.error("error skipping until transaction " + fromTxId, e);
-    }
     return streams;
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index e1b26bb..e1b1ecc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -668,7 +668,9 @@
         FSImage.LOG.warn("Caught exception after reading " + numValid +
             " ops from " + in + " while determining its valid length." +
             "Position was " + lastPos, t);
-        break;
+        in.resync();
+        FSImage.LOG.warn("After resync, position is " + in.getPosition());
+        continue;
       }
       if (lastTxId == HdfsConstants.INVALID_TXID
           || op.getTransactionId() > lastTxId) {
@@ -752,6 +754,11 @@
     }
 
     @Override
+    public void clearLimit() {
+      limitPos = Long.MAX_VALUE;
+    }
+
+    @Override
     public void mark(int limit) {
       super.mark(limit);
       markPos = curPos;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 489f030..80f637c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.ArrayWritable;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -2289,9 +2290,11 @@
           // 0xff, we want to skip over that region, because there's nothing
           // interesting there.
           long numSkip = e.getNumAfterTerminator();
-          if (in.skip(numSkip) < numSkip) {
+          try {
+            IOUtils.skipFully(in,  numSkip);
+          } catch (Throwable t) {
             FSImage.LOG.error("Failed to skip " + numSkip + " bytes of " +
-              "garbage after an OP_INVALID.  Unexpected early EOF.");
+              "garbage after an OP_INVALID.", t);
             return null;
           }
         } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fbca355..9531ee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -240,8 +240,15 @@
   private static final void logAuditEvent(UserGroupInformation ugi,
       InetAddress addr, String cmd, String src, String dst,
       HdfsFileStatus stat) {
+    logAuditEvent(true, ugi, addr, cmd, src, dst, stat);
+  }
+
+  private static final void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, HdfsFileStatus stat) {
     final StringBuilder sb = auditBuffer.get();
     sb.setLength(0);
+    sb.append("allowed=").append(succeeded).append("\t");
     sb.append("ugi=").append(ugi).append("\t");
     sb.append("ip=").append(addr).append("\t");
     sb.append("cmd=").append(cmd).append("\t");
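
The extra boolean records whether the audited operation was allowed. The remainder of this file then adopts a wrapper pattern: each public method delegates to a private *Int method and logs an allowed=false event when an AccessControlException escapes. A stripped-down sketch of that pattern follows; the audit helper is a placeholder, not FSNamesystem API.

import org.apache.hadoop.security.AccessControlException;

public class AuditWrapperSketch {
  public static void main(String[] args) {
    try {
      new AuditWrapperSketch().setPermission("/user/alice/data");
    } catch (AccessControlException e) {
      // the failed attempt has already been audited with allowed=false
    }
  }

  void setPermission(String src) throws AccessControlException {
    try {
      setPermissionInt(src);
    } catch (AccessControlException e) {
      audit(false, "setPermission", src); // allowed=false record
      throw e;
    }
  }

  void setPermissionInt(String src) throws AccessControlException {
    throw new AccessControlException("Permission denied: " + src); // simulate denial
  }

  void audit(boolean allowed, String cmd, String src) {
    System.out.println("allowed=" + allowed + "\tcmd=" + cmd + "\tsrc=" + src);
  }
}
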
@@ -572,8 +579,6 @@
         !safeMode.isPopulatingReplQueues();
       setBlockTotal();
       blockManager.activate(conf);
-      this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
-      nnrmthread.start();
     } finally {
       writeUnlock();
     }
@@ -590,7 +595,6 @@
     writeLock();
     try {
       if (blockManager != null) blockManager.close();
-      if (nnrmthread != null) nnrmthread.interrupt();
     } finally {
       writeUnlock();
     }
@@ -644,6 +648,10 @@
       }
       leaseManager.startMonitor();
       startSecretManagerIfNecessary();
+
+      //ResourceMonitor required only at ActiveNN. See HDFS-2914
+      this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
+      nnrmthread.start();
     } finally {
       writeUnlock();
     }
@@ -666,6 +674,10 @@
       if (leaseManager != null) {
         leaseManager.stopMonitor();
       }
+      if (nnrmthread != null) {
+        ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
+        nnrmthread.interrupt();
+      }
       if (dir != null && dir.fsImage != null) {
         if (dir.fsImage.editLog != null) {
           dir.fsImage.editLog.close();
@@ -1013,6 +1025,21 @@
   void setPermission(String src, FsPermission permission)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    try {
+      setPermissionInt(src, permission);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "setPermission", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private void setPermissionInt(String src, FsPermission permission)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException {
     HdfsFileStatus resultingStat = null;
     writeLock();
     try {
@@ -1044,6 +1071,21 @@
   void setOwner(String src, String username, String group)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    try {
+      setOwnerInt(src, username, group);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "setOwner", src, null, null);
+      }
+      throw e;
+    } 
+  }
+
+  private void setOwnerInt(String src, String username, String group)
+      throws AccessControlException, FileNotFoundException, SafeModeException,
+      UnresolvedLinkException, IOException {
     HdfsFileStatus resultingStat = null;
     writeLock();
     try {
@@ -1084,7 +1126,8 @@
   LocatedBlocks getBlockLocations(String clientMachine, String src,
       long offset, long length) throws AccessControlException,
       FileNotFoundException, UnresolvedLinkException, IOException {
-    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true);
+    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
+        true);
     if (blocks != null) {
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
@@ -1098,8 +1141,24 @@
    * @throws FileNotFoundException, UnresolvedLinkException, IOException
    */
   LocatedBlocks getBlockLocations(String src, long offset, long length,
-      boolean doAccessTime, boolean needBlockToken) throws FileNotFoundException,
-      UnresolvedLinkException, IOException {
+      boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode)
+      throws FileNotFoundException, UnresolvedLinkException, IOException {
+    try {
+      return getBlockLocationsInt(src, offset, length, doAccessTime,
+                                  needBlockToken, checkSafeMode);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "open", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private LocatedBlocks getBlockLocationsInt(String src, long offset, long length,
+      boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode)
+      throws FileNotFoundException, UnresolvedLinkException, IOException {
     if (isPermissionEnabled) {
       checkPathAccess(src, FsAction.READ);
     }
@@ -1119,6 +1178,15 @@
                     Server.getRemoteIp(),
                     "open", src, null, null);
     }
+    if (checkSafeMode && isInSafeMode()) {
+      for (LocatedBlock b : ret.getLocatedBlocks()) {
+        // if safemode & no block locations yet then throw safemodeException
+        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
+          throw new SafeModeException("Zero blocklocations for " + src,
+              safeMode);
+        }
+      }
+    }
     return ret;
   }
 
@@ -1187,6 +1255,20 @@
    */
   void concat(String target, String [] srcs) 
       throws IOException, UnresolvedLinkException {
+    try {
+      concatInt(target, srcs);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getLoginUser(),
+                      Server.getRemoteIp(),
+                      "concat", Arrays.toString(srcs), target, null);
+      }
+      throw e;
+    }
+  }
+
+  private void concatInt(String target, String [] srcs) 
+      throws IOException, UnresolvedLinkException {
     if(FSNamesystem.LOG.isDebugEnabled()) {
       FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) +
           " to " + target);
@@ -1339,6 +1421,20 @@
    * written to the edits log but is not flushed.
    */
   void setTimes(String src, long mtime, long atime) 
+      throws IOException, UnresolvedLinkException {
+    try {
+      setTimesInt(src, mtime, atime);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "setTimes", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private void setTimesInt(String src, long mtime, long atime) 
     throws IOException, UnresolvedLinkException {
     if (!isAccessTimeSupported() && atime != -1) {
       throw new IOException("Access time for hdfs is not configured. " +
@@ -1375,6 +1471,21 @@
   void createSymlink(String target, String link,
       PermissionStatus dirPerms, boolean createParent) 
       throws IOException, UnresolvedLinkException {
+    try {
+      createSymlinkInt(target, link, dirPerms, createParent);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "createSymlink", link, target, null);
+      }
+      throw e;
+    }
+  }
+
+  private void createSymlinkInt(String target, String link,
+      PermissionStatus dirPerms, boolean createParent) 
+      throws IOException, UnresolvedLinkException {
     HdfsFileStatus resultingStat = null;
     writeLock();
     try {
@@ -1442,8 +1553,22 @@
    * @return true if successful; 
    *         false if file does not exist or is a directory
    */
-  boolean setReplication(final String src, final short replication
-      ) throws IOException {
+  boolean setReplication(final String src, final short replication)
+      throws IOException {
+    try {
+      return setReplicationInt(src, replication);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "setReplication", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private boolean setReplicationInt(final String src, final short replication)
+      throws IOException {
     blockManager.verifyReplication(src, replication, null);
 
     final boolean isFile;
@@ -1476,7 +1601,7 @@
     }
     return isFile;
   }
-    
+
   long getPreferredBlockSize(String filename) 
       throws IOException, UnresolvedLinkException {
     readLock();
@@ -1522,6 +1647,24 @@
       short replication, long blockSize) throws AccessControlException,
       SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
+    try {
+      startFileInt(src, permissions, holder, clientMachine, flag, createParent,
+                   replication, blockSize);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "create", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private void startFileInt(String src, PermissionStatus permissions, String holder,
+      String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
+      short replication, long blockSize) throws AccessControlException,
+      SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
+      FileNotFoundException, ParentNotDirectoryException, IOException {
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -1600,7 +1743,7 @@
     }
 
     try {
-      INode myFile = dir.getFileINode(src);
+      INodeFile myFile = dir.getFileINode(src);
       recoverLeaseInternal(myFile, src, holder, clientMachine, false);
 
       try {
@@ -1676,22 +1819,20 @@
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  public LocatedBlock prepareFileForWrite(String src, INode file,
+  LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
-      boolean writeToEditLog)
-      throws UnresolvedLinkException, IOException {
-    INodeFile node = (INodeFile) file;
+      boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
-                                    node.getLocalNameBytes(),
-                                    node.getReplication(),
-                                    node.getModificationTime(),
-                                    node.getPreferredBlockSize(),
-                                    node.getBlocks(),
-                                    node.getPermissionStatus(),
+                                    file.getLocalNameBytes(),
+                                    file.getReplication(),
+                                    file.getModificationTime(),
+                                    file.getPreferredBlockSize(),
+                                    file.getBlocks(),
+                                    file.getPermissionStatus(),
                                     leaseHolder,
                                     clientMachine,
                                     clientNode);
-    dir.replaceNode(src, node, cons);
+    dir.replaceNode(src, file, cons);
     leaseManager.addLease(cons.getClientName(), src);
     
     LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
@@ -1827,6 +1968,22 @@
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
+    try {
+      return appendFileInt(src, holder, clientMachine);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "append", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private LocatedBlock appendFileInt(String src, String holder, String clientMachine)
+      throws AccessControlException, SafeModeException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, IOException {
     if (!supportAppends) {
       throw new UnsupportedOperationException(
           "Append is not enabled on this NameNode. Use the " +
@@ -2313,6 +2470,20 @@
    */
   @Deprecated
   boolean renameTo(String src, String dst) 
+      throws IOException, UnresolvedLinkException {
+    try {
+      return renameToInt(src, dst);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "rename", src, dst, null);
+      }
+      throw e;
+    }
+  }
+
+  private boolean renameToInt(String src, String dst) 
     throws IOException, UnresolvedLinkException {
     boolean status = false;
     HdfsFileStatus resultingStat = null;
@@ -2424,20 +2595,35 @@
    * @see ClientProtocol#delete(String, boolean) for detailed descriptoin and 
    * description of exceptions
    */
-    boolean delete(String src, boolean recursive)
-        throws AccessControlException, SafeModeException,
-               UnresolvedLinkException, IOException {
-      if (NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-      }
-      boolean status = deleteInternal(src, recursive, true);
-      if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
-        logAuditEvent(UserGroupInformation.getCurrentUser(),
+  boolean delete(String src, boolean recursive)
+      throws AccessControlException, SafeModeException,
+      UnresolvedLinkException, IOException {
+    try {
+      return deleteInt(src, recursive);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
                       Server.getRemoteIp(),
                       "delete", src, null, null);
       }
-      return status;
+      throw e;
     }
+  }
+
+  private boolean deleteInt(String src, boolean recursive)
+      throws AccessControlException, SafeModeException,
+      UnresolvedLinkException, IOException {
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
+    }
+    boolean status = deleteInternal(src, recursive, true);
+    if (status && auditLog.isInfoEnabled() && isExternalInvocation()) {
+      logAuditEvent(UserGroupInformation.getCurrentUser(),
+                    Server.getRemoteIp(),
+                    "delete", src, null, null);
+    }
+    return status;
+  }
     
   /**
    * Remove a file/directory from the namespace.
@@ -2593,6 +2779,20 @@
    */
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException, UnresolvedLinkException {
+    try {
+      return mkdirsInt(src, permissions, createParent);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "mkdirs", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private boolean mkdirsInt(String src, PermissionStatus permissions,
+      boolean createParent) throws IOException, UnresolvedLinkException {
     boolean status = false;
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
@@ -3044,6 +3244,21 @@
    */
   DirectoryListing getListing(String src, byte[] startAfter,
       boolean needLocation) 
+      throws AccessControlException, UnresolvedLinkException, IOException {
+    try {
+      return getListingInt(src, startAfter, needLocation);
+    } catch (AccessControlException e) {
+      if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
+                      Server.getRemoteIp(),
+                      "listStatus", src, null, null);
+      }
+      throw e;
+    }
+  }
+
+  private DirectoryListing getListingInt(String src, byte[] startAfter,
+      boolean needLocation) 
     throws AccessControlException, UnresolvedLinkException, IOException {
     DirectoryListing dl;
     readLock();
@@ -3193,10 +3408,11 @@
    * acceptable levels, this daemon will cause the NN to exit safe mode.
    */
   class NameNodeResourceMonitor implements Runnable  {
+    boolean shouldNNRmRun = true;
     @Override
     public void run () {
       try {
-        while (fsRunning) {
+        while (fsRunning && shouldNNRmRun) {
           checkAvailableResources();
           if(!nameNodeHasResourcesAvailable()) {
             String lowResourcesMsg = "NameNode low on available disk space. ";
@@ -3217,7 +3433,11 @@
         FSNamesystem.LOG.error("Exception in NameNodeResourceMonitor: ", e);
       }
     }
-  }
+
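+    /**
+     * Signal the monitor loop to exit on its next iteration.
+     */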
+    public void stopMonitor() {
+      shouldNNRmRun = false;
+    }
+ }
   
   public FSImage getFSImage() {
     return dir.fsImage;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3bfb335..1e043f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -25,9 +25,9 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
@@ -45,13 +45,6 @@
 
   BlockInfo blocks[] = null;
 
-  INodeFile(PermissionStatus permissions,
-            int nrBlocks, short replication, long modificationTime,
-            long atime, long preferredBlockSize) {
-    this(permissions, new BlockInfo[nrBlocks], replication,
-        modificationTime, atime, preferredBlockSize);
-  }
-
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                       short replication, long modificationTime,
                       long atime, long preferredBlockSize) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 66e33e0..1c19a1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -32,8 +33,8 @@
 /**
  * I-node for file being written.
  */
-public class INodeFileUnderConstruction extends INodeFile 
-                                        implements MutableBlockCollection {
+@InterfaceAudience.Private
+class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
   private  String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -45,7 +46,7 @@
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(permissions.applyUMask(UMASK), 0, replication,
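+    // A file under construction starts with no blocks, so pass the shared
+    // empty array now that the block-count constructor has been removed.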
+    super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
         modTime, modTime, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index d0ef373..9d6c8d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -25,7 +25,9 @@
 import java.util.Comparator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.PriorityQueue;
 import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,7 +43,6 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimaps;
 import com.google.common.collect.Sets;
-import com.google.common.collect.TreeMultiset;
 
 /**
  * Manages a collection of Journals. None of the methods are synchronized, it is
@@ -223,8 +224,9 @@
   @Override
   public void selectInputStreams(Collection<EditLogInputStream> streams,
       long fromTxId, boolean inProgressOk) {
-    final TreeMultiset<EditLogInputStream> allStreams =
-        TreeMultiset.create(EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    final PriorityQueue<EditLogInputStream> allStreams = 
+        new PriorityQueue<EditLogInputStream>(64,
+            EDIT_LOG_INPUT_STREAM_COMPARATOR);
     for (JournalAndStream jas : journals) {
       if (jas.isDisabled()) {
         LOG.info("Skipping jas " + jas + " since it's disabled");
@@ -240,7 +242,8 @@
     // transaction ID.
     LinkedList<EditLogInputStream> acc =
         new LinkedList<EditLogInputStream>();
-    for (EditLogInputStream elis : allStreams) {
+    EditLogInputStream elis;
+    while ((elis = allStreams.poll()) != null) {
       if (acc.isEmpty()) {
         acc.add(elis);
       } else {
@@ -248,7 +251,7 @@
         if (accFirstTxId == elis.getFirstTxId()) {
           acc.add(elis);
         } else if (accFirstTxId < elis.getFirstTxId()) {
-          streams.add(acc.get(0));
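+          // Wrap all streams that cover the same transactions in a
+          // RedundantEditLogInputStream so reads can fail over between them.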
+          streams.add(new RedundantEditLogInputStream(acc, fromTxId));
           acc.clear();
           acc.add(elis);
         } else if (accFirstTxId > elis.getFirstTxId()) {
@@ -259,7 +262,7 @@
       }
     }
     if (!acc.isEmpty()) {
-      streams.add(acc.get(0));
+      streams.add(new RedundantEditLogInputStream(acc, fromTxId));
       acc.clear();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 2798a83..44b0437 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 /**
@@ -91,22 +90,9 @@
       {
         // Add SPNEGO support to NameNode
         if (UserGroupInformation.isSecurityEnabled()) {
-          Map<String, String> params = new HashMap<String, String>();
-          String principalInConf = conf.get(
-            DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
-          if (principalInConf != null && !principalInConf.isEmpty()) {
-            params.put("kerberos.principal",
-                       SecurityUtil.getServerPrincipal(principalInConf, infoHost));
-            String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
-            if (httpKeytab != null && !httpKeytab.isEmpty()) {
-              params.put("kerberos.keytab", httpKeytab);
-            }
-
-            params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
-
-            defineFilter(webAppContext, SPNEGO_FILTER,
-                         AuthenticationFilter.class.getName(), params, null);
-          }
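+          // SPNEGO filter setup now goes through the shared initSpnego helper.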
+          initSpnego(conf,
+              DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+              DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
         }
         if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
           //add SPNEGO authentication filter for webhdfs
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2100b2c..dd377ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -277,7 +277,7 @@
     // Get block locations without updating the file access time 
     // and without block access tokens
     LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-        fileLen, false, false);
+        fileLen, false, false, false);
     if (blocks == null) { // the file is deleted
       return;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
new file mode 100644
index 0000000..7a30869
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.io.IOUtils;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+
+/**
+ * A merged input stream that handles failover between different edit logs.
+ *
+ * We will currently try each edit log stream exactly once.  In other words, we
+ * don't handle the "ping pong" scenario where different edit logs contain a
+ * different subset of the available edits.
+ */
+class RedundantEditLogInputStream extends EditLogInputStream {
+  public static final Log LOG = LogFactory.getLog(EditLogInputStream.class.getName());
+  private int curIdx;
+  private long prevTxId;
+  private final EditLogInputStream[] streams;
+
+  /**
+   * States that the RedundantEditLogInputStream can be in.
+   *
+   * <pre>
+   *                   start (if no streams)
+   *                           |
+   *                           V
+   * PrematureEOFException  +----------------+
+   *        +-------------->| EOF            |<--------------+
+   *        |               +----------------+               |
+   *        |                                                |
+   *        |          start (if there are streams)          |
+   *        |                  |                             |
+   *        |                  V                             | EOF
+   *        |   resync      +----------------+ skipUntil  +---------+
+   *        |   +---------->| SKIP_UNTIL     |----------->|  OK     |
+   *        |   |           +----------------+            +---------+
+   *        |   |                | IOE   ^ fail over to      | IOE
+   *        |   |                V       | next stream       |
+   * +----------------------+   +----------------+           |
+   * | STREAM_FAILED_RESYNC |   | STREAM_FAILED  |<----------+
+   * +----------------------+   +----------------+
+   *                  ^   Recovery mode    |
+   *                  +--------------------+
+   * </pre>
+   */
+  static private enum State {
+    /** We need to skip until prevTxId + 1 */
+    SKIP_UNTIL,
+    /** We're ready to read opcodes out of the current stream */
+    OK,
+    /** The current stream has failed. */
+    STREAM_FAILED,
+    /** The current stream has failed, and resync() was called.  */
+    STREAM_FAILED_RESYNC,
+    /** There are no more opcodes to read from this
+     * RedundantEditLogInputStream */
+    EOF;
+  }
+
+  private State state;
+  private IOException prevException;
+
+  RedundantEditLogInputStream(Collection<EditLogInputStream> streams,
+      long startTxId) {
+    this.curIdx = 0;
+    this.prevTxId = (startTxId == HdfsConstants.INVALID_TXID) ?
+      HdfsConstants.INVALID_TXID : (startTxId - 1);
+    this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL;
+    this.prevException = null;
+    // EditLogInputStreams in a RedundantEditLogInputStream must be finalized,
+    // and can't be pre-transactional.
+    EditLogInputStream first = null;
+    for (EditLogInputStream s : streams) {
+      Preconditions.checkArgument(s.getFirstTxId() !=
+          HdfsConstants.INVALID_TXID, "invalid first txid in stream: %s", s);
+      Preconditions.checkArgument(s.getLastTxId() !=
+          HdfsConstants.INVALID_TXID, "invalid last txid in stream: %s", s);
+      if (first == null) {
+        first = s;
+      } else {
+        Preconditions.checkArgument(s.getFirstTxId() == first.getFirstTxId(),
+          "All streams in the RedundantEditLogInputStream must have the same " +
+          "start transaction ID!  " + first + " had start txId " +
+          first.getFirstTxId() + ", but " + s + " had start txId " +
+          s.getFirstTxId());
+      }
+    }
+
+    this.streams = streams.toArray(new EditLogInputStream[0]);
+
+    // We sort the streams here so that the streams that end later come first.
+    Arrays.sort(this.streams, new Comparator<EditLogInputStream>() {
+      @Override
+      public int compare(EditLogInputStream a, EditLogInputStream b) {
+        return Longs.compare(b.getLastTxId(), a.getLastTxId());
+      }
+    });
+  }
+
+  @Override
+  public String getName() {
+    StringBuilder bld = new StringBuilder();
+    String prefix = "";
+    for (EditLogInputStream elis : streams) {
+      bld.append(prefix);
+      bld.append(elis.getName());
+      prefix = ", ";
+    }
+    return bld.toString();
+  }
+
+  @Override
+  public long getFirstTxId() {
+    return streams[curIdx].getFirstTxId();
+  }
+
+  @Override
+  public long getLastTxId() {
+    return streams[curIdx].getLastTxId();
+  }
+
+  @Override
+  public void close() throws IOException {
+    IOUtils.cleanup(LOG, streams);
+  }
+
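+  /**
+   * Return the next op without throwing: if the current stream has failed,
+   * move it into resync mode and swallow any IOException (returning null).
+   */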
+  @Override
+  protected FSEditLogOp nextValidOp() {
+    try {
+      if (state == State.STREAM_FAILED) {
+        state = State.STREAM_FAILED_RESYNC;
+      }
+      return nextOp();
+    } catch (IOException e) {
+      return null;
+    }
+  }
+
+  @Override
+  protected FSEditLogOp nextOp() throws IOException {
+    while (true) {
+      switch (state) {
+      case SKIP_UNTIL:
+        try {
+          if (prevTxId != HdfsConstants.INVALID_TXID) {
+            LOG.info("Fast-forwarding stream '" + streams[curIdx].getName() +
+                "' to transaction ID " + (prevTxId + 1));
+            streams[curIdx].skipUntil(prevTxId + 1);
+          }
+        } catch (IOException e) {
+          prevException = e;
+          state = State.STREAM_FAILED;
+        }
+        state = State.OK;
+        break;
+      case OK:
+        try {
+          FSEditLogOp op = streams[curIdx].readOp();
+          if (op == null) {
+            state = State.EOF;
+            if (streams[curIdx].getLastTxId() == prevTxId) {
+              return null;
+            } else {
+              throw new PrematureEOFException("got premature end-of-file " +
+                  "at txid " + prevTxId + "; expected file to go up to " +
+                  streams[curIdx].getLastTxId());
+            }
+          }
+          prevTxId = op.getTransactionId();
+          return op;
+        } catch (IOException e) {
+          prevException = e;
+          state = State.STREAM_FAILED;
+        }
+        break;
+      case STREAM_FAILED:
+        if (curIdx + 1 == streams.length) {
+          throw prevException;
+        }
+        long oldLast = streams[curIdx].getLastTxId();
+        long newLast = streams[curIdx + 1].getLastTxId();
+        if (newLast < oldLast) {
+          throw new IOException("We encountered an error reading " +
+              streams[curIdx].getName() + ".  During automatic edit log " +
+              "failover, we noticed that all of the remaining edit log " +
+              "streams are shorter than the current one!  The best " +
+              "remaining edit log ends at transaction " +
+              newLast + ", but we thought we could read up to transaction " +
+              oldLast + ".  If you continue, metadata will be lost forever!");
+        }
+        LOG.error("Got error reading edit log input stream " +
+          streams[curIdx].getName() + "; failing over to edit log " +
+          streams[curIdx + 1].getName(), prevException);
+        curIdx++;
+        state = State.SKIP_UNTIL;
+        break;
+      case STREAM_FAILED_RESYNC:
+        if (curIdx + 1 == streams.length) {
+          if (prevException instanceof PrematureEOFException) {
+            // bypass early EOF check
+            state = State.EOF;
+          } else {
+            streams[curIdx].resync();
+            state = State.SKIP_UNTIL;
+          }
+        } else {
+          LOG.error("failing over to edit log " +
+              streams[curIdx + 1].getName());
+          curIdx++;
+          state = State.SKIP_UNTIL;
+        }
+        break;
+      case EOF:
+        return null;
+      }
+    }
+  }
+
+  @Override
+  public int getVersion() throws IOException {
+    return streams[curIdx].getVersion();
+  }
+
+  @Override
+  public long getPosition() {
+    return streams[curIdx].getPosition();
+  }
+
+  @Override
+  public long length() throws IOException {
+    return streams[curIdx].length();
+  }
+
+  @Override
+  public boolean isInProgress() {
+    return streams[curIdx].isInProgress();
+  }
+
+  static private final class PrematureEOFException extends IOException {
+    private static final long serialVersionUID = 1L;
+    PrematureEOFException(String msg) {
+      super(msg);
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 7c02c64..0d79fe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -25,10 +25,8 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -68,7 +66,6 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 
 import org.apache.hadoop.util.Daemon;
@@ -239,20 +236,8 @@
                                 new AccessControlList(conf.get(DFS_ADMIN, " "))) {
       {
         if (UserGroupInformation.isSecurityEnabled()) {
-          Map<String, String> params = new HashMap<String, String>();
-          String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
-          if (principalInConf != null && !principalInConf.isEmpty()) {
-            params.put("kerberos.principal",
-                       SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
-          }
-          String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
-          if (httpKeytab != null && !httpKeytab.isEmpty()) {
-            params.put("kerberos.keytab", httpKeytab);
-          }
-          params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
-
-          defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
-                       params, null);
+          initSpnego(conf, DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
+              DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
         }
       }
     };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
index 9742082..4e533eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
@@ -27,4 +27,9 @@
    * Set a limit.  Calling this function clears any existing limit.
    */
   public void setLimit(long limit);
+  
+  /**
+   * Disable limit.
+   */
+  public void clearLimit();
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index d64d283..d41593c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -32,11 +32,11 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.collect.Lists;
 
@@ -207,6 +207,7 @@
     //
     // open connection to remote server
     //
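+    // Record when the transfer starts so the rate can be logged on completion.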
+    long startTime = Util.monotonicNow();
     URL url = new URL(str);
 
     HttpURLConnection connection = (HttpURLConnection)
@@ -312,6 +313,11 @@
                               advertisedSize);
       }
     }
+    double xferSec = Math.max(
+        ((float)(Util.monotonicNow() - startTime)) / 1000.0, 0.001);
+    long xferKb = received / 1024;
+    LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
+        xferSec, xferKb / xferSec));
 
     if (digester != null) {
       MD5Hash computedDigest = new MD5Hash(digester.digest());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 0127e95..37781ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -115,6 +116,11 @@
     return REMOTE_ADDRESS.get();
   }
 
+  /** Set the remote client address. */
+  static void setRemoteAddress(String remoteAddress) {
+    REMOTE_ADDRESS.set(remoteAddress);
+  }
+
   private @Context ServletContext context;
   private @Context HttpServletRequest request;
   private @Context HttpServletResponse response;
@@ -123,7 +129,7 @@
       final DelegationParam delegation,
       final UserParam username, final DoAsParam doAsUser,
       final UriFsPathParam path, final HttpOpParam<?> op,
-      final Param<?, ?>... parameters) throws IOException {
+      final Param<?, ?>... parameters) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
           + ", ugi=" + ugi + ", " + username + ", " + doAsUser
@@ -134,12 +140,26 @@
     response.setContentType(null);
   }
 
-  private static DatanodeInfo chooseDatanode(final NameNode namenode,
+  static DatanodeInfo chooseDatanode(final NameNode namenode,
       final String path, final HttpOpParam.Op op, final long openOffset,
-      Configuration conf) throws IOException {
-    if (op == GetOpParam.Op.OPEN
+      final long blocksize, Configuration conf) throws IOException {
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+
+    if (op == PutOpParam.Op.CREATE) {
+      //choose a datanode near to client 
+      final DatanodeDescriptor clientNode = bm.getDatanodeManager(
+          ).getDatanodeByHost(getRemoteAddress());
+      if (clientNode != null) {
+        final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy(
+            ).chooseTarget(path, 1, clientNode, null, blocksize);
+        if (datanodes.length > 0) {
+          return datanodes[0];
+        }
+      }
+    } else if (op == GetOpParam.Op.OPEN
         || op == GetOpParam.Op.GETFILECHECKSUM
         || op == PostOpParam.Op.APPEND) {
+      //choose a datanode containing a replica 
       final NamenodeProtocols np = namenode.getRpcServer();
       final HdfsFileStatus status = np.getFileInfo(path);
       if (status == null) {
@@ -158,14 +178,13 @@
         final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
         final int count = locations.locatedBlockCount();
         if (count > 0) {
-          return JspHelper.bestNode(locations.get(0), conf);
+          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
         }
       }
     } 
 
-    return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
-        ).getDatanodeManager().getNetworkTopology().chooseRandom(
-        NodeBase.ROOT);
+    return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
+        ).chooseRandom(NodeBase.ROOT);
   }
 
   private Token<? extends TokenIdentifier> generateDelegationToken(
@@ -183,9 +202,11 @@
       final UserGroupInformation ugi, final DelegationParam delegation,
       final UserParam username, final DoAsParam doAsUser,
       final String path, final HttpOpParam.Op op, final long openOffset,
+      final long blocksize,
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
-    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset, conf);
+    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset,
+        blocksize, conf);
 
     final String delegationQuery;
     if (!UserGroupInformation.isSecurityEnabled()) {
@@ -356,7 +377,7 @@
     case CREATE:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L,
+          fullpath, op.getValue(), -1L, blockSize.getValue(conf),
           permission, overwrite, bufferSize, replication, blockSize);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     } 
@@ -502,7 +523,7 @@
     case APPEND:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L, bufferSize);
+          fullpath, op.getValue(), -1L, -1L, bufferSize);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
@@ -532,7 +553,7 @@
           final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
-      ) throws IOException, URISyntaxException, InterruptedException {
+      ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op,
         offset, length, renewer, bufferSize);
   }
@@ -598,7 +619,7 @@
     case OPEN:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize);
+          fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GET_BLOCK_LOCATIONS:
@@ -634,7 +655,7 @@
     case GETFILECHECKSUM:
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
-          fullpath, op.getValue(), -1L);
+          fullpath, op.getValue(), -1L, -1L);
       return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GETDELEGATIONTOKEN:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index c7c206f..2112208 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -239,7 +239,12 @@
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
-      quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
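+      // Give a clearer error than the raw NumberFormatException message.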
+      try {
+        quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
+      } catch (NumberFormatException nfe) {
+        throw new IllegalArgumentException("\"" + str + "\" is not a valid value for a quota.");
+      }
+      
       this.args = parameters.toArray(new String[parameters.size()]);
     }
     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
index 969ecf6..c35e624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
 import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -33,17 +36,21 @@
 class OfflineEditsBinaryLoader implements OfflineEditsLoader {
   private OfflineEditsVisitor visitor;
   private EditLogInputStream inputStream;
-  private boolean fixTxIds;
+  private final boolean fixTxIds;
+  private final boolean recoveryMode;
   private long nextTxId;
+  public static final Log LOG =
+      LogFactory.getLog(OfflineEditsBinaryLoader.class.getName());
   
   /**
    * Constructor
    */
   public OfflineEditsBinaryLoader(OfflineEditsVisitor visitor,
-        EditLogInputStream inputStream) {
+        EditLogInputStream inputStream, OfflineEditsViewer.Flags flags) {
     this.visitor = visitor;
     this.inputStream = inputStream;
-    this.fixTxIds = false;
+    this.fixTxIds = flags.getFixTxIds();
+    this.recoveryMode = flags.getRecoveryMode();
     this.nextTxId = -1;
   }
 
@@ -51,9 +58,9 @@
    * Loads edits file, uses visitor to process all elements
    */
   public void loadEdits() throws IOException {
-    try {
-      visitor.start(inputStream.getVersion());
-      while (true) {
+    visitor.start(inputStream.getVersion());
+    while (true) {
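+      // In recovery mode, a read error is logged and the stream is resynced so
+      // that corrupt sections can be skipped instead of aborting the run.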
+      try {
         FSEditLogOp op = inputStream.readOp();
         if (op == null)
           break;
@@ -68,16 +75,24 @@
           nextTxId++;
         }
         visitor.visitOp(op);
+      } catch (IOException e) {
+        if (!recoveryMode) {
+          // Tell the visitor to clean up, then re-throw the exception
+          visitor.close(e);
+          throw e;
+        }
+        LOG.error("Got IOException while reading stream!  Resyncing.", e);
+        inputStream.resync();
+      } catch (RuntimeException e) {
+        if (!recoveryMode) {
+          // Tell the visitor to clean up, then re-throw the exception
+          visitor.close(e);
+          throw e;
+        }
+        LOG.error("Got RuntimeException while reading stream!  Resyncing.", e);
+        inputStream.resync();
       }
-      visitor.close(null);
-    } catch(IOException e) {
-      // Tell the visitor to clean up, then re-throw the exception
-      visitor.close(e);
-      throw e;
     }
-  }
-  
-  public void setFixTxIds() {
-    fixTxIds = true;
+    visitor.close(null);
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
index a314352..0ce1e78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 
@@ -36,13 +37,12 @@
   
   abstract public void loadEdits() throws IOException;
   
-  public abstract void setFixTxIds();
-  
   static class OfflineEditsLoaderFactory {
     static OfflineEditsLoader createLoader(OfflineEditsVisitor visitor,
-        String inputFileName, boolean xmlInput) throws IOException {
+        String inputFileName, boolean xmlInput,
+        OfflineEditsViewer.Flags flags) throws IOException {
       if (xmlInput) {
-        return new OfflineEditsXmlLoader(visitor, new File(inputFileName));
+        return new OfflineEditsXmlLoader(visitor, new File(inputFileName), flags);
       } else {
         File file = null;
         EditLogInputStream elis = null;
@@ -51,7 +51,7 @@
           file = new File(inputFileName);
           elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID,
               HdfsConstants.INVALID_TXID, false);
-          loader = new OfflineEditsBinaryLoader(visitor, elis);
+          loader = new OfflineEditsBinaryLoader(visitor, elis, flags);
         } finally {
           if ((loader == null) && (elis != null)) {
             elis.close();
@@ -61,4 +61,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
index 6fecab6..833f2bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
@@ -17,16 +17,10 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import java.io.EOFException;
-import java.io.File;
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsLoader.OfflineEditsLoaderFactory;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -37,7 +31,6 @@
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
-import org.xml.sax.SAXParseException;
 
 /**
  * This class implements an offline edits viewer, tool that
@@ -78,6 +71,9 @@
       "-f,--fix-txids         Renumber the transaction IDs in the input,\n" +
       "                       so that there are no gaps or invalid " +
       "                       transaction IDs.\n" +
+      "-r,--recover           When reading binary edit logs, use recovery \n" +
+      "                       mode.  This will give you the chance to skip \n" +
+      "                       corrupt parts of the edit log.\n" +
       "-v,--verbose           More verbose output, prints the input and\n" +
       "                       output filenames, for processors that write\n" +
       "                       to a file, also output to screen. On large\n" +
@@ -113,6 +109,7 @@
     options.addOption("p", "processor", true, "");
     options.addOption("v", "verbose", false, "");
     options.addOption("f", "fix-txids", false, "");
+    options.addOption("r", "recover", false, "");
     options.addOption("h", "help", false, "");
 
     return options;
@@ -128,23 +125,20 @@
    * @return                0 on success; error code otherwise
    */
   public int go(String inputFileName, String outputFileName, String processor,
-      boolean printToScreen, boolean fixTxIds, OfflineEditsVisitor visitor)
+      Flags flags, OfflineEditsVisitor visitor)
   {
-    if (printToScreen) {
+    if (flags.getPrintToScreen()) {
       System.out.println("input  [" + inputFileName  + "]");
       System.out.println("output [" + outputFileName + "]");
     }
     try {
       if (visitor == null) {
         visitor = OfflineEditsVisitorFactory.getEditsVisitor(
-            outputFileName, processor, printToScreen);
+            outputFileName, processor, flags.getPrintToScreen());
       }
       boolean xmlInput = inputFileName.endsWith(".xml");
       OfflineEditsLoader loader = OfflineEditsLoaderFactory.
-          createLoader(visitor, inputFileName, xmlInput);
-      if (fixTxIds) {
-        loader.setFixTxIds();
-      }
+          createLoader(visitor, inputFileName, xmlInput, flags);
       loader.loadEdits();
     } catch(Exception e) {
       System.err.println("Encountered exception. Exiting: " + e.getMessage());
@@ -154,6 +148,39 @@
     return 0;
   }
 
+  public static class Flags {
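+    // Command-line options (-v, -f, -r) bundled together so they can be
+    // handed to the edits loaders as one object.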
+    private boolean printToScreen = false;
+    private boolean fixTxIds = false;
+    private boolean recoveryMode = false;
+    
+    public Flags() {
+    }
+    
+    public boolean getPrintToScreen() {
+      return printToScreen;
+    }
+    
+    public void setPrintToScreen() {
+      printToScreen = true;
+    }
+    
+    public boolean getFixTxIds() {
+      return fixTxIds;
+    }
+    
+    public void setFixTxIds() {
+      fixTxIds = true;
+    }
+    
+    public boolean getRecoveryMode() {
+      return recoveryMode;
+    }
+    
+    public void setRecoveryMode() {
+      recoveryMode = true;
+    }
+  }
+  
   /**
    * Main entry point for ToolRunner (see ToolRunner docs)
    *
@@ -177,6 +204,7 @@
       printHelp();
       return -1;
     }
+    
     if(cmd.hasOption("h")) { // print help and exit
       printHelp();
       return -1;
@@ -187,10 +215,17 @@
     if(processor == null) {
       processor = defaultProcessor;
     }
-    boolean printToScreen = cmd.hasOption("v");
-    boolean fixTxIds = cmd.hasOption("f");
-    return go(inputFileName, outputFileName, processor,
-        printToScreen, fixTxIds, null);
+    Flags flags = new Flags();
+    if (cmd.hasOption("r")) {
+      flags.setRecoveryMode();
+    }
+    if (cmd.hasOption("f")) {
+      flags.setFixTxIds();
+    }
+    if (cmd.hasOption("v")) {
+      flags.setPrintToScreen();
+    }
+    return go(inputFileName, outputFileName, processor, flags, null);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
index 009db6a..393015b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
-
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.xml.sax.Attributes;
 import org.xml.sax.InputSource;
@@ -46,9 +46,9 @@
 @InterfaceStability.Unstable
 class OfflineEditsXmlLoader 
     extends DefaultHandler implements OfflineEditsLoader {
-  private boolean fixTxIds;
-  private OfflineEditsVisitor visitor;
-  private FileReader fileReader;
+  private final boolean fixTxIds;
+  private final OfflineEditsVisitor visitor;
+  private final FileReader fileReader;
   private ParseState state;
   private Stanza stanza;
   private Stack<Stanza> stanzaStack;
@@ -68,9 +68,10 @@
   }
   
   public OfflineEditsXmlLoader(OfflineEditsVisitor visitor,
-        File inputFile) throws FileNotFoundException {
+        File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException {
     this.visitor = visitor;
     this.fileReader = new FileReader(inputFile);
+    this.fixTxIds = flags.getFixTxIds();
   }
 
   /**
@@ -250,9 +251,4 @@
   public void characters (char ch[], int start, int length) {
     cbuf.append(ch, start, length);
   }
-
-  @Override
-  public void setFixTxIds() {
-    fixTxIds = true;
-  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
index 8ef5701..d452205 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import static org.apache.hadoop.hdfs.server.common.Util.monotonicNow;
+
 /** 
  * a class to throttle the data transfers.
  * This class is thread safe. It can be shared by multiple threads.
@@ -26,9 +28,9 @@
 public class DataTransferThrottler {
   private long period;          // period over which bw is imposed
   private long periodExtension; // Max period over which bw accumulates.
-  private long bytesPerPeriod; // total number of bytes can be sent in each period
-  private long curPeriodStart; // current period starting time
-  private long curReserve;     // remaining bytes can be sent in the period
+  private long bytesPerPeriod;  // total number of bytes can be sent in each period
+  private long curPeriodStart;  // current period starting time
+  private long curReserve;      // remaining bytes can be sent in the period
   private long bytesAlreadyUsed;
 
   /** Constructor 
@@ -45,7 +47,7 @@
    * @param bandwidthPerSec bandwidth allowed in bytes per second. 
    */
   public DataTransferThrottler(long period, long bandwidthPerSec) {
-    this.curPeriodStart = System.currentTimeMillis();
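+    // Use a monotonic clock so throttling is unaffected by wall-clock changes.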
+    this.curPeriodStart = monotonicNow();
     this.period = period;
     this.curReserve = this.bytesPerPeriod = bandwidthPerSec*period/1000;
     this.periodExtension = period*3;
@@ -87,7 +89,7 @@
     bytesAlreadyUsed += numOfBytes;
 
     while (curReserve <= 0) {
-      long now = System.currentTimeMillis();
+      long now = monotonicNow();
       long curPeriodEnd = curPeriodStart + period;
 
       if ( now < curPeriodEnd ) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 0b355cc..4327dd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -34,11 +34,14 @@
 import java.util.Map;
 import java.util.StringTokenizer;
 
+import javax.ws.rs.core.MediaType;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -57,7 +60,6 @@
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -252,9 +254,23 @@
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  static Map<?, ?> jsonParse(final InputStream in) throws IOException {
+  static Map<?, ?> jsonParse(final HttpURLConnection c, final boolean useErrorStream
+      ) throws IOException {
+    if (c.getContentLength() == 0) {
+      return null;
+    }
+    final InputStream in = useErrorStream? c.getErrorStream(): c.getInputStream();
     if (in == null) {
-      throw new IOException("The input stream is null.");
+      throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
+    }
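+    // Refuse to parse responses whose Content-Type is not compatible with JSON.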
+    final String contentType = c.getContentType();
+    if (contentType != null) {
+      final MediaType parsed = MediaType.valueOf(contentType);
+      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+        throw new IOException("Content-Type \"" + contentType
+            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+            + "\" (parsed=\"" + parsed + "\")");
+      }
     }
     return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
   }
@@ -265,7 +281,7 @@
     if (code != op.getExpectedHttpResponseCode()) {
       final Map<?, ?> m;
       try {
-        m = jsonParse(conn.getErrorStream());
+        m = jsonParse(conn, true);
       } catch(IOException e) {
         throw new IOException("Unexpected HTTP response: code=" + code + " != "
             + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
@@ -425,7 +441,7 @@
     final HttpURLConnection conn = httpConnect(op, fspath, parameters);
     try {
       final Map<?, ?> m = validateResponse(op, conn);
-      return m != null? m: jsonParse(conn.getInputStream());
+      return m != null? m: jsonParse(conn, false);
     } finally {
       conn.disconnect();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java
index dc21f68..9879ba3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java
@@ -44,6 +44,10 @@
 
     @Override
     InetSocketAddress parse(final String str) {
+      if (str == null) {
+        throw new IllegalArgumentException("The input string is null: expect "
+            + getDomain());
+      }
       final int i = str.indexOf(':');
       if (i < 0) {
         throw new IllegalArgumentException("Failed to parse \"" + str
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
index 023402c..6f102e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
@@ -59,7 +59,7 @@
 
     @Override
     public String getDomain() {
-      return "<" + NULL + " | short in radix " + radix + ">";
+      return "<" + NULL + " | long in radix " + radix + ">";
     }
 
     @Override
@@ -72,7 +72,7 @@
       }
     }
 
-    /** Convert a Short to a String. */ 
+    /** Convert a Long to a String. */ 
     String toString(final Long n) {
       return n == null? NULL: Long.toString(n, radix);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/Makefile.am b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/Makefile.am
deleted file mode 100644
index 8bbd627..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/Makefile.am
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-@PRODUCT_MK@
-
-#AM_CPPFLAGS = -I$(top_srcdir)
-ACLOCAL_AMFLAGS = -I m4
-
-lib_LTLIBRARIES = libhdfs.la
-libhdfs_la_SOURCES = hdfs.c hdfsJniHelper.c hdfs.h
-
-#check_PROGRAMS = hdfs_test hdfs_read hdfs_write
-check_PROGRAMS = hdfs_test hdfs_read hdfs_write
-
-hdfs_test_SOURCES = hdfs_test.c hdfs.h
-hdfs_test_LDADD = ${libdir}/libhdfs.la 
-
-hdfs_read_SOURCES = hdfs_read.c
-hdfs_read_LDADD = ${libdir}/libhdfs.la 
-
-hdfs_write_SOURCES = hdfs_write.c
-hdfs_write_LDADD = ${libdir}/libhdfs.la 
-
-test: hdfs_test hdfs_read hdfs_write 
-	${LIBHDFS_SRC_DIR}/tests/test-libhdfs.sh	
-
-
-# vim: sw=4: ts=4: noet
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/configure.ac b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/configure.ac
deleted file mode 100644
index d801fc4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/configure.ac
+++ /dev/null
@@ -1,125 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Autoconf input file
-# $Id$
-
-AC_INIT([libhdfs], [0.1.0], omalley@apache.org)
-AC_PREFIX_DEFAULT([`pwd`/../install])
-AC_CONFIG_AUX_DIR([config])
-
-# Generates Makefile from Makefile.am. Modify when new subdirs are added.
-# Change Makefile.am also to add subdirectly.
-AM_INIT_AUTOMAKE(foreign no-dist)
-AC_CONFIG_FILES(Makefile)
-
-LT_INIT
- 
-AC_CONFIG_MACRO_DIR([m4])
-dnl -------------------------------------------------------------------------
-dnl Check current host (forget about cross compilation) and validate it
-dnl against the cache (fail if the cache differs)
-dnl -------------------------------------------------------------------------
-AP_MSG_HEADER([Current host])
-AC_CANONICAL_HOST()
-AP_CANONICAL_HOST_CHECK()
-
-dnl -------------------------------------------------------------------------
-dnl Check C environment
-dnl -------------------------------------------------------------------------
-AP_MSG_HEADER([C-Language compilation tools])
-AC_PROG_CC()
-AC_CHECK_TOOL(RANLIB, ranlib, :)
-
-dnl -------------------------------------------------------------------------
-dnl Check if this host is supported
-dnl -------------------------------------------------------------------------
-AP_MSG_HEADER([Host support])
-AP_SUPPORTED_HOST()
-if test "$supported_os" = "darwin"
-then
-  if test -z "$JAVA_HOME" -a -d /System/Library/Frameworks/JavaVM.framework/Home; then
-  	JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home
-  fi
-
-  _prevdir=`/bin/pwd`
-  if test -n "$JAVA_HOME" -a -d "$JAVA_HOME/include"; then
-    cd "$JAVA_HOME/include"
-  elif test -n "$JAVA_HOME" -a -d "$JAVA_HOME/../Headers"; then
-    cd "$JAVA_HOME/../Headers"
-  else
-    cd /System/Library/Frameworks/JavaVM.framework/Headers
-  fi
-  CFLAGS="$CFLAGS -m${JVM_ARCH} -I`/bin/pwd -P`"
-  cd $_prevdir
-  unset _prevdir
-fi
-
-dnl -------------------------------------------------------------------------
-dnl Check JAVA environment
-dnl -------------------------------------------------------------------------
-AP_MSG_HEADER([Java compilation tools])
-AP_JAVA()
-AP_SABLEVM()
-AP_KAFFE()
-AP_PROG_JAVAC()
-AP_PROG_JAR()
-AP_JVM_LIBDIR()
-if test "$supported_os" != "darwin"
-then
-  case $host_cpu in
-  arm*) ;;
-  *)
-    CFLAGS="$CFLAGS -m${JVM_ARCH}"
-    LDFLAGS="$LDFLAGS -m${JVM_ARCH}"
-    ;;
-  esac
-  AC_MSG_RESULT([VALUE OF JVM_ARCH IS :$JVM_ARCH])
-  CFLAGS="$CFLAGS -I$JAVA_HOME/include -I$JAVA_HOME/include/$supported_os"
-  LDFLAGS="$LDFLAGS -L$LIB_JVM_DIR -ljvm -Wl,-x"
-fi
-
-dnl -------------------------------------------------------------------------
-dnl Add gcc specific CFLAGS.
-dnl -------------------------------------------------------------------------
-if test "$GCC" = "yes"
-then
-   CFLAGS="$CFLAGS -Wall -Wstrict-prototypes"
-   AC_MSG_RESULT([gcc flags added])
-fi
-dnl -------------------------------------------------------------------------
-dnl Add gcc specific CFLAGS.
-dnl -------------------------------------------------------------------------
-if test -z "$LDCMD"
-then
-   LDCMD="$CC"
-fi
-AC_SUBST(LDCMD)
-
-
-AC_PROG_CC
-AC_PROG_LIBTOOL
-
-AC_TYPE_SIZE_T
-AC_CHECK_FUNCS([strdup strerror strtoul])
-AC_CHECK_HEADERS([fcntl.h])
-AC_C_CONST
-AC_C_VOLATILE
-#AC_FUNC_MALLOC
-AC_HEADER_STDBOOL
-AC_SUBST(PRODUCT_MK)
-AC_OUTPUT
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apfunctions.m4 b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apfunctions.m4
deleted file mode 100644
index cb5938f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apfunctions.m4
+++ /dev/null
@@ -1,41 +0,0 @@
-dnl
-dnl Licensed to the Apache Software Foundation (ASF) under one or more
-dnl contributor license agreements.  See the NOTICE file distributed with
-dnl this work for additional information regarding copyright ownership.
-dnl The ASF licenses this file to You under the Apache License, Version 2.0
-dnl (the "License"); you may not use this file except in compliance with
-dnl the License.  You may obtain a copy of the License at
-dnl
-dnl     http://www.apache.org/licenses/LICENSE-2.0
-dnl
-dnl Unless required by applicable law or agreed to in writing, software
-dnl distributed under the License is distributed on an "AS IS" BASIS,
-dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-dnl See the License for the specific language governing permissions and
-dnl limitations under the License.
-dnl
-
-dnl -------------------------------------------------------------------------
-dnl Author  Pier Fumagalli <mailto:pier.fumagalli@eng.sun.com>
-dnl Version $Id$
-dnl -------------------------------------------------------------------------
-
-AC_DEFUN([AP_MSG_HEADER],[
-  printf "*** %s ***\n" "$1" 1>&2
-  AC_PROVIDE([$0])
-])
-
-AC_DEFUN([AP_CANONICAL_HOST_CHECK],[
-  AC_MSG_CHECKING([cached host system type])
-  if { test x"${ac_cv_host_system_type+set}" = x"set"  &&
-       test x"$ac_cv_host_system_type" != x"$host" ; }
-  then
-    AC_MSG_RESULT([$ac_cv_host_system_type])
-    AC_MSG_ERROR([remove the \"$cache_file\" file and re-run configure])
-  else
-    AC_MSG_RESULT(ok)
-    ac_cv_host_system_type="$host"
-  fi
-  AC_PROVIDE([$0])
-])
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apjava.m4 b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apjava.m4
deleted file mode 100644
index 993fc5b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apjava.m4
+++ /dev/null
@@ -1,142 +0,0 @@
-dnl
-dnl Licensed to the Apache Software Foundation (ASF) under one or more
-dnl contributor license agreements.  See the NOTICE file distributed with
-dnl this work for additional information regarding copyright ownership.
-dnl The ASF licenses this file to You under the Apache License, Version 2.0
-dnl (the "License"); you may not use this file except in compliance with
-dnl the License.  You may obtain a copy of the License at
-dnl
-dnl     http://www.apache.org/licenses/LICENSE-2.0
-dnl
-dnl Unless required by applicable law or agreed to in writing, software
-dnl distributed under the License is distributed on an "AS IS" BASIS,
-dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-dnl See the License for the specific language governing permissions and
-dnl limitations under the License.
-dnl
-
-dnl -------------------------------------------------------------------------
-dnl Author  Pier Fumagalli <mailto:pier.fumagalli@eng.sun.com>
-dnl Version $Id$
-dnl -------------------------------------------------------------------------
-
-AC_DEFUN([AP_PROG_JAVAC_WORKS],[
-  AC_CACHE_CHECK([wether the Java compiler ($JAVAC) works],ap_cv_prog_javac_works,[
-    echo "public class Test {}" > Test.java
-    $JAVAC $JAVACFLAGS Test.java > /dev/null 2>&1
-    if test $? -eq 0
-    then
-      rm -f Test.java Test.class
-      ap_cv_prog_javac_works=yes
-    else
-      rm -f Test.java Test.class
-      AC_MSG_RESULT(no)
-      AC_MSG_ERROR([installation or configuration problem: javac cannot compile])
-    fi
-  ])
-])
-
-dnl Check for JAVA compilers.
-AC_DEFUN([AP_PROG_JAVAC],[
-  if test "$SABLEVM" != "NONE"
-  then
-    AC_PATH_PROG(JAVACSABLE,javac-sablevm,NONE,$JAVA_HOME/bin)
-  else
-    JAVACSABLE="NONE"
-  fi
-  if test "$JAVACSABLE" = "NONE"
-  then
-    XPATH="$JAVA_HOME/bin:$JAVA_HOME/Commands:$PATH"
-    AC_PATH_PROG(JAVAC,javac,NONE,$XPATH)
-  else
-    AC_PATH_PROG(JAVAC,javac-sablevm,NONE,$JAVA_HOME/bin)
-  fi
-  AC_MSG_RESULT([$JAVAC])
-  if test "$JAVAC" = "NONE"
-  then
-    AC_MSG_ERROR([javac not found])
-  fi
-  AP_PROG_JAVAC_WORKS()
-  AC_PROVIDE([$0])
-  AC_SUBST(JAVAC)
-  AC_SUBST(JAVACFLAGS)
-])
-
-dnl Check for jar archivers.
-AC_DEFUN([AP_PROG_JAR],[
-  if test "$SABLEVM" != "NONE"
-  then
-    AC_PATH_PROG(JARSABLE,jar-sablevm,NONE,$JAVA_HOME/bin)
-  else
-    JARSABLE="NONE"
-  fi
-  if test "$JARSABLE" = "NONE"
-  then
-    XPATH="$JAVA_HOME/bin:$JAVA_HOME/Commands:$PATH"
-    AC_PATH_PROG(JAR,jar,NONE,$XPATH)
-  else
-    AC_PATH_PROG(JAR,jar-sablevm,NONE,$JAVA_HOME/bin)
-  fi
-  if test "$JAR" = "NONE"
-  then
-    AC_MSG_ERROR([jar not found])
-  fi
-  AC_PROVIDE([$0])
-  AC_SUBST(JAR)
-])
-
-AC_DEFUN([AP_JAVA],[
-  AC_ARG_WITH(java,[  --with-java=DIR         Specify the location of your JDK installation],[
-    AC_MSG_CHECKING([JAVA_HOME])
-    if test -d "$withval"
-    then
-      JAVA_HOME="$withval"
-      AC_MSG_RESULT([$JAVA_HOME])
-    else
-      AC_MSG_RESULT([failed])
-      AC_MSG_ERROR([$withval is not a directory])
-    fi
-    AC_SUBST(JAVA_HOME)
-  ])
-  if test x"$JAVA_HOME" = x
-  then
-    AC_MSG_ERROR([Java Home not defined. Rerun with --with-java=[...] parameter])
-  fi
-])
-
-dnl check if the JVM in JAVA_HOME is sableVM
-dnl $JAVA_HOME/bin/sablevm and /opt/java/lib/sablevm/bin are tested.
-AC_DEFUN([AP_SABLEVM],[
-  if test x"$JAVA_HOME" != x
-  then
-    AC_PATH_PROG(SABLEVM,sablevm,NONE,$JAVA_HOME/bin)
-    if test "$SABLEVM" = "NONE"
-    then
-      dnl java may be SableVM.
-      if $JAVA_HOME/bin/java -version 2> /dev/null | grep SableVM > /dev/null
-      then
-        SABLEVM=$JAVA_HOME/bin/java
-      fi
-    fi
-    if test "$SABLEVM" != "NONE"
-    then
-      AC_MSG_RESULT([Using sableVM: $SABLEVM])
-      CFLAGS="$CFLAGS -DHAVE_SABLEVM"
-    fi
-  fi
-])
-
-dnl check if the JVM in JAVA_HOME is kaffe
-dnl $JAVA_HOME/bin/kaffe is tested.
-AC_DEFUN([AP_KAFFE],[
-  if test x"$JAVA_HOME" != x
-  then
-    AC_PATH_PROG(KAFFEVM,kaffe,NONE,$JAVA_HOME/bin)
-    if test "$KAFFEVM" != "NONE"
-    then
-      AC_MSG_RESULT([Using kaffe: $KAFFEVM])
-      CFLAGS="$CFLAGS -DHAVE_KAFFEVM"
-      LDFLAGS="$LDFLAGS -Wl,-rpath $JAVA_HOME/jre/lib/$HOST_CPU -L $JAVA_HOME/jre/lib/$HOST_CPU -lkaffevm"
-    fi
-  fi
-])
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4 b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4
deleted file mode 100644
index 0c8b262..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/m4/apsupport.m4
+++ /dev/null
@@ -1,168 +0,0 @@
-dnl
-dnl Licensed to the Apache Software Foundation (ASF) under one or more
-dnl contributor license agreements.  See the NOTICE file distributed with
-dnl this work for additional information regarding copyright ownership.
-dnl The ASF licenses this file to You under the Apache License, Version 2.0
-dnl (the "License"); you may not use this file except in compliance with
-dnl the License.  You may obtain a copy of the License at
-dnl
-dnl     http://www.apache.org/licenses/LICENSE-2.0
-dnl
-dnl Unless required by applicable law or agreed to in writing, software
-dnl distributed under the License is distributed on an "AS IS" BASIS,
-dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-dnl See the License for the specific language governing permissions and
-dnl limitations under the License.
-dnl
-
-dnl -------------------------------------------------------------------------
-dnl Author  Pier Fumagalli <mailto:pier.fumagalli@eng.sun.com>
-dnl Version $Id$
-dnl -------------------------------------------------------------------------
-
-AC_DEFUN([AP_SUPPORTED_HOST],[
-  AC_MSG_CHECKING([C flags dependant on host system type])
-
-  case $host_os in
-  darwin*)
-    CFLAGS="$CFLAGS -DOS_DARWIN -DDSO_DYLD"
-    supported_os="darwin"
-    ;;
-  solaris*)
-    CFLAGS="$CFLAGS -DOS_SOLARIS -DDSO_DLFCN"
-    supported_os="solaris"
-    LIBS="$LIBS -ldl -lthread"
-    ;;
-  linux*)
-    CFLAGS="$CFLAGS -DOS_LINUX -DDSO_DLFCN"
-    supported_os="linux"
-    LIBS="$LIBS -ldl -lpthread"
-    ;;
-  cygwin)
-    CFLAGS="$CFLAGS -DOS_CYGWIN -DDSO_DLFCN -DNO_SETSID"
-    supported_os="win32"
-    ;;
-  sysv)
-    CFLAGS="$CFLAGS -DOS_SYSV -DDSO_DLFCN"
-    LIBS="$LIBS -ldl"
-    ;;
-  sysv4)
-    CFLAGS="$CFLAGS -DOS_SYSV -DDSO_DLFCN -Kthread"
-    LDFLAGS="-Kthread $LDFLAGS"
-    LIBS="$LIBS -ldl"
-    ;;
-  freebsd*)
-    CFLAGS="$CFLAGS -DOS_FREEBSD -DDSO_DLFCN -D_THREAD_SAFE -pthread"
-    LDFLAGS="-pthread $LDFLAGS"
-    supported_os="freebsd"
-    ;;
-  osf5*)
-    CFLAGS="$CFLAGS -pthread -DOS_TRU64 -DDSO_DLFCN -D_XOPEN_SOURCE_EXTENDED"
-    LDFLAGS="$LDFLAGS -pthread"
-    ;;
-  hpux11*)
-    CFLAGS="$CFLAGS -pthread -DOS_HPUX -DDSO_DLFCN"
-    LDFLAGS="$LDFLAGS -pthread"
-    LIBS="$LIBS -lpthread"
-    ;;
-  *)
-    AC_MSG_RESULT([failed])
-    AC_MSG_ERROR([Unsupported operating system "$host_os"]);;
-  esac
-
-  case $host_cpu in
-  powerpc*)
-    CFLAGS="$CFLAGS -DCPU=\\\"$host_cpu\\\""
-    HOST_CPU=$host_cpu;;
-  sparc*)
-    CFLAGS="$CFLAGS -DCPU=\\\"$host_cpu\\\""
-    HOST_CPU=$host_cpu;;
-  i?86)
-    CFLAGS="$CFLAGS -DCPU=\\\"i386\\\""
-    HOST_CPU=i386;;
-  x86_64)
-    CFLAGS="$CFLAGS -DCPU=\\\"amd64\\\""
-    HOST_CPU=amd64;;
-  bs2000)
-    CFLAGS="$CFLAGS -DCPU=\\\"osd\\\" -DCHARSET_EBCDIC -DOSD_POSIX"
-    supported_os="osd"
-    LDFLAGS="-Kno_link_stdlibs -B llm4"
-    LIBS="$LIBS -lBLSLIB"
-    LDCMD="/opt/C/bin/cc"
-    HOST_CPU=osd;;
-  mips)
-    CFLAGS="$CFLAGS -DCPU=\\\"mips\\\""
-    supported_os="mips"
-    HOST_CPU=mips;;
-  alpha*)
-    CFLAGS="$CFLAGS -DCPU=\\\"alpha\\\""
-    supported_os="alpha"
-    HOST_CPU=alpha;;
-  hppa2.0w)
-    CFLAGS="$CFLAGS -DCPU=\\\"PA_RISC2.0W\\\""
-    supported_os="hp-ux"
-    HOST_CPU=PA_RISC2.0W;;
-  hppa2.0)
-    CFLAGS="$CFLAGS -DCPU=\\\"PA_RISC2.0\\\""
-    supported_os="hp-ux"
-    HOST_CPU=PA_RISC2.0;;
-  mipsel)
-    CFLAGS="$CFLAGS -DCPU=\\\"mipsel\\\""
-    supported_os="mipsel"
-    HOST_CPU=mipsel;;
-  ia64)
-    CFLAGS="$CFLAGS -DCPU=\\\"ia64\\\""
-    supported_os="ia64"
-    HOST_CPU=ia64;;
-  s390)
-    CFLAGS="$CFLAGS -DCPU=\\\"s390\\\""
-    supported_os="s390"
-    HOST_CPU=s390;;
-  arm*)
-    CFLAGS="$CFLAGS -DCPU=\\\"arm\\\""
-    supported_os="arm"
-    HOST_CPU=arm;;
-  *)
-    AC_MSG_RESULT([failed])
-    AC_MSG_ERROR([Unsupported CPU architecture "$host_cpu"]);;
-  esac
-
-  AC_MSG_RESULT([ok])
-  AC_SUBST(CFLAGS)
-  AC_SUBST(LDFLAGS)
-])
-
-AC_DEFUN([AP_JVM_LIBDIR],[
-  AC_MSG_CHECKING([where on earth this jvm library is..])
-  javabasedir=$JAVA_HOME
-  case $host_os in
-    cygwin* | mingw* | pw23* )
-    lib_jvm_dir=`find $javabasedir -follow \( \
-	\( -name client -type d -prune \) -o \
-        \( -name "jvm.dll" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
-    ;;
-    aix*)
-    lib_jvm_dir=`find $javabasedir \( \
-        \( -name client -type d -prune \) -o \
-	\( -name "libjvm.*" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
-    if test -z "$lib_jvm_dir"; then
-       lib_jvm_dir=`find $javabasedir \( \
-       \( -name client -type d -prune \) -o \
-       \( -name "libkaffevm.*" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
-    fi
-    ;;
-    *)
-    lib_jvm_dir=`find $javabasedir -follow \( \
-       \( -name client -type d -prune \) -o \
-       \( -name "libjvm.*" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
-    if test -z "$lib_jvm_dir"; then
-       lib_jvm_dir=`find $javabasedir -follow \( \
-       \( -name client -type d -prune \) -o \
-       \( -name "libkaffevm.*" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
-    fi 
-    ;;
-  esac
-  LIB_JVM_DIR=$lib_jvm_dir
-  AC_MSG_RESULT([ohh u there ... $LIB_JVM_DIR])
-  AC_SUBST(LIB_JVM_DIR)
-])
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index c116ed1..9cf0888 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -25,8 +25,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -39,6 +37,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 
@@ -66,12 +66,9 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.ha.HAServiceProtocolHelper;
-import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
-import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -134,6 +131,7 @@
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
+    private boolean enableManagedDfsDirsRedundancy = true;
     private boolean manageDataDfsDirs = true;
     private StartupOption option = null;
     private String[] racks = null; 
@@ -187,7 +185,7 @@
       this.manageNameDfsDirs = val;
       return this;
     }
-
+    
     /**
      * Default: true
      */
@@ -199,6 +197,14 @@
     /**
      * Default: true
      */
+    public Builder enableManagedDfsDirsRedundancy(boolean val) {
+      this.enableManagedDfsDirsRedundancy = val;
+      return this;
+    }
+
+    /**
+     * Default: true
+     */
     public Builder manageDataDfsDirs(boolean val) {
       this.manageDataDfsDirs = val;
       return this;
@@ -298,6 +304,7 @@
                        builder.format,
                        builder.manageNameDfsDirs,
                        builder.manageNameDfsSharedDirs,
+                       builder.enableManagedDfsDirsRedundancy,
                        builder.manageDataDfsDirs,
                        builder.option,
                        builder.racks,
@@ -385,7 +392,7 @@
   public MiniDFSCluster(Configuration conf,
                         int numDataNodes,
                         StartupOption nameNodeOperation) throws IOException {
-    this(0, conf, numDataNodes, false, false, false,  nameNodeOperation, 
+    this(0, conf, numDataNodes, false, false, false, false,  nameNodeOperation, 
           null, null, null);
   }
   
@@ -407,7 +414,8 @@
                         int numDataNodes,
                         boolean format,
                         String[] racks) throws IOException {
-    this(0, conf, numDataNodes, format, true, true,  null, racks, null, null);
+    this(0, conf, numDataNodes, format, true, true, true, null,
+        racks, null, null);
   }
   
   /**
@@ -429,7 +437,8 @@
                         int numDataNodes,
                         boolean format,
                         String[] racks, String[] hosts) throws IOException {
-    this(0, conf, numDataNodes, format, true, true, null, racks, hosts, null);
+    this(0, conf, numDataNodes, format, true, true, true, null,
+        racks, hosts, null);
   }
   
   /**
@@ -462,8 +471,8 @@
                         boolean manageDfsDirs,
                         StartupOption operation,
                         String[] racks) throws IOException {
-    this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
-         operation, racks, null, null);
+    this(nameNodePort, conf, numDataNodes, format, manageDfsDirs,
+        manageDfsDirs, manageDfsDirs, operation, racks, null, null);
   }
 
   /**
@@ -497,7 +506,7 @@
                         String[] racks,
                         long[] simulatedCapacities) throws IOException {
     this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
-          operation, racks, null, simulatedCapacities);
+        manageDfsDirs, operation, racks, null, simulatedCapacities);
   }
   
   /**
@@ -531,13 +540,15 @@
                         int numDataNodes,
                         boolean format,
                         boolean manageNameDfsDirs,
+                        boolean enableManagedDfsDirsRedundancy,
                         boolean manageDataDfsDirs,
                         StartupOption operation,
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
     initMiniDFSCluster(conf, numDataNodes, format,
-        manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
+        manageNameDfsDirs, true, enableManagedDfsDirsRedundancy, manageDataDfsDirs,
+        operation, racks, hosts,
         simulatedCapacities, null, true, false,
         MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0));
   }
@@ -545,8 +556,8 @@
   private void initMiniDFSCluster(
       Configuration conf,
       int numDataNodes, boolean format, boolean manageNameDfsDirs,
-      boolean manageNameDfsSharedDirs, boolean manageDataDfsDirs,
-      StartupOption operation, String[] racks,
+      boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
+      boolean manageDataDfsDirs, StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
       MiniDFSNNTopology nnTopology)
@@ -586,6 +597,7 @@
     federation = nnTopology.isFederated();
     createNameNodesAndSetConf(
         nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+        enableManagedDfsDirsRedundancy,
         format, operation, clusterId, conf);
     
     if (format) {
@@ -608,7 +620,8 @@
   
   private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
       boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
-      boolean format, StartupOption operation, String clusterId,
+      boolean enableManagedDfsDirsRedundancy, boolean format,
+      StartupOption operation, String clusterId,
       Configuration conf) throws IOException {
     Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
         "empty NN topology: no namenodes specified!");
@@ -664,7 +677,7 @@
       Collection<URI> prevNNDirs = null;
       int nnCounterForFormat = nnCounter;
       for (NNConf nn : nameservice.getNNs()) {
-        initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
+        initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs, manageNameDfsDirs,
             nnCounterForFormat);
         Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
         if (format) {
@@ -696,7 +709,8 @@
 
       // Start all Namenodes
       for (NNConf nn : nameservice.getNNs()) {
-        initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs, nnCounter);
+        initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
+            enableManagedDfsDirsRedundancy, nnCounter);
         createNameNode(nnCounter++, conf, numDataNodes, false, operation,
             clusterId, nsId, nn.getNnId());
       }
@@ -730,8 +744,8 @@
 
   private void initNameNodeConf(Configuration conf,
       String nameserviceId, String nnId,
-      boolean manageNameDfsDirs, int nnIndex)
-      throws IOException {
+      boolean manageNameDfsDirs, boolean enableManagedDfsDirsRedundancy,
+      int nnIndex) throws IOException {
     if (nameserviceId != null) {
       conf.set(DFS_NAMESERVICE_ID, nameserviceId);
     }
@@ -740,12 +754,21 @@
     }
     
     if (manageNameDfsDirs) {
-      conf.set(DFS_NAMENODE_NAME_DIR_KEY,
-          fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
-          fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
-      conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
-          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
-          fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
+      if (enableManagedDfsDirsRedundancy) {
+        conf.set(DFS_NAMENODE_NAME_DIR_KEY,
+            fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
+            fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
+        conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+            fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
+            fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
+      } else {
+        conf.set(DFS_NAMENODE_NAME_DIR_KEY,
+            fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1))).
+              toString());
+        conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+            fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1))).
+              toString());
+      }
     }
   }
 
@@ -1384,7 +1407,6 @@
       waitClusterUp();
       LOG.info("Restarted the namenode");
       waitActive();
-      LOG.info("Cluster is active");
     }
   }
 
@@ -1760,6 +1782,7 @@
         }
       }
     }
+    LOG.info("Cluster is active");
   }
   
   private synchronized boolean shouldWait(DatanodeInfo[] dnInfo,
@@ -2143,7 +2166,7 @@
     String nnId = null;
     initNameNodeAddress(conf, nameserviceId,
         new NNConf(nnId).setIpcPort(namenodePort));
-    initNameNodeConf(conf, nameserviceId, nnId, true, nnIndex);
+    initNameNodeConf(conf, nameserviceId, nnId, true, true, nnIndex);
     createNameNode(nnIndex, conf, numDataNodes, true, null, null,
         nameserviceId, nnId);
 
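Illustrative sketch (not part of the patch): the new enableManagedDfsDirsRedundancy builder flag decides whether the managed dfs.namenode.name.dir and checkpoint dir are configured as a redundant pair of directories or as a single directory each. A hedged usage example for a test that wants exactly one name directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

/** Sketch: a mini cluster whose managed name/checkpoint dirs are not duplicated. */
public class SingleNameDirCluster {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .enableManagedDfsDirsRedundancy(false)   // new builder option in this patch
        .build();
    try {
      cluster.waitActive();
      // dfs.namenode.name.dir and dfs.namenode.checkpoint.dir now each point at
      // a single directory instead of the usual redundant pair.
    } finally {
      cluster.shutdown();
    }
  }
}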
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 2ba4dde..44e4f9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -25,46 +25,55 @@
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
-import java.net.SocketTimeoutException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.net.URI;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsUtils;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -341,7 +350,7 @@
 
           // We shouldn't have gained an extra block by the RPC.
           assertEquals(blockCount, blockCount2);
-          return (LocatedBlock) ret2;
+          return ret2;
         }
       }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
@@ -798,5 +807,161 @@
       cluster.shutdown();
     }
   }
-}
 
+  /** Test client retry with namenode restarting. */
+  public void testNamenodeRestart() throws Exception {
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+
+    final List<Exception> exceptions = new ArrayList<Exception>();
+
+    final Path dir = new Path("/testNamenodeRestart");
+
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+
+    final short numDatanodes = 3;
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes)
+        .build();
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final URI uri = dfs.getUri();
+      assertTrue(HdfsUtils.isHealthy(uri));
+
+      //create a file
+      final long length = 1L << 20;
+      final Path file1 = new Path(dir, "foo"); 
+      DFSTestUtil.createFile(dfs, file1, length, numDatanodes, 20120406L);
+
+      //get file status
+      final FileStatus s1 = dfs.getFileStatus(file1);
+      assertEquals(length, s1.getLen());
+
+      //shutdown namenode
+      assertTrue(HdfsUtils.isHealthy(uri));
+      cluster.shutdownNameNode(0);
+      assertFalse(HdfsUtils.isHealthy(uri));
+
+      //namenode is down, create another file in a thread
+      final Path file3 = new Path(dir, "file"); 
+      final Thread thread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //it should retry till namenode is up.
+            final FileSystem fs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+            DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      thread.start();
+
+      //restart namenode in a new thread
+      new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //sleep, restart, and then wait active
+            TimeUnit.SECONDS.sleep(30);
+            assertFalse(HdfsUtils.isHealthy(uri));
+            cluster.restartNameNode(0, false);
+            cluster.waitActive();
+            assertTrue(HdfsUtils.isHealthy(uri));
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      }).start();
+
+      //namenode is down, it should retry until namenode is up again. 
+      final FileStatus s2 = dfs.getFileStatus(file1);
+      assertEquals(s1, s2);
+
+      //check file1 and file3
+      thread.join();
+      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file3));
+
+      //enter safe mode
+      assertTrue(HdfsUtils.isHealthy(uri));
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      assertFalse(HdfsUtils.isHealthy(uri));
+      
+      //leave safe mode in a new thread
+      new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //sleep and then leave safe mode
+            TimeUnit.SECONDS.sleep(30);
+            assertFalse(HdfsUtils.isHealthy(uri));
+            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+            assertTrue(HdfsUtils.isHealthy(uri));
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      }).start();
+
+      //namenode is in safe mode, create should retry until it leaves safe mode.
+      final Path file2 = new Path(dir, "bar");
+      DFSTestUtil.createFile(dfs, file2, length, numDatanodes, 20120406L);
+      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file2));
+      
+      assertTrue(HdfsUtils.isHealthy(uri));
+
+      //make sure it won't retry on exceptions like FileNotFoundException
+      final Path nonExisting = new Path(dir, "nonExisting");
+      LOG.info("setPermission: " + nonExisting);
+      try {
+        dfs.setPermission(nonExisting, new FsPermission((short)0));
+        fail();
+      } catch(FileNotFoundException fnfe) {
+        LOG.info("GOOD!", fnfe);
+      }
+
+      if (!exceptions.isEmpty()) {
+        LOG.error("There are " + exceptions.size() + " exception(s):");
+        for(int i = 0; i < exceptions.size(); i++) {
+          LOG.error("Exception " + i, exceptions.get(i));
+        }
+        fail();
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  public void testMultipleLinearRandomRetry() {
+    parseMultipleLinearRandomRetry(null, "");
+    parseMultipleLinearRandomRetry(null, "11");
+    parseMultipleLinearRandomRetry(null, "11,22,33");
+    parseMultipleLinearRandomRetry(null, "11,22,33,44,55");
+    parseMultipleLinearRandomRetry(null, "AA");
+    parseMultipleLinearRandomRetry(null, "11,AA");
+    parseMultipleLinearRandomRetry(null, "11,22,33,FF");
+    parseMultipleLinearRandomRetry(null, "11,-22");
+    parseMultipleLinearRandomRetry(null, "-11,22");
+
+    parseMultipleLinearRandomRetry("[22x11ms]",
+        "11,22");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms]",
+        "11,22,33,44");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+        "11,22,33,44,55,66");
+    parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+        "   11,   22, 33,  44, 55,  66   ");
+  }
+  
+  static void parseMultipleLinearRandomRetry(String expected, String s) {
+    final MultipleLinearRandomRetry r = MultipleLinearRandomRetry.parseCommaSeparatedString(s);
+    LOG.info("input=" + s + ", parsed=" + r + ", expected=" + expected);
+    if (r == null) {
+      assertEquals(expected, null);
+    } else {
+      assertEquals("MultipleLinearRandomRetry" + expected, r.toString());
+    }
+  }
+}
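Illustrative sketch (not part of the patch): the new testNamenodeRestart depends on the client retry policy toggled by DFS_CLIENT_RETRY_POLICY_ENABLED_KEY and on HdfsUtils.isHealthy(uri). A minimal client-side example of the same pattern; the namenode address here is an assumption:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsUtils;

/** Sketch: a client that retries through a namenode restart instead of failing fast. */
public class RetryingClient {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);

    final URI uri = URI.create("hdfs://localhost:8020");      // assumed NN address
    System.out.println("healthy = " + HdfsUtils.isHealthy(uri));

    final FileSystem fs = FileSystem.get(uri, conf);
    // With the retry policy enabled, calls such as this one keep retrying while
    // the namenode is down or restarting, rather than throwing immediately.
    System.out.println(fs.getFileStatus(new Path("/")));
  }
}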
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
index 5f9ad32..f63ba9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
@@ -65,8 +65,8 @@
         in = (HdfsDataInputStream) dfs.open(path);
         Assert.fail("Expected IOException");
       } catch (IOException e) {
-        Assert.assertEquals("Could not obtain the last block locations.", e
-            .getLocalizedMessage());
+        Assert.assertTrue(e.getLocalizedMessage().indexOf(
+            "Name node is in safe mode") >= 0);
       }
     } finally {
       if (null != in) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelLocalRead.java
new file mode 100644
index 0000000..05702d2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelLocalRead.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestParallelLocalRead extends TestParallelReadUtil {
+
+  @BeforeClass
+  static public void setupCluster() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
+        false);
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        UserGroupInformation.getCurrentUser().getShortUserName());
+
+    setupCluster(1, conf);
+  }
+
+  @AfterClass
+  static public void teardownCluster() throws Exception {
+    TestParallelReadUtil.teardownCluster();
+  }
+
+  /**
+   * Do parallel reads several times with different numbers of files and threads.
+   *
+   * Note that while this is the only "test" in a junit sense, we're actually
+   * dispatching a lot more. Failures in the other methods (and other threads)
+   * need to be manually collected, which is inconvenient.
+   */
+  @Test
+  public void testParallelReadCopying() throws IOException {
+    runTestWorkload(new CopyingReadWorkerHelper());
+  }
+
+  @Test
+  public void testParallelReadByteBuffer() throws IOException {
+    runTestWorkload(new DirectReadWorkerHelper());
+  }
+
+  @Test
+  public void testParallelReadMixed() throws IOException {
+    runTestWorkload(new MixedWorkloadHelper());
+  }
+}
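Illustrative sketch (not part of the patch): the new TestParallelLocalRead turns on short-circuit local reads purely through configuration. The same three keys collected into a helper, assuming the current user is the one allowed to read block files directly:

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;

/** Sketch: configuration needed before short-circuit (local) reads are attempted. */
public class ShortCircuitReadConf {
  public static HdfsConfiguration create() throws Exception {
    final HdfsConfiguration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
    // only this user may bypass the datanode and read block files off local disk
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        UserGroupInformation.getCurrentUser().getShortUserName());
    return conf;
  }
}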
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index cb98929..497d29d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -18,39 +18,34 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.List;
-import java.util.Random;
-import static org.junit.Assert.*;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
-
 /**
  * A JUnit test for checking if restarting DFS preserves the
  * blocks that are part of an unclosed file.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index a747a33..19818a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
@@ -773,7 +771,60 @@
       final ContentSummary computed) {
     assertEquals(expected.toString(), computed.toString());
   }
-
+ 
+  /**
+   * Test limit cases for setting space quotas.
+   */
+  @Test
+  public void testMaxSpaceQuotas() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    
+    // create test directory
+    final Path testFolder = new Path("/testFolder");
+    assertTrue(dfs.mkdirs(testFolder));
+    
+    // setting namespace quota to Long.MAX_VALUE - 1 should work
+    dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
+    ContentSummary c = dfs.getContentSummary(testFolder);
+    assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
+    
+    // setting diskspace quota to Long.MAX_VALUE - 1 should work
+    dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
+    c = dfs.getContentSummary(testFolder);
+    assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
+    
+    // setting namespace quota to Long.MAX_VALUE should be ignored, with no error
+    dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
+    c = dfs.getContentSummary(testFolder);
+    assertTrue("Quota should not have changed", c.getQuota() == 10);
+    
+    // setting diskspace quota to Long.MAX_VALUE should be ignored, with no error
+    dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
+    c = dfs.getContentSummary(testFolder);
+    assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
+    
+    // setting namespace quota to Long.MAX_VALUE + 1 should fail with an error
+    try {
+      dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
+      fail("Exception not thrown");
+    } catch (IllegalArgumentException e) {
+      // Expected
+    }
+    
+    // setting diskspace quota to Long.MAX_VALUE + 1 should fail with an error
+    try {
+      dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
+      fail("Exception not thrown");
+    } catch (IllegalArgumentException e) {
+      // Expected
+    }
+  }
+  
   /**
    * Violate a space quota using files of size < 1 block. Test that block
    * allocation conservatively assumes that for quota checking the entire
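Illustrative sketch (not part of the patch): the new testMaxSpaceQuotas pins down the boundary behaviour of setQuota. A condensed example of those limits, assuming an already running DistributedFileSystem handle:

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Sketch: the largest values setQuota accepts, per the new testMaxSpaceQuotas. */
public class QuotaLimits {
  static void show(DistributedFileSystem dfs, Path dir) throws Exception {
    // Long.MAX_VALUE - 1 is the largest quota that is actually applied.
    dfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    ContentSummary c = dfs.getContentSummary(dir);
    System.out.println("ns quota = " + c.getQuota()
        + ", space quota = " + c.getSpaceQuota());

    // Long.MAX_VALUE is accepted but leaves the existing quotas unchanged.
    dfs.setQuota(dir, Long.MAX_VALUE, Long.MAX_VALUE);

    // Long.MAX_VALUE + 1 overflows to a negative long and is rejected.
    try {
      dfs.setQuota(dir, Long.MAX_VALUE + 1, 10);
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}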
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 3da6864..d880012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -31,9 +32,11 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.junit.Assert.*;
@@ -372,4 +375,76 @@
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
   }
+  
+  @Test
+  public void testSafeModeWhenZeroBlockLocations() throws IOException {
+
+    try {
+      Path file1 = new Path("/tmp/testManualSafeMode/file1");
+      Path file2 = new Path("/tmp/testManualSafeMode/file2");
+      
+      System.out.println("Created file1 and file2.");
+      
+      // create two files with one block each.
+      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+      checkGetBlockLocationsWorks(fs, file1);
+      
+      NameNode namenode = cluster.getNameNode();
+
+      // manually set safemode.
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      assertTrue("should still be in SafeMode", namenode.isInSafeMode());
+      // getBlock locations should still work since block locations exists
+      checkGetBlockLocationsWorks(fs, file1);
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+      
+      
+      // Now 2nd part of the tests where there aren't block locations
+      cluster.shutdownDataNodes();
+      cluster.shutdownNameNode(0);
+      
+      // now bring up just the NameNode.
+      cluster.restartNameNode();
+      cluster.waitActive();
+      
+      System.out.println("Restarted cluster with just the NameNode");
+      
+      namenode = cluster.getNameNode();
+      
+      assertTrue("No datanode is started. Should be in SafeMode", 
+                 namenode.isInSafeMode());
+      FileStatus stat = fs.getFileStatus(file1);
+      try {
+        fs.getFileBlockLocations(stat, 0, 1000);
+        assertTrue("Should have got safemode exception", false);
+      } catch (SafeModeException e) {
+        // as expected 
+      } catch (RemoteException re) {
+        if (!re.getClassName().equals(SafeModeException.class.getName()))
+          assertTrue("Should have got safemode exception", false);   
+      }
+
+
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);      
+      assertFalse("Should not be in safemode", namenode.isInSafeMode());
+      checkGetBlockLocationsWorks(fs, file1);
+
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster!= null) cluster.shutdown();
+    }
+  }
+  
+  void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
+    FileStatus stat = fs.getFileStatus(fileName);
+    try {  
+      fs.getFileBlockLocations(stat, 0, 1000);
+    } catch (SafeModeException e) {
+      assertTrue("Should have not got safemode exception", false);
+    } catch (RemoteException re) {
+      assertTrue("Should have not got safemode exception", false);   
+    }    
+  }
 }
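Illustrative sketch (not part of the patch): testSafeModeWhenZeroBlockLocations shows that getFileBlockLocations only fails in safe mode when the namenode has no block locations, and that the SafeModeException may arrive wrapped in a RemoteException. A small helper demonstrating the unwrapping, with fs and path assumed to exist:

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.ipc.RemoteException;

/** Sketch: tell a safe-mode rejection apart from other block-location failures. */
public class SafeModeProbe {
  static boolean blockedBySafeMode(FileSystem fs, Path path) throws IOException {
    final FileStatus stat = fs.getFileStatus(path);
    try {
      fs.getFileBlockLocations(stat, 0, stat.getLen());
      return false;                                    // locations were served
    } catch (SafeModeException e) {
      return true;                                     // thrown locally
    } catch (RemoteException re) {
      // server-side exceptions arrive wrapped; compare the original class name
      return SafeModeException.class.getName().equals(re.getClassName());
    }
  }
}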
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 743fb3b..07682fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,14 +31,9 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
@@ -381,11 +378,11 @@
   }
   
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
-    INodeFile iNode = Mockito.mock(INodeFile.class);
-    Mockito.doReturn((short)3).when(iNode).getReplication();
+    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    Mockito.doReturn((short)3).when(bc).getReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
-    bm.blocksMap.addBlockCollection(blockInfo, iNode);
+    bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ce570f7..aefd0be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -24,6 +24,7 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -34,7 +35,6 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
@@ -61,7 +61,7 @@
         DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")        
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
       };
 
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -587,4 +587,50 @@
         fifthPrioritySize, chosenBlocks.get(
             UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
   }
+  
+  /**
+   * Test that chooseReplicaToDelete picks replicas for removal based on
+   * block locality and free space.
+   */
+  @Test
+  public void testChooseReplicaToDelete() throws Exception {
+    List<DatanodeDescriptor> replicaNodeList = new 
+        ArrayList<DatanodeDescriptor>();
+    final Map<String, List<DatanodeDescriptor>> rackMap
+        = new HashMap<String, List<DatanodeDescriptor>>();
+    
+    dataNodes[0].setRemaining(4*1024*1024);
+    replicaNodeList.add(dataNodes[0]);
+    
+    dataNodes[1].setRemaining(3*1024*1024);
+    replicaNodeList.add(dataNodes[1]);
+    
+    dataNodes[2].setRemaining(2*1024*1024);
+    replicaNodeList.add(dataNodes[2]);
+    
+    dataNodes[5].setRemaining(1*1024*1024);
+    replicaNodeList.add(dataNodes[5]);
+    
+    List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+    replicator.splitNodesWithRack(
+        replicaNodeList, rackMap, first, second);
+    // dataNodes[0] and dataNodes[1] are in the first set as their rack has two
+    // replica nodes, while dataNodes[2] and dataNodes[5] are in the second set.
+    assertEquals(2, first.size());
+    assertEquals(2, second.size());
+    DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+        null, null, (short)3, first, second);
+    // Within the first set, dataNodes[1] has the least free space
+    assertEquals(chosenNode, dataNodes[1]);
+
+    replicator.adjustSetsWithChosenReplica(
+        rackMap, first, second, chosenNode);
+    assertEquals(0, first.size());
+    assertEquals(3, second.size());
+    // Within the second set, dataNodes[5] has the least free space
+    chosenNode = replicator.chooseReplicaToDelete(
+        null, null, (short)2, first, second);
+    assertEquals(chosenNode, dataNodes[5]);
+  }
 }
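A minimal stand-alone sketch, not part of the patch above: the new test depends on chooseReplicaToDelete preferring, within a candidate set, the replica whose datanode reports the least remaining space. The helper below mirrors only that tie-breaker; Replica and getRemaining() are hypothetical names, not HDFS APIs.

import java.util.List;

// Illustrative sketch only -- mirrors the "delete the replica with the least
// remaining space" preference exercised by testChooseReplicaToDelete above.
// Replica and getRemaining() are hypothetical, not HDFS types.
class LeastRemainingSpaceChooser {
  interface Replica {
    long getRemaining();
  }

  /** Returns the candidate reporting the least remaining space, or null if empty. */
  static Replica choose(List<? extends Replica> candidates) {
    Replica chosen = null;
    for (Replica r : candidates) {
      if (chosen == null || r.getRemaining() < chosen.getRemaining()) {
        chosen = r;
      }
    }
    return chosen;
  }
}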
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index 78d20ad..8584cc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -28,8 +28,6 @@
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -52,14 +50,21 @@
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.net.NetUtils;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * This class tests if block replacement request to data nodes work correctly.
  */
-public class TestBlockReplacement extends TestCase {
+public class TestBlockReplacement {
   private static final Log LOG = LogFactory.getLog(
   "org.apache.hadoop.hdfs.TestBlockReplacement");
 
   MiniDFSCluster cluster;
+  @Test
   public void testThrottler() throws IOException {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -83,6 +88,7 @@
     assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec);
   }
   
+  @Test
   public void testBlockReplacement() throws IOException, TimeoutException {
     final Configuration CONF = new HdfsConfiguration();
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
index 14a0da3..9b52252 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
@@ -34,9 +34,9 @@
 import org.junit.Test;
 
 
-public class TestMulitipleNNDataBlockScanner {
+public class TestMultipleNNDataBlockScanner {
   private static final Log LOG = 
-    LogFactory.getLog(TestMulitipleNNDataBlockScanner.class);
+    LogFactory.getLog(TestMultipleNNDataBlockScanner.class);
   Configuration conf;
   MiniDFSCluster cluster = null;
   String bpids[] = new String[3];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index ab2640d..6794591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -56,7 +56,7 @@
   public static LocatedBlocks getBlockLocations(NameNode namenode,
       String src, long offset, long length) throws IOException {
     return namenode.getNamesystem().getBlockLocations(
-        src, offset, length, false, true);
+        src, offset, length, false, true, true);
   }
   
   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
new file mode 100644
index 0000000..694d84f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.After;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.RollingFileAppender;
+import org.junit.Test;
+
+/**
+ * A JUnit test that audit logs are generated
+ */
+public class TestAuditLogs {
+  static final String auditLogFile = System.getProperty("test.build.dir",
+      "build/test") + "/audit.log";
+  
+  // Pattern for: 
+  // allowed=(true|false) ugi=name ip=/address cmd={cmd} src={path} dst=null perm=null
+  static final Pattern auditPattern = Pattern.compile(
+      "allowed=.*?\\s" +
+      "ugi=.*?\\s" + 
+      "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
+      "cmd=.*?\\ssrc=.*?\\sdst=null\\s" + 
+      "perm=.*?");
+  static final Pattern successPattern = Pattern.compile(
+      ".*allowed=true.*");
+  static final String username = "bob";
+  static final String[] groups = { "group1" };
+  static final String fileName = "/srcdat";
+
+  DFSTestUtil util;
+  MiniDFSCluster cluster;
+  FileSystem fs;
+  String fnames[];
+  Configuration conf;
+  UserGroupInformation userGroupInfo;
+
+  @Before
+  public void setupCluster() throws Exception {
+    conf = new HdfsConfiguration();
+    final long precision = 1L;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+    util = new DFSTestUtil("TestAuditAllowed", 20, 3, 8*1024);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    fs = cluster.getFileSystem();
+    util.createFiles(fs, fileName);
+
+    fnames = util.getFileNames(fileName);
+    util.waitReplication(fs, fileName, (short)3);
+    userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
+  }
+
+  @After
+  public void teardownCluster() throws Exception {
+    util.cleanup(fs, "/srcdat");
+    fs.close();
+    cluster.shutdown();
+  }
+
+  /** test that allowed operation puts proper entry in audit log */
+  @Test
+  public void testAuditAllowed() throws Exception {
+    final Path file = new Path(fnames[0]);
+    FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
+
+    setupAuditLogs();
+    InputStream istream = userfs.open(file);
+    int val = istream.read();
+    istream.close();
+    verifyAuditLogs(true);
+    assertTrue("failed to read from file", val > 0);
+  }
+
+  /** test that denied operation puts proper entry in audit log */
+  @Test
+  public void testAuditDenied() throws Exception {
+    final Path file = new Path(fnames[0]);
+    FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
+
+    fs.setPermission(file, new FsPermission((short)0600));
+    fs.setOwner(file, "root", null);
+
+    setupAuditLogs();
+
+    try {
+      userfs.open(file);
+      fail("open must not succeed");
+    } catch(AccessControlException e) {
+      System.out.println("got access denied, as expected.");
+    }
+    verifyAuditLogs(false);
+  }
+
+  /** Sets up log4j logger for auditlogs */
+  private void setupAuditLogs() throws IOException {
+    File file = new File(auditLogFile);
+    if (file.exists()) {
+      file.delete();
+    }
+    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.setLevel(Level.INFO);
+    PatternLayout layout = new PatternLayout("%m%n");
+    RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
+    logger.addAppender(appender);
+  }
+
+  private void verifyAuditLogs(boolean expectSuccess) throws IOException {
+    // Turn off the logs
+    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.setLevel(Level.OFF);
+    
+    // Ensure audit log has only one entry
+    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
+    String line = reader.readLine();
+    assertNotNull(line);
+    assertTrue("Expected audit event not found in audit log",
+        auditPattern.matcher(line).matches());
+    assertTrue("Expected success=" + expectSuccess,
+               successPattern.matcher(line).matches() == expectSuccess);
+    assertNull("Unexpected event in audit log", reader.readLine());
+  }
+}
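A quick stand-alone check, not part of the patch, of the kind of line the auditPattern above is written to match; the sample audit line is fabricated for illustration only.

import java.util.regex.Pattern;

// Illustrative only: exercises the same regex as TestAuditLogs#auditPattern
// against a fabricated sample line of the expected form.
public class AuditPatternDemo {
  static final Pattern AUDIT = Pattern.compile(
      "allowed=.*?\\s" +
      "ugi=.*?\\s" +
      "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
      "cmd=.*?\\ssrc=.*?\\sdst=null\\s" +
      "perm=.*?");

  public static void main(String[] args) {
    String sample = "allowed=true\tugi=bob\tip=/127.0.0.1\t"
        + "cmd=open\tsrc=/srcdat/file0\tdst=null\tperm=null";
    System.out.println(AUDIT.matcher(sample).matches()); // expected: true
  }
}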
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 5e77d73..ee037cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -506,21 +506,29 @@
     FSImage fsimage = namesystem.getFSImage();
     final FSEditLog editLog = fsimage.getEditLog();
     fileSys.mkdirs(new Path("/tmp"));
-    StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
+
+    Iterator<StorageDirectory> iter = fsimage.getStorage().
+      dirIterator(NameNodeDirType.EDITS);
+    LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
+    while (iter.hasNext()) {
+      sds.add(iter.next());
+    }
     editLog.close();
     cluster.shutdown();
 
-    File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
-    assertTrue(editFile.exists());
-
-    long fileLen = editFile.length();
-    System.out.println("File name: " + editFile + " len: " + fileLen);
-    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
-    rwf.seek(fileLen-4); // seek to checksum bytes
-    int b = rwf.readInt();
-    rwf.seek(fileLen-4);
-    rwf.writeInt(b+1);
-    rwf.close();
+    for (StorageDirectory sd : sds) {
+      File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
+      assertTrue(editFile.exists());
+  
+      long fileLen = editFile.length();
+      LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(fileLen-4); // seek to checksum bytes
+      int b = rwf.readInt();
+      rwf.seek(fileLen-4);
+      rwf.writeInt(b+1);
+      rwf.close();
+    }
     
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
@@ -1232,6 +1240,113 @@
     }
   }
 
+  private static long readAllEdits(Collection<EditLogInputStream> streams,
+      long startTxId) throws IOException {
+    FSEditLogOp op;
+    long nextTxId = startTxId;
+    long numTx = 0;
+    for (EditLogInputStream s : streams) {
+      while (true) {
+        op = s.readOp();
+        if (op == null)
+          break;
+        if (op.getTransactionId() != nextTxId) {
+          throw new IOException("out of order transaction ID!  expected " +
+              nextTxId + " but got " + op.getTransactionId() + " when " +
+              "reading " + s.getName());
+        }
+        numTx++;
+        nextTxId = op.getTransactionId() + 1;
+      }
+    }
+    return numTx;
+  }
+
+  /**
+   * Test edit log failover.  If a single edit log is missing, the other
+   * edit logs should be used instead.
+   */
+  @Test
+  public void testEditLogFailOverFromMissing() throws IOException {
+    File f1 = new File(TEST_DIR + "/failover0");
+    File f2 = new File(TEST_DIR + "/failover1");
+    List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
+
+    NNStorage storage = setupEdits(editUris, 3);
+    
+    final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
+    final long endErrorTxId = 2*TXNS_PER_ROLL;
+
+    File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, 
+                                  endErrorTxId))) {
+            return true;
+          }
+          return false;
+        }
+      });
+    assertEquals(1, files.length);
+    assertTrue(files[0].delete());
+
+    FSEditLog editlog = getFSEditLog(storage);
+    editlog.initJournalsForWrite();
+    long startTxId = 1;
+    try {
+      readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
+          startTxId);
+    } catch (IOException e) {
+      LOG.error("edit log failover didn't work", e);
+      fail("Edit log failover didn't work");
+    }
+  }
+
+  /** 
+   * Test edit log failover from a corrupt edit log
+   */
+  @Test
+  public void testEditLogFailOverFromCorrupt() throws IOException {
+    File f1 = new File(TEST_DIR + "/failover0");
+    File f2 = new File(TEST_DIR + "/failover1");
+    List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
+
+    NNStorage storage = setupEdits(editUris, 3);
+    
+    final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
+    final long endErrorTxId = 2*TXNS_PER_ROLL;
+
+    File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, 
+                                  endErrorTxId))) {
+            return true;
+          }
+          return false;
+        }
+      });
+    assertEquals(1, files.length);
+
+    long fileLen = files[0].length();
+    LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
+    RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
+    rwf.seek(fileLen-4); // seek to checksum bytes
+    int b = rwf.readInt();
+    rwf.seek(fileLen-4);
+    rwf.writeInt(b+1);
+    rwf.close();
+    
+    FSEditLog editlog = getFSEditLog(storage);
+    editlog.initJournalsForWrite();
+    long startTxId = 1;
+    try {
+      readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
+          startTxId);
+    } catch (IOException e) {
+      LOG.error("edit log failover didn't work", e);
+      fail("Edit log failover didn't work");
+    }
+  }
+
   /**
    * Test creating a directory with lots and lots of edit log segments
    */
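The same "flip the trailing checksum int" idiom appears in both new hunks above (corrupting each finalized edits file, and the failover-from-corrupt test). A stand-alone helper, not part of the patch, showing the idiom in isolation:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

// Illustrative helper, not part of the patch: increments the last four bytes
// of an edits file, i.e. the checksum of its final op, so readers see a
// checksum mismatch when they reach the end of the log.
public final class EditLogCorruptor {
  private EditLogCorruptor() {}

  public static void corruptTrailingChecksum(File editFile) throws IOException {
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    try {
      long fileLen = editFile.length();
      rwf.seek(fileLen - 4);   // seek to checksum bytes
      int b = rwf.readInt();
      rwf.seek(fileLen - 4);
      rwf.writeInt(b + 1);     // any change invalidates the checksum
    } finally {
      rwf.close();
    }
  }
}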
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index d39df40..c1ca4ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -51,6 +51,16 @@
   }
 
   @Test
+  public void testConstants() {
+    // Each call to FSEditLogOp#Reader#readOp can read at most MAX_OP_SIZE bytes
+    // before getting an exception.  So we don't want to preallocate a longer
+    // region than MAX_OP_SIZE, because then we'd get an IOException when reading
+    // through the padding at the end of the file.
+    assertTrue(EditLogFileOutputStream.PREALLOCATION_LENGTH <
+        FSEditLogOp.MAX_OP_SIZE);
+  }
+
+  @Test
   public void testPreallocation() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index a54df2c..267e128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -77,7 +77,7 @@
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
-        .build();
+        .enableManagedDfsDirsRedundancy(false).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
     final FSNamesystem namesystem = cluster.getNamesystem();
@@ -107,7 +107,7 @@
     bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
-          .format(false).build();
+          .enableManagedDfsDirsRedundancy(false).format(false).build();
       fail("should not be able to start");
     } catch (IOException e) {
       assertTrue("error message contains opcodes message",
@@ -327,6 +327,56 @@
   }
 
   @Test
+  public void testValidateEditLogWithCorruptBody() throws IOException {
+    File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptBody");
+    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+    final int NUM_TXNS = 20;
+    File logFile = prepareUnfinalizedTestEditLog(testDir, NUM_TXNS,
+        offsetToTxId);
+    // Back up the uncorrupted log
+    File logFileBak = new File(testDir, logFile.getName() + ".bak");
+    Files.copy(logFile, logFileBak);
+    EditLogValidation validation =
+        EditLogFileInputStream.validateEditLog(logFile);
+    assertTrue(!validation.hasCorruptHeader());
+    // We expect that there will be an OP_START_LOG_SEGMENT, followed by
+    // NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
+    assertEquals(NUM_TXNS + 1, validation.getEndTxId());
+    // Corrupt each edit and verify that validation continues to work
+    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
+      long txOffset = entry.getKey();
+      long txId = entry.getValue();
+
+      // Restore backup, corrupt the txn opcode
+      Files.copy(logFileBak, logFile);
+      corruptByteInFile(logFile, txOffset);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
+      long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
+          NUM_TXNS : (NUM_TXNS + 1);
+      assertEquals("Failed when corrupting txn opcode at " + txOffset,
+          expectedEndTxId, validation.getEndTxId());
+      assertTrue(!validation.hasCorruptHeader());
+    }
+
+    // Truncate right before each edit and verify that validation continues
+    // to work
+    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
+      long txOffset = entry.getKey();
+      long txId = entry.getValue();
+
+      // Restore backup, truncate right before the txn
+      Files.copy(logFileBak, logFile);
+      truncateFile(logFile, txOffset);
+      validation = EditLogFileInputStream.validateEditLog(logFile);
+      long expectedEndTxId = (txId == 0) ?
+          HdfsConstants.INVALID_TXID : (txId - 1);
+      assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
+        "at " + txOffset, expectedEndTxId, validation.getEndTxId());
+      assertTrue(!validation.hasCorruptHeader());
+    }
+  }
+
+  @Test
   public void testValidateEmptyEditLog() throws IOException {
     File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
     SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index e972f59..80d7958 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -20,10 +20,10 @@
 import static org.junit.Assert.*;
 
 import java.net.URI;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Iterator;
+import java.util.PriorityQueue;
 
 import java.io.RandomAccessFile;
 import java.io.File;
@@ -33,7 +33,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -45,7 +44,6 @@
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.TreeMultiset;
 import com.google.common.base.Joiner;
 
 public class TestFileJournalManager {
@@ -64,12 +62,13 @@
   static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
       boolean inProgressOk, boolean abortOnGap) throws IOException {
     long numTransactions = 0, txId = fromTxId;
-    final TreeMultiset<EditLogInputStream> allStreams =
-        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    final PriorityQueue<EditLogInputStream> allStreams = 
+        new PriorityQueue<EditLogInputStream>(64,
+            JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
     jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
-
+    EditLogInputStream elis = null;
     try {
-      for (EditLogInputStream elis : allStreams) {
+      while ((elis = allStreams.poll()) != null) {
         elis.skipUntil(txId);
         while (true) {
           FSEditLogOp op = elis.readOp();
@@ -87,6 +86,7 @@
       }
     } finally {
       IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+      IOUtils.cleanup(LOG, elis);
     }
     return numTransactions;
   }
@@ -379,27 +379,28 @@
   
   private static EditLogInputStream getJournalInputStream(JournalManager jm,
       long txId, boolean inProgressOk) throws IOException {
-    final TreeMultiset<EditLogInputStream> allStreams =
-        TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+    final PriorityQueue<EditLogInputStream> allStreams = 
+        new PriorityQueue<EditLogInputStream>(64,
+            JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
     jm.selectInputStreams(allStreams, txId, inProgressOk);
+    EditLogInputStream elis = null, ret;
     try {
-      for (Iterator<EditLogInputStream> iter = allStreams.iterator();
-          iter.hasNext();) {
-        EditLogInputStream elis = iter.next();
+      while ((elis = allStreams.poll()) != null) {
         if (elis.getFirstTxId() > txId) {
           break;
         }
         if (elis.getLastTxId() < txId) {
-          iter.remove();
           elis.close();
           continue;
         }
         elis.skipUntil(txId);
-        iter.remove();
-        return elis;
+        ret = elis;
+        elis = null;
+        return ret;
       }
     } finally {
       IOUtils.cleanup(LOG,  allStreams.toArray(new EditLogInputStream[0]));
+      IOUtils.cleanup(LOG,  elis);
     }
     return null;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 9b59802..531dc87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -76,8 +76,9 @@
       "build/test") + "/audit.log";
   
   // Pattern for: 
-  // ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
+  // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
   static final Pattern fsckPattern = Pattern.compile(
+      "allowed=.*?\\s" +
       "ugi=.*?\\s" + 
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + 
       "cmd=fsck\\ssrc=\\/\\sdst=null\\s" + 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 608ee26..c1287e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -343,7 +343,7 @@
     StorageDirectory sd = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .build();
+          .enableManagedDfsDirsRedundancy(false).build();
       cluster.waitActive();
       if (!finalize) {
         // Normally, the in-progress edit log would be finalized by
@@ -379,7 +379,7 @@
     try {
       LOG.debug("trying to start normally (this should fail)...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .format(false).build();
+          .enableManagedDfsDirsRedundancy(false).format(false).build();
       cluster.waitActive();
       cluster.shutdown();
       if (needRecovery) {
@@ -404,7 +404,8 @@
     try {
       LOG.debug("running recovery...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .format(false).startupOption(recoverStartOpt).build();
+          .enableManagedDfsDirsRedundancy(false).format(false)
+          .startupOption(recoverStartOpt).build();
     } catch (IOException e) {
       fail("caught IOException while trying to recover. " +
           "message was " + e.getMessage() +
@@ -420,7 +421,7 @@
     try {
       LOG.debug("starting cluster normally after recovery...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .format(false).build();
+          .enableManagedDfsDirsRedundancy(false).format(false).build();
       LOG.debug("successfully recovered the " + corruptor.getName() +
           " corrupted edit log");
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 74c3cf8..c265579 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -100,6 +100,9 @@
         fileAsURI(new File(hdfsDir, "name")).toString());
     config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
         new File(hdfsDir, "data").getPath());
+    config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+    config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
         fileAsURI(new File(hdfsDir, "secondary")).toString());
     config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
index 79dcec4..794a3b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.*;
 
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collections;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
@@ -118,8 +120,8 @@
     }
   }
   
-  @Test
-  public void testFailoverFinalizesAndReadsInProgress() throws Exception {
+  private void testFailoverFinalizesAndReadsInProgress(
+      boolean partialTxAtEnd) throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())
@@ -130,8 +132,21 @@
       URI sharedUri = cluster.getSharedEditsDir(0, 1);
       File sharedDir = new File(sharedUri.getPath(), "current");
       FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1);
+      
       assertEditFiles(Collections.singletonList(sharedUri),
           NNStorage.getInProgressEditsFileName(1));
+      if (partialTxAtEnd) {
+        FileOutputStream outs = null;
+        try {
+          File editLogFile =
+              new File(sharedDir, NNStorage.getInProgressEditsFileName(1));
+          outs = new FileOutputStream(editLogFile, true);
+          outs.write(new byte[] { 0x18, 0x00, 0x00, 0x00 } );
+          LOG.error("editLogFile = " + editLogFile);
+        } finally {
+          IOUtils.cleanup(LOG, outs);
+        }
+      }
 
       // Transition one of the NNs to active
       cluster.transitionToActive(0);
@@ -149,7 +164,18 @@
     } finally {
       cluster.shutdown();
     }
+  }
+  
+  @Test
+  public void testFailoverFinalizesAndReadsInProgressSimple()
+      throws Exception {
+    testFailoverFinalizesAndReadsInProgress(false);
+  }
 
+  @Test
+  public void testFailoverFinalizesAndReadsInProgressWithPartialTxAtEnd()
+      throws Exception {
+    testFailoverFinalizesAndReadsInProgress(true);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
index cc9552a..a158a5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
 import static org.junit.Assert.*;
 
 import java.io.File;
@@ -127,6 +129,7 @@
   @Test
   public void testFailureOfSharedDir() throws Exception {
     Configuration conf = new Configuration();
+    conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 2000);
     
     // The shared edits dir will automatically be marked required.
     MiniDFSCluster cluster = null;
@@ -151,6 +154,15 @@
       assertEquals(0, FileUtil.chmod(sharedEditsDir.getAbsolutePath(), "-w",
           true));
 
+      Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
+          DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
+
+      NameNode nn1 = cluster.getNameNode(1);
+      assertTrue(nn1.isStandbyState());
+      assertFalse(
+          "Standby NameNode should not go to SafeMode on resource unavailability",
+          nn1.isInSafeMode());
+
       NameNode nn0 = cluster.getNameNode(0);
       nn0.getNamesystem().getFSImage().getEditLog().getJournalSet()
           .setRuntimeForTesting(mockRuntime);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
new file mode 100644
index 0000000..74373be
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.web.resources;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test WebHDFS which provides data locality using HTTP redirection.
+ */
+public class TestWebHdfsDataLocality {
+  static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class);
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+  }
+  
+  private static final String RACK0 = "/rack0";
+  private static final String RACK1 = "/rack1";
+  private static final String RACK2 = "/rack2";
+
+  @Test
+  public void testDataLocality() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+    final int nDataNodes = racks.length;
+    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(nDataNodes)
+        .racks(racks)
+        .build();
+    try {
+      cluster.waitActive();
+
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final NameNode namenode = cluster.getNameNode();
+      final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+          ).getDatanodeManager();
+      LOG.info("dm=" + dm);
+  
+      final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+      final String f = "/foo";
+
+      { //test CREATE
+        for(int i = 0; i < nDataNodes; i++) {
+          //set client address to a particular datanode
+          final DataNode dn = cluster.getDataNodes().get(i);
+          final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr();
+          NamenodeWebHdfsMethods.setRemoteAddress(ipAddr);
+
+          //The chosen datanode must be the same as the client address
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, conf);
+          Assert.assertEquals(ipAddr, chosen.getIpAddr());
+        }
+      }
+  
+      //create a file with one replica.
+      final Path p = new Path(f);
+      final FSDataOutputStream out = dfs.create(p, (short)1);
+      out.write(1);
+      out.close();
+  
+      //get replica location.
+      final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+          namenode, f, 0, 1);
+      final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+      Assert.assertEquals(1, lb.size());
+      final DatanodeInfo[] locations = lb.get(0).getLocations();
+      Assert.assertEquals(1, locations.length);
+      final DatanodeInfo expected = locations[0];
+      
+      //For GETFILECHECKSUM, OPEN and APPEND,
+      //the chosen datanode must be the same as the replica location.
+
+      { //test GETFILECHECKSUM
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+  
+      { //test OPEN
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+
+      { //test APPEND
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, conf);
+        Assert.assertEquals(expected, chosen);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 84f5521..a6746a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -22,11 +22,14 @@
 import java.io.IOException;
 import java.io.File;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.Map;
 import java.util.HashMap;
 
 import org.junit.Test;
 import org.junit.Before;
+
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.commons.logging.Log;
@@ -34,12 +37,12 @@
 
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
 
 public class TestOfflineEditsViewer {
-
   private static final Log LOG = LogFactory.getLog(TestOfflineEditsViewer.class);
 
   private static final Map<FSEditLogOpCodes, Boolean> obsoleteOpCodes =
@@ -97,8 +100,8 @@
     String editsReparsed  = cacheDir + "/editsReparsed";
 
     // parse to XML then back to binary
-    runOev(edits,          editsParsedXml, "xml");
-    runOev(editsParsedXml, editsReparsed,  "binary");
+    assertEquals(0, runOev(edits, editsParsedXml, "xml", false));
+    assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
 
     // judgment time
     assertTrue(
@@ -115,6 +118,42 @@
   }
 
   @Test
+  public void testRecoveryMode() throws IOException {
+    LOG.info("START - testing with generated edits");
+
+    nnHelper.startCluster(buildDir + "/dfs/");
+
+    // edits generated by nnHelper (MiniDFSCluster), should have all op codes
+    // binary, XML, reparsed binary
+    String edits          = nnHelper.generateEdits();
+    
+    // Corrupt the file by truncating the end
+    FileChannel editsFile = new FileOutputStream(edits, true).getChannel();
+    editsFile.truncate(editsFile.size() - 5);
+    
+    String editsParsedXml = cacheDir + "/editsRecoveredParsed.xml";
+    String editsReparsed  = cacheDir + "/editsRecoveredReparsed";
+    String editsParsedXml2 = cacheDir + "/editsRecoveredParsed2.xml";
+
+    // Can't read the corrupted file without recovery mode
+    assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
+    
+    // parse to XML then back to binary
+    assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
+    assertEquals(0, runOev(editsParsedXml, editsReparsed,  "binary", false));
+    assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
+
+    // judgment time
+    assertTrue("Test round trip",
+      filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2));
+
+    // removes edits so do this at the end
+    nnHelper.shutdownCluster();
+
+    LOG.info("END");
+  }
+
+  @Test
   public void testStored() throws IOException {
 
     LOG.info("START - testing with stored reference edits");
@@ -128,8 +167,9 @@
     String editsStoredXml          = cacheDir + "/editsStored.xml";
       
     // parse to XML then back to binary
-    runOev(editsStored,             editsStoredParsedXml, "xml");
-    runOev(editsStoredParsedXml,    editsStoredReparsed,  "binary");
+    assertEquals(0, runOev(editsStored, editsStoredParsedXml, "xml", false));
+    assertEquals(0, runOev(editsStoredParsedXml, editsStoredReparsed,
+        "binary", false));
 
     // judgement time
     assertTrue(
@@ -151,14 +191,18 @@
    * @param inFilename input edits filename
   * @param outFilename output edits filename
    */
-  private void runOev(String inFilename, String outFilename, String processor)
-    throws IOException {
+  private int runOev(String inFilename, String outFilename, String processor,
+      boolean recovery) throws IOException {
 
     LOG.info("Running oev [" + inFilename + "] [" + outFilename + "]");
 
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    if (oev.go(inFilename, outFilename, processor, true, false, null) != 0)
-      throw new RuntimeException("oev failed");
+    Flags flags = new Flags();
+    flags.setPrintToScreen();
+    if (recovery) {
+      flags.setRecoveryMode();
+    }
+    return oev.go(inFilename, outFilename, processor, flags, null);
   }
 
   /**
@@ -172,7 +216,7 @@
     FileOutputStream fout = new FileOutputStream(outFilename);
     StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(fout);
     OfflineEditsViewer oev = new OfflineEditsViewer();
-    if (oev.go(inFilename, outFilename, "stats", false, false, visitor) != 0)
+    if (oev.go(inFilename, outFilename, "stats", new Flags(), visitor) != 0)
       return false;
     LOG.info("Statistics for " + inFilename + "\n" +
       visitor.getStatisticsString());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 04ffd10..b49818c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.NamenodeRpcAddressParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -286,6 +287,10 @@
     final Path root = new Path("/");
     final Path dir = new Path("/test/testUrl");
     assertTrue(webhdfs.mkdirs(dir));
+    final Path file = new Path("/test/file");
+    final FSDataOutputStream out = webhdfs.create(file);
+    out.write(1);
+    out.close();
 
     {//test GETHOMEDIRECTORY
       final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
@@ -351,5 +356,47 @@
     {//test append.
       AppendTestUtil.testAppend(fs, new Path(dir, "append"));
     }
+
+    {//test NamenodeRpcAddressParam not set.
+      final HttpOpParam.Op op = PutOpParam.Op.CREATE;
+      final URL url = webhdfs.toUrl(op, dir);
+      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(false);
+      conn.setInstanceFollowRedirects(false);
+      conn.connect();
+      final String redirect = conn.getHeaderField("Location");
+      conn.disconnect();
+
+      //remove NamenodeRpcAddressParam
+      WebHdfsFileSystem.LOG.info("redirect = " + redirect);
+      final int i = redirect.indexOf(NamenodeRpcAddressParam.NAME);
+      final int j = redirect.indexOf("&", i);
+      String modified = redirect.substring(0, i - 1) + redirect.substring(j);
+      WebHdfsFileSystem.LOG.info("modified = " + modified);
+
+      //connect to datanode
+      conn = (HttpURLConnection)new URL(modified).openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setDoOutput(op.getDoOutput());
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
+    }
+
+    {//test jsonParse with non-json type.
+      final HttpOpParam.Op op = GetOpParam.Op.OPEN;
+      final URL url = webhdfs.toUrl(op, file);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+
+      try {
+        WebHdfsFileSystem.jsonParse(conn, false);
+        fail();
+      } catch(IOException ioe) {
+        WebHdfsFileSystem.LOG.info("GOOD", ioe);
+      }
+      conn.disconnect();
+    }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
index 9ea0a46..9ae0fb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
@@ -40,6 +40,12 @@
 public class WebHdfsTestUtil {
   public static final Log LOG = LogFactory.getLog(WebHdfsTestUtil.class);
 
+  public static Configuration createConf() {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    return conf;
+  }
+
   public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
       ) throws IOException, URISyntaxException {
     final String uri = WebHdfsFileSystem.SCHEME  + "://"
@@ -49,7 +55,7 @@
 
   public static WebHdfsFileSystem getWebHdfsFileSystemAs(
       final UserGroupInformation ugi, final Configuration conf
-      ) throws IOException, URISyntaxException, InterruptedException {
+      ) throws IOException, InterruptedException {
     return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
       @Override
       public WebHdfsFileSystem run() throws Exception {
@@ -70,7 +76,7 @@
       final int expectedResponseCode) throws IOException {
     conn.connect();
     Assert.assertEquals(expectedResponseCode, conn.getResponseCode());
-    return WebHdfsFileSystem.jsonParse(conn.getInputStream());
+    return WebHdfsFileSystem.jsonParse(conn, false);
   }
   
   public static HttpURLConnection twoStepWrite(HttpURLConnection conn,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
new file mode 100644
index 0000000..7dbd33a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+
+public class TestNetworkTopologyWithNodeGroup extends TestCase {
+  private final static NetworkTopologyWithNodeGroup cluster = new 
+      NetworkTopologyWithNodeGroup();
+
+  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1/s1"),
+      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1/s1"),
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r1/s2"),
+      DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2/s3"),
+      DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2/s3"),
+      DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d1/r2/s4"),
+      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3/s5"),
+      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3/s6")
+  };
+
+  private final static NodeBase computeNode = new NodeBase("/d1/r1/s1/h9");
+
+  static {
+    for(int i=0; i<dataNodes.length; i++) {
+      cluster.add(dataNodes[i]);
+    }
+  }
+
+  public void testNumOfChildren() throws Exception {
+    assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
+  }
+
+  public void testNumOfRacks() throws Exception {
+    assertEquals(cluster.getNumOfRacks(), 3);
+  }
+
+  public void testRacks() throws Exception {
+    assertEquals(cluster.getNumOfRacks(), 3);
+    assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
+    assertTrue(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
+    assertFalse(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
+    assertTrue(cluster.isOnSameRack(dataNodes[3], dataNodes[4]));
+    assertTrue(cluster.isOnSameRack(dataNodes[4], dataNodes[5]));
+    assertFalse(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
+    assertTrue(cluster.isOnSameRack(dataNodes[6], dataNodes[7]));
+  }
+
+  public void testNodeGroups() throws Exception {
+    assertEquals(cluster.getNumOfRacks(), 3);
+    assertTrue(cluster.isOnSameNodeGroup(dataNodes[0], dataNodes[1]));
+    assertFalse(cluster.isOnSameNodeGroup(dataNodes[1], dataNodes[2]));
+    assertFalse(cluster.isOnSameNodeGroup(dataNodes[2], dataNodes[3]));
+    assertTrue(cluster.isOnSameNodeGroup(dataNodes[3], dataNodes[4]));
+    assertFalse(cluster.isOnSameNodeGroup(dataNodes[4], dataNodes[5]));
+    assertFalse(cluster.isOnSameNodeGroup(dataNodes[5], dataNodes[6]));
+    assertFalse(cluster.isOnSameNodeGroup(dataNodes[6], dataNodes[7]));
+  }
+
+  public void testGetDistance() throws Exception {
+    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
+    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
+    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[2]), 4);
+    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[3]), 6);
+    assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 8);
+  }
+
+  public void testPseudoSortByDistance() throws Exception {
+    DatanodeDescriptor[] testNodes = new DatanodeDescriptor[4];
+
+    // array contains both local node, local node group & local rack node
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[2];
+    testNodes[2] = dataNodes[3];
+    testNodes[3] = dataNodes[0];
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    assertTrue(testNodes[0] == dataNodes[0]);
+    assertTrue(testNodes[1] == dataNodes[1]);
+    assertTrue(testNodes[2] == dataNodes[2]);
+    assertTrue(testNodes[3] == dataNodes[3]);
+
+    // array contains local node & local node group
+    testNodes[0] = dataNodes[3];
+    testNodes[1] = dataNodes[4];
+    testNodes[2] = dataNodes[1];
+    testNodes[3] = dataNodes[0];
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    assertTrue(testNodes[0] == dataNodes[0]);
+    assertTrue(testNodes[1] == dataNodes[1]);
+
+    // array contains local node & rack node
+    testNodes[0] = dataNodes[5];
+    testNodes[1] = dataNodes[3];
+    testNodes[2] = dataNodes[2];
+    testNodes[3] = dataNodes[0];
+    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    assertTrue(testNodes[0] == dataNodes[0]);
+    assertTrue(testNodes[1] == dataNodes[2]);
+
+    // array contains a local-nodegroup node (not itself a datanode) & a rack node
+    testNodes[0] = dataNodes[6];
+    testNodes[1] = dataNodes[7];
+    testNodes[2] = dataNodes[2];
+    testNodes[3] = dataNodes[0];
+    cluster.pseudoSortByDistance(computeNode, testNodes );
+    assertTrue(testNodes[0] == dataNodes[0]);
+    assertTrue(testNodes[1] == dataNodes[2]);
+  }
+
+  /**
+   * This picks a large number of nodes at random in order to ensure coverage.
+   * 
+   * @param numNodes the number of nodes to pick
+   * @param excludedScope the scope to exclude from selection
+   * @return how many times each node was chosen
+   */
+  private Map<Node, Integer> pickNodesAtRandom(int numNodes,
+      String excludedScope) {
+    Map<Node, Integer> frequency = new HashMap<Node, Integer>();
+    for (DatanodeDescriptor dnd : dataNodes) {
+      frequency.put(dnd, 0);
+    }
+
+    for (int j = 0; j < numNodes; j++) {
+      Node random = cluster.chooseRandom(excludedScope);
+      frequency.put(random, frequency.get(random) + 1);
+    }
+    return frequency;
+  }
+
+  /**
+   * This test checks that chooseRandom works for an excluded node.
+   */
+  public void testChooseRandomExcludedNode() {
+    String scope = "~" + NodeBase.getPath(dataNodes[0]);
+    Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
+
+    for (Node key : dataNodes) {
+      // every node except the excluded first node should have a non-zero count
+      assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
+    }
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 918c004..1f6a626 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -15470,7 +15470,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>For input string: "a5"</expected-output>
+          <expected-output>setSpaceQuota: "a5" is not a valid value for a quota.</expected-output>
         </comparator>
       </comparators>
     </test>
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index 0e2684b..38c93fa 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -34,7 +34,7 @@
     <module>hadoop-hdfs</module>
     <module>hadoop-hdfs-httpfs</module>
     <module>hadoop-hdfs/src/contrib/bkjournal</module>
-    <module>hadoop-hdfs/src/contrib/fuse-dfs</module>
+    <module>hadoop-hdfs-raid</module>
   </modules>
 
   <build>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 56a036f..e821545 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -60,6 +60,9 @@
 
   BUG FIXES
 
+    MAPREDUCE-4356. [Rumen] Provide access to the method
+                    ParsedTask.obtainTaskAttempts(). (ravigummadi)
+
     MAPREDUCE-4100. [Gridmix] Bug fixed in compression emulation feature for 
                     map only jobs. (amarrk)
 
@@ -118,7 +121,9 @@
     MAPREDUCE-3990. MRBench allows Long-sized input-lines value
     but parses CLI argument as an Integer. (harsh)
 
-Release 2.0.1-alpha - UNRELEASED
+    MAPREDUCE-3868. Make Raid Compile. (Weiyan Wang via schen)
+
+Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
 
@@ -129,6 +134,12 @@
     MAPREDUCE-4146. Support limits on task status string length and number of
     block locations in branch-2. (Ahmed Radwan via tomwhite)
 
+    MAPREDUCE-3871. Allow symlinking in LocalJobRunner DistributedCache.
+    (tomwhite)
+
+    MAPREDUCE-3921. MR AM should act on node health status changes. 
+    (Bikas Saha via sseth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -166,6 +177,23 @@
     MAPREDUCE-3873. Fixed NodeManagers' decommissioning at RM to accept IP
     addresses also. (xieguiming via vinodkv)
 
+    MAPREDUCE-4306. Fix distributed shell to work with users other than the one
+    running the daemons. (Ahmed Radwan via sseth)
+
+    MAPREDUCE-4031. Prevent a Node Manager hang during shutdown. 
+    (Devaraj K via sseth)
+
+    MAPREDUCE-4336. Distributed Shell fails when used with the CapacityScheduler
+    (ahmed via tucu)
+
+    MAPREDUCE-4290. Fix Yarn Application Status to MR JobState conversion. 
+    (Devaraj K via sseth)
+
+    MAPREDUCE-2289. Permissions race can make getStagingDir fail on local filesystem 
+    (ahmed via tucu)
+
+    MAPREDUCE-4372. Deadlock in Resource Manager (Devaraj K via bobby)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES
@@ -386,6 +414,8 @@
     MAPREDUCE-4301. Dedupe some strings in MRAM for memory savings 
     (bobby via tgraves)
 
+    MAPREDUCE-4267. mavenize pipes (tgraves via bobby)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
@@ -568,6 +598,30 @@
     MAPREDUCE-3350. Per-app RM page should have the list of application-attempts
     like on the app JHS page (Jonathon Eagles via tgraves)
 
+    MAPREDUCE-3842. Stop webpages from automatic refreshing (tgraves)
+
+    MAPREDUCE-3927. Shuffle hang when set map.failures.percent
+    (Bhallamudi Venkata Siva Kamesh via tgraves)
+
+    MAPREDUCE-4311. Capacity scheduler.xml does not accept decimal values for
+    capacity and maximum-capacity settings (Karthik Kambatla via tgraves)
+
+    MAPREDUCE-4341. add types to capacity scheduler properties documentation
+    (Karthik Kambatla via tgraves)
+
+    MAPREDUCE-4270. Move the data_join test classes to the correct path.
+    (Thomas Graves via sseth)
+
+    MAPREDUCE-3889. job client tries to use /tasklog interface, but that
+    doesn't exist anymore (Devaraj K via bobby)
+
+    MAPREDUCE-4320. gridmix mainClass wrong in pom.xml (tgraves)
+
+    MAPREDUCE-4295. RM crashes due to DNS issue (tgraves)
+
+    MAPREDUCE-4228. mapreduce.job.reduce.slowstart.completedmaps is not working
+    properly (Jason Lowe via bobby)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
index e627128..af4aef7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 
 
 /**
@@ -55,6 +56,11 @@
   String getAssignedContainerMgrAddress();
   
   /**
+   * @return node's id if a container is assigned, otherwise null.
+   */
+  NodeId getNodeId();
+  
+  /**
    * @return node's http address if a container is assigned, otherwise null.
    */
   String getNodeHttpAddress();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
index e0223b1..5accb6d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
@@ -44,5 +44,9 @@
   JOB_COUNTER_UPDATE,
   
   //Producer:TaskAttemptListener
-  JOB_TASK_ATTEMPT_FETCH_FAILURE
+  JOB_TASK_ATTEMPT_FETCH_FAILURE,
+  
+  //Producer:RMContainerAllocator
+  JOB_UPDATED_NODES
+  
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobUpdatedNodesEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobUpdatedNodesEvent.java
new file mode 100644
index 0000000..c332fb0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobUpdatedNodesEvent.java
@@ -0,0 +1,40 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+
+
+
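+/**
+ * Job event carrying the node reports received from the ResourceManager so the
+ * job can react to nodes that have become unusable.
+ */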
+public class JobUpdatedNodesEvent extends JobEvent {
+
+  private final List<NodeReport> updatedNodes;
+  public JobUpdatedNodesEvent(JobId jobId, List<NodeReport> updatedNodes) {
+    super(jobId, JobEventType.JOB_UPDATED_NODES);
+    this.updatedNodes = updatedNodes;
+  }
+
+  public List<NodeReport> getUpdatedNodes() {
+    return updatedNodes;
+  }
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
new file mode 100644
index 0000000..9bcc838
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java
@@ -0,0 +1,37 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+
+
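+/**
+ * TA_KILL event for a task attempt, carrying a human-readable reason for the kill.
+ */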
+public class TaskAttemptKillEvent extends TaskAttemptEvent {
+
+  private final String message;
+
+  public TaskAttemptKillEvent(TaskAttemptId attemptID,
+      String message) {
+    super(attemptID, TaskAttemptEventType.TA_KILL);
+    this.message = message;
+  }
+
+  public String getMessage() {
+    return message;
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index daafcf7..10eb68d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -77,6 +77,7 @@
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
@@ -85,8 +86,10 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
@@ -100,6 +103,9 @@
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
@@ -148,6 +154,12 @@
   private final Object tasksSyncHandle = new Object();
   private final Set<TaskId> mapTasks = new LinkedHashSet<TaskId>();
   private final Set<TaskId> reduceTasks = new LinkedHashSet<TaskId>();
+  /**
+   * Maps nodes to the task attempts that have succeeded on those nodes.
+   */
+  private final HashMap<NodeId, List<TaskAttemptId>> 
+    nodesToSucceededTaskAttempts = new HashMap<NodeId, List<TaskAttemptId>>();
+
   private final EventHandler eventHandler;
   private final MRAppMetrics metrics;
   private final String userName;
@@ -194,6 +206,8 @@
           new TaskAttemptCompletedEventTransition();
   private static final CounterUpdateTransition COUNTER_UPDATE_TRANSITION =
       new CounterUpdateTransition();
+  private static final UpdatedNodesTransition UPDATED_NODES_TRANSITION =
+      new UpdatedNodesTransition();
 
   protected static final
     StateMachineFactory<JobImpl, JobState, JobEventType, JobEvent> 
@@ -218,7 +232,10 @@
           .addTransition(JobState.NEW, JobState.ERROR,
               JobEventType.INTERNAL_ERROR,
               INTERNAL_ERROR_TRANSITION)
-
+          // Ignore-able events
+          .addTransition(JobState.NEW, JobState.NEW,
+              JobEventType.JOB_UPDATED_NODES)
+              
           // Transitions from INITED state
           .addTransition(JobState.INITED, JobState.INITED,
               JobEventType.JOB_DIAGNOSTIC_UPDATE,
@@ -234,7 +251,10 @@
           .addTransition(JobState.INITED, JobState.ERROR,
               JobEventType.INTERNAL_ERROR,
               INTERNAL_ERROR_TRANSITION)
-
+          // Ignore-able events
+          .addTransition(JobState.INITED, JobState.INITED,
+              JobEventType.JOB_UPDATED_NODES)
+              
           // Transitions from RUNNING state
           .addTransition(JobState.RUNNING, JobState.RUNNING,
               JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
@@ -252,6 +272,9 @@
           .addTransition(JobState.RUNNING, JobState.KILL_WAIT,
               JobEventType.JOB_KILL, new KillTasksTransition())
           .addTransition(JobState.RUNNING, JobState.RUNNING,
+              JobEventType.JOB_UPDATED_NODES,
+              UPDATED_NODES_TRANSITION)
+          .addTransition(JobState.RUNNING, JobState.RUNNING,
               JobEventType.JOB_MAP_TASK_RESCHEDULED,
               new MapTaskRescheduledTransition())
           .addTransition(JobState.RUNNING, JobState.RUNNING,
@@ -288,8 +311,9 @@
           // Ignore-able events
           .addTransition(JobState.KILL_WAIT, JobState.KILL_WAIT,
               EnumSet.of(JobEventType.JOB_KILL,
-                         JobEventType.JOB_MAP_TASK_RESCHEDULED,
-                         JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
+                  JobEventType.JOB_UPDATED_NODES,
+                  JobEventType.JOB_MAP_TASK_RESCHEDULED,
+                  JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
 
           // Transitions from SUCCEEDED state
           .addTransition(JobState.SUCCEEDED, JobState.SUCCEEDED,
@@ -303,7 +327,8 @@
               INTERNAL_ERROR_TRANSITION)
           // Ignore-able events
           .addTransition(JobState.SUCCEEDED, JobState.SUCCEEDED,
-              EnumSet.of(JobEventType.JOB_KILL,
+              EnumSet.of(JobEventType.JOB_KILL, 
+                  JobEventType.JOB_UPDATED_NODES,
                   JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
 
           // Transitions from FAILED state
@@ -318,7 +343,8 @@
               INTERNAL_ERROR_TRANSITION)
           // Ignore-able events
           .addTransition(JobState.FAILED, JobState.FAILED,
-              EnumSet.of(JobEventType.JOB_KILL,
+              EnumSet.of(JobEventType.JOB_KILL, 
+                  JobEventType.JOB_UPDATED_NODES,
                   JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
 
           // Transitions from KILLED state
@@ -333,7 +359,8 @@
               INTERNAL_ERROR_TRANSITION)
           // Ignore-able events
           .addTransition(JobState.KILLED, JobState.KILLED,
-              EnumSet.of(JobEventType.JOB_KILL,
+              EnumSet.of(JobEventType.JOB_KILL, 
+                  JobEventType.JOB_UPDATED_NODES,
                   JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
 
           // No transitions from INTERNAL_ERROR state. Ignore all.
@@ -346,6 +373,7 @@
                   JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
                   JobEventType.JOB_MAP_TASK_RESCHEDULED,
                   JobEventType.JOB_DIAGNOSTIC_UPDATE,
+                  JobEventType.JOB_UPDATED_NODES,
                   JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
                   JobEventType.INTERNAL_ERROR))
           .addTransition(JobState.ERROR, JobState.ERROR,
@@ -590,9 +618,9 @@
       float reduceProgress = 0f;
       for (Task task : this.tasks.values()) {
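+        // a finished task counts as fully complete regardless of its last reported progress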
         if (task.getType() == TaskType.MAP) {
-          mapProgress += task.getProgress();
+          mapProgress += (task.isFinished() ? 1f : task.getProgress());
         } else {
-          reduceProgress += task.getProgress();
+          reduceProgress += (task.isFinished() ? 1f : task.getProgress());
         }
       }
       if (this.numMapTasks != 0) {
@@ -895,7 +923,7 @@
       LOG.info(msg.toString());
     }
   }
-
+  
   /**
    * ChainMapper and ChainReducer must execute in parallel, so they're not
    * compatible with uberization/LocalContainerLauncher (100% sequential).
@@ -924,6 +952,24 @@
     }
     return isChainJob;
   }
+  
+  private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
+    // rerun previously successful map tasks
+    List<TaskAttemptId> taskAttemptIdList = nodesToSucceededTaskAttempts.get(nodeId);
+    if(taskAttemptIdList != null) {
+      String mesg = "TaskAttempt killed because it ran on unusable node "
+          + nodeId;
+      for(TaskAttemptId id : taskAttemptIdList) {
+        if(TaskType.MAP == id.getTaskId().getTaskType()) {
+          // reschedule only map tasks because their outputs maybe unusable
+          LOG.info(mesg + ". AttemptId:" + id);
+          eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
+        }
+      }
+    }
+    // currently running task attempts on unusable nodes are handled in
+    // RMContainerAllocator
+  }
 
   /*
   private int getBlockSize() {
@@ -1269,18 +1315,37 @@
       tce.setEventId(job.taskAttemptCompletionEvents.size());
       job.taskAttemptCompletionEvents.add(tce);
       
+      TaskAttemptId attemptId = tce.getAttemptId();
+      TaskId taskId = attemptId.getTaskId();
       //make the previous completion event as obsolete if it exists
       Object successEventNo = 
-        job.successAttemptCompletionEventNoMap.remove(tce.getAttemptId().getTaskId());
+        job.successAttemptCompletionEventNoMap.remove(taskId);
       if (successEventNo != null) {
         TaskAttemptCompletionEvent successEvent = 
           job.taskAttemptCompletionEvents.get((Integer) successEventNo);
         successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
       }
-
+      
+      // if this attempt is not successful then why is the previous successful 
+      // attempt being removed above - MAPREDUCE-4330
       if (TaskAttemptCompletionEventStatus.SUCCEEDED.equals(tce.getStatus())) {
-        job.successAttemptCompletionEventNoMap.put(tce.getAttemptId().getTaskId(), 
-            tce.getEventId());
+        job.successAttemptCompletionEventNoMap.put(taskId, tce.getEventId());
+        
+        // here we could have simply called Task.getSuccessfulAttempt() but
+        // the event that triggers this code is sent before
+        // Task.successfulAttempt is set and so there is no guarantee that it
+        // will be available now
+        Task task = job.tasks.get(taskId);
+        TaskAttempt attempt = task.getAttempt(attemptId);
+        NodeId nodeId = attempt.getNodeId();
+        assert (nodeId != null); // node must exist for a successful event
+        List<TaskAttemptId> taskAttemptIdList = job.nodesToSucceededTaskAttempts
+            .get(nodeId);
+        if (taskAttemptIdList == null) {
+          taskAttemptIdList = new ArrayList<TaskAttemptId>();
+          job.nodesToSucceededTaskAttempts.put(nodeId, taskAttemptIdList);
+        }
+        taskAttemptIdList.add(attempt.getID());
       }
     }
   }
@@ -1460,7 +1525,22 @@
       }
     }
   }
-
+  
+  private static class UpdatedNodesTransition implements
+      SingleArcTransition<JobImpl, JobEvent> {
+    @Override
+    public void transition(JobImpl job, JobEvent event) {
+      JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event;
+      for(NodeReport nr: updateEvent.getUpdatedNodes()) {
+        NodeState nodeState = nr.getNodeState();
+        if(nodeState.isUnusable()) {
+          // act on the updates
+          job.actOnUnusableNode(nr.getNodeId(), nodeState);
+        }
+      }
+    }
+  }
+  
   private static class InternalErrorTransition implements
       SingleArcTransition<JobImpl, JobEvent> {
     @Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index cafff92..66d48b6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -84,6 +84,7 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
@@ -404,14 +405,17 @@
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
          new TooManyFetchFailureTransition())
      .addTransition(
+         TaskAttemptState.SUCCEEDED, TaskAttemptState.KILLED,
+         TaskAttemptEventType.TA_KILL,
+         new KilledAfterSuccessTransition())
+     .addTransition(
          TaskAttemptState.SUCCEEDED, TaskAttemptState.SUCCEEDED,
          TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
          DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
      // Ignore-able events for SUCCEEDED state
      .addTransition(TaskAttemptState.SUCCEEDED,
          TaskAttemptState.SUCCEEDED,
-         EnumSet.of(TaskAttemptEventType.TA_KILL,
-             TaskAttemptEventType.TA_FAILMSG,
+         EnumSet.of(TaskAttemptEventType.TA_FAILMSG,
              TaskAttemptEventType.TA_CONTAINER_CLEANED,
              TaskAttemptEventType.TA_CONTAINER_COMPLETED))
 
@@ -818,6 +822,16 @@
     }
   }
 
+  @Override 
+  public NodeId getNodeId() {
+    readLock.lock();
+    try {
+      return containerNodeId;
+    } finally {
+      readLock.unlock();
+    }
+  }
+  
   /**If container Assigned then return the node's address, otherwise null.
    */
   @Override
@@ -999,7 +1013,7 @@
   }
 
   private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
-      TaskAttemptImpl taskAttempt) {
+      TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
@@ -1007,16 +1021,22 @@
     
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
-      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
+      if(!taskAlreadyCompleted) {
+        // don't double-count the elapsed time
+        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
+      }
     } else {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
-      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
+      if(!taskAlreadyCompleted) {
+        // don't double-count the elapsed time
+        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
+      }
     }
     return jce;
   }
   
   private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
-      TaskAttemptImpl taskAttempt) {
+      TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
@@ -1024,10 +1044,16 @@
     
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
-      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
+      if(!taskAlreadyCompleted) {
+        // don't double-count the elapsed time
+        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
+      }
     } else {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
-      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
+      if(!taskAlreadyCompleted) {
+        // don't double-count the elapsed time
+        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
+      }
     }
     return jce;
   }  
@@ -1259,10 +1285,10 @@
                 finalState);
         if(finalState == TaskAttemptState.FAILED) {
           taskAttempt.eventHandler
-            .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+            .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
         } else if(finalState == TaskAttemptState.KILLED) {
           taskAttempt.eventHandler
-          .handle(createJobCounterUpdateEventTAKilled(taskAttempt));
+          .handle(createJobCounterUpdateEventTAKilled(taskAttempt, false));
         }
         taskAttempt.eventHandler.handle(new JobHistoryEvent(
             taskAttempt.attemptId.getTaskId().getJobId(), tauce));
@@ -1394,7 +1420,7 @@
       
       if (taskAttempt.getLaunchTime() != 0) {
         taskAttempt.eventHandler
-            .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+            .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
         TaskAttemptUnsuccessfulCompletionEvent tauce =
             createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
                 TaskAttemptState.FAILED);
@@ -1463,7 +1489,7 @@
       
       if (taskAttempt.getLaunchTime() != 0) {
         taskAttempt.eventHandler
-            .handle(createJobCounterUpdateEventTAFailed(taskAttempt));
+            .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
         TaskAttemptUnsuccessfulCompletionEvent tauce =
             createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
                 TaskAttemptState.FAILED);
@@ -1477,6 +1503,32 @@
           taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
     }
   }
+  
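+  // Handles TA_KILL arriving after the attempt has already succeeded, e.g. when
+  // its node becomes unusable; records the kill and notifies the task.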
+  private static class KilledAfterSuccessTransition implements
+      SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public void transition(TaskAttemptImpl taskAttempt, 
+        TaskAttemptEvent event) {
+      TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
+      // add the kill reason to the attempt's diagnostics
+      taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
+
+      // not setting a finish time since it was set on success
+      assert (taskAttempt.getFinishTime() != 0);
+
+      assert (taskAttempt.getLaunchTime() != 0);
+      taskAttempt.eventHandler
+          .handle(createJobCounterUpdateEventTAKilled(taskAttempt, true));
+      TaskAttemptUnsuccessfulCompletionEvent tauce = createTaskAttemptUnsuccessfulCompletionEvent(
+          taskAttempt, TaskAttemptState.KILLED);
+      taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId
+          .getTaskId().getJobId(), tauce));
+      taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
+          taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
+    }
+  }
 
   private static class KilledTransition implements
       SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@@ -1489,7 +1541,7 @@
       taskAttempt.setFinishTime();
       if (taskAttempt.getLaunchTime() != 0) {
         taskAttempt.eventHandler
-            .handle(createJobCounterUpdateEventTAKilled(taskAttempt));
+            .handle(createJobCounterUpdateEventTAKilled(taskAttempt, false));
         TaskAttemptUnsuccessfulCompletionEvent tauce =
             createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
                 TaskAttemptState.KILLED);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index 58edd16..c7f89f9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -191,13 +191,14 @@
     .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
         EnumSet.of(TaskState.SCHEDULED, TaskState.FAILED),
         TaskEventType.T_ATTEMPT_FAILED, new MapRetroactiveFailureTransition())
+    .addTransition(TaskState.SUCCEEDED, //only possible for map tasks
+        EnumSet.of(TaskState.SCHEDULED, TaskState.SUCCEEDED),
+        TaskEventType.T_ATTEMPT_KILLED, new MapRetroactiveKilledTransition())
     // Ignore-able transitions.
     .addTransition(
         TaskState.SUCCEEDED, TaskState.SUCCEEDED,
-        EnumSet.of(TaskEventType.T_KILL,
-            TaskEventType.T_ADD_SPEC_ATTEMPT,
-            TaskEventType.T_ATTEMPT_LAUNCHED,
-            TaskEventType.T_ATTEMPT_KILLED))
+        EnumSet.of(TaskEventType.T_ADD_SPEC_ATTEMPT,
+            TaskEventType.T_ATTEMPT_LAUNCHED))
 
     // Transitions from FAILED state        
     .addTransition(TaskState.FAILED, TaskState.FAILED,
@@ -629,7 +630,6 @@
   // always called inside a transition, in turn inside the Write Lock
   private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
       TaskAttemptCompletionEventStatus status) {
-    finishedAttempts++;
     TaskAttempt attempt = attempts.get(attemptId);
     //raise the completion event only if the container is assigned
     // to nextAttemptNumber
@@ -681,6 +681,11 @@
         taId == null ? null : TypeConverter.fromYarn(taId));
     return taskFailedEvent;
   }
+  
+  private static void unSucceed(TaskImpl task) {
+    task.commitAttempt = null;
+    task.successfulAttempt = null;
+  }
 
   /**
   * @return a String representation of the splits.
@@ -755,6 +760,7 @@
       task.handleTaskAttemptCompletion(
           ((TaskTAttemptEvent) event).getTaskAttemptID(), 
           TaskAttemptCompletionEventStatus.SUCCEEDED);
+      task.finishedAttempts++;
       --task.numberUncompletedAttempts;
       task.successfulAttempt = ((TaskTAttemptEvent) event).getTaskAttemptID();
       task.eventHandler.handle(new JobTaskEvent(
@@ -790,6 +796,7 @@
       task.handleTaskAttemptCompletion(
           ((TaskTAttemptEvent) event).getTaskAttemptID(), 
           TaskAttemptCompletionEventStatus.KILLED);
+      task.finishedAttempts++;
       --task.numberUncompletedAttempts;
       if (task.successfulAttempt == null) {
         task.addAndScheduleAttempt();
@@ -808,6 +815,7 @@
       task.handleTaskAttemptCompletion(
           ((TaskTAttemptEvent) event).getTaskAttemptID(), 
           TaskAttemptCompletionEventStatus.KILLED);
+      task.finishedAttempts++;
       // check whether all attempts are finished
       if (task.finishedAttempts == task.attempts.size()) {
         if (task.historyTaskStartGenerated) {
@@ -845,6 +853,7 @@
             attempt.getAssignedContainerMgrAddress()));
       }
       
+      task.finishedAttempts++;
       if (task.failedAttempts < task.maxAttempts) {
         task.handleTaskAttemptCompletion(
             ((TaskTAttemptEvent) event).getTaskAttemptID(), 
@@ -880,12 +889,6 @@
     protected TaskState getDefaultState(Task task) {
       return task.getState();
     }
-
-    protected void unSucceed(TaskImpl task) {
-      ++task.numberUncompletedAttempts;
-      task.commitAttempt = null;
-      task.successfulAttempt = null;
-    }
   }
 
   private static class MapRetroactiveFailureTransition
@@ -908,6 +911,8 @@
       //  fails, we have to let AttemptFailedTransition.transition
       //  believe that there's no redundancy.
       unSucceed(task);
+      // fake an increase in uncompleted attempts so super.transition sees the expected count
+      ++task.numberUncompletedAttempts;
       return super.transition(task, event);
     }
 
@@ -917,6 +922,45 @@
     }
   }
 
+  private static class MapRetroactiveKilledTransition implements
+    MultipleArcTransition<TaskImpl, TaskEvent, TaskState> {
+
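+    // Handles T_ATTEMPT_KILLED for a map task that has already succeeded,
+    // typically because the node it ran on became unusable afterwards.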
+    @Override
+    public TaskState transition(TaskImpl task, TaskEvent event) {
+      // verify that this occurs only for map task
+      // TODO: consider moving it to MapTaskImpl
+      if (!TaskType.MAP.equals(task.getType())) {
+        LOG.error("Unexpected event for REDUCE task " + event.getType());
+        task.internalError(event.getType());
+      }
+
+      TaskTAttemptEvent attemptEvent = (TaskTAttemptEvent) event;
+      TaskAttemptId attemptId = attemptEvent.getTaskAttemptID();
+      if(task.successfulAttempt == attemptId) {
+        // successful attempt is now killed. reschedule
+        // tell the job about the rescheduling
+        unSucceed(task);
+        task.handleTaskAttemptCompletion(
+            attemptId, 
+            TaskAttemptCompletionEventStatus.KILLED);
+        task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
+        // typically we are here because this map task was run on a bad node and 
+        // we want to reschedule it on a different node.
+        // Depending on whether there are previous failed attempts, this
+        // can SCHEDULE or RESCHEDULE the container allocate request. If it
+        // SCHEDULEs, then the data-local hosts of this taskAttempt will be used
+        // from the map splitInfo. So the bad node might be sent as a location 
+        // to the RM. But the RM would ignore that just like it would ignore 
+        // currently pending container requests affinitized to bad nodes.
+        task.addAndScheduleAttempt();
+        return TaskState.SCHEDULED;
+      } else {
+        // nothing to do
+        return TaskState.SUCCEEDED;
+      }
+    }
+  }
+
   private static class KillNewTransition 
     implements SingleArcTransition<TaskImpl, TaskEvent> {
     @Override
@@ -966,6 +1010,7 @@
     public void transition(TaskImpl task, TaskEvent event) {
       task.metrics.launchedTask(task);
       task.metrics.runningTask(task);
+      
     }
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index bcb8223..a0ba0e4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -46,19 +47,27 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.AMResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.util.RackResolver;
@@ -408,15 +417,6 @@
     
     LOG.info("Recalculating schedule...");
     
-    //if all maps are assigned, then ramp up all reduces irrespective of the 
-    //headroom
-    if (scheduledMaps == 0 && numPendingReduces > 0) {
-      LOG.info("All maps assigned. " +
-      		"Ramping up all remaining reduces:" + numPendingReduces);
-      scheduleAllReduces();
-      return;
-    }
-    
     //check for slow start
     if (!getIsReduceStarted()) {//not set yet
       int completedMapsForReduceSlowstart = (int)Math.ceil(reduceSlowStart * 
@@ -432,6 +432,15 @@
       }
     }
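+    // note: the all-maps-assigned ramp-up below is now checked only after the
+    // slow-start threshold above has been satisfied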
     
+    //if all maps are assigned, then ramp up all reduces irrespective of the
+    //headroom
+    if (scheduledMaps == 0 && numPendingReduces > 0) {
+      LOG.info("All maps assigned. " +
+          "Ramping up all remaining reduces:" + numPendingReduces);
+      scheduleAllReduces();
+      return;
+    }
+
     float completedMapPercent = 0f;
     if (totalMaps != 0) {//support for 0 maps
       completedMapPercent = (float)completedMaps/totalMaps;
@@ -489,7 +498,8 @@
     }
   }
 
-  private void scheduleAllReduces() {
+  @Private
+  public void scheduleAllReduces() {
     for (ContainerRequest req : pendingReduces) {
       scheduledRequests.addReduce(req);
     }
@@ -583,7 +593,9 @@
 
     //Called on each allocation. Will know about newly blacklisted/added hosts.
     computeIgnoreBlacklisting();
-    
+
+    handleUpdatedNodes(response);
+
     for (ContainerStatus cont : finishedContainers) {
       LOG.info("Received completed container " + cont.getContainerId());
       TaskAttemptId attemptID = assignedRequests.get(cont.getContainerId());
@@ -600,10 +612,48 @@
         String diagnostics = cont.getDiagnostics();
         eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
             diagnostics));
-      }
+      }      
     }
     return newContainers;
   }
+  
+  @SuppressWarnings("unchecked")
+  private void handleUpdatedNodes(AMResponse response) {
+    // send an event to the job about the updated nodes
+    List<NodeReport> updatedNodes = response.getUpdatedNodes();
+    if (!updatedNodes.isEmpty()) {
+
+      // send event to the job so it can act on completed tasks
+      eventHandler.handle(new JobUpdatedNodesEvent(getJob().getID(),
+          updatedNodes));
+
+      // act upon running tasks
+      HashSet<NodeId> unusableNodes = new HashSet<NodeId>();
+      for (NodeReport nr : updatedNodes) {
+        NodeState nodeState = nr.getNodeState();
+        if (nodeState.isUnusable()) {
+          unusableNodes.add(nr.getNodeId());
+        }
+      }
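+      // pass 0 examines assigned map attempts, pass 1 assigned reduce attempts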
+      for (int i = 0; i < 2; ++i) {
+        HashMap<TaskAttemptId, Container> taskSet = i == 0 ? assignedRequests.maps
+            : assignedRequests.reduces;
+        // kill running containers
+        for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) {
+          TaskAttemptId tid = entry.getKey();
+          NodeId taskAttemptNodeId = entry.getValue().getNodeId();
+          if (unusableNodes.contains(taskAttemptNodeId)) {
+            LOG.info("Killing taskAttempt:" + tid
+                + " because it is running on unusable node:"
+                + taskAttemptNodeId);
+            eventHandler.handle(new TaskAttemptKillEvent(tid,
+                "TaskAttempt killed because it ran on unusable node"
+                    + taskAttemptNodeId));
+          }
+        }
+      }
+    }
+  }
 
   @Private
   public int getMemLimit() {
@@ -743,7 +793,6 @@
         boolean blackListed = false;         
         ContainerRequest assigned = null;
         
-        ContainerId allocatedContainerId = allocated.getId();
         if (isAssignable) {
           // do not assign if allocated container is on a  
           // blacklisted host
@@ -790,7 +839,7 @@
               eventHandler.handle(new TaskAttemptContainerAssignedEvent(
                   assigned.attemptID, allocated, applicationACLs));
 
-              assignedRequests.add(allocatedContainerId, assigned.attemptID);
+              assignedRequests.add(allocated, assigned.attemptID);
 
               if (LOG.isDebugEnabled()) {
                 LOG.info("Assigned container (" + allocated + ") "
@@ -811,7 +860,7 @@
         // or if we could not assign it 
         if (blackListed || assigned == null) {
           containersReleased++;
-          release(allocatedContainerId);
+          release(allocated.getId());
         }
       }
     }
@@ -974,20 +1023,20 @@
   private class AssignedRequests {
     private final Map<ContainerId, TaskAttemptId> containerToAttemptMap =
       new HashMap<ContainerId, TaskAttemptId>();
-    private final LinkedHashMap<TaskAttemptId, ContainerId> maps = 
-      new LinkedHashMap<TaskAttemptId, ContainerId>();
-    private final LinkedHashMap<TaskAttemptId, ContainerId> reduces = 
-      new LinkedHashMap<TaskAttemptId, ContainerId>();
+    private final LinkedHashMap<TaskAttemptId, Container> maps = 
+      new LinkedHashMap<TaskAttemptId, Container>();
+    private final LinkedHashMap<TaskAttemptId, Container> reduces = 
+      new LinkedHashMap<TaskAttemptId, Container>();
     private final Set<TaskAttemptId> preemptionWaitingReduces = 
       new HashSet<TaskAttemptId>();
     
-    void add(ContainerId containerId, TaskAttemptId tId) {
-      LOG.info("Assigned container " + containerId.toString() + " to " + tId);
-      containerToAttemptMap.put(containerId, tId);
+    void add(Container container, TaskAttemptId tId) {
+      LOG.info("Assigned container " + container.getId().toString() + " to " + tId);
+      containerToAttemptMap.put(container.getId(), tId);
       if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
-        maps.put(tId, containerId);
+        maps.put(tId, container);
       } else {
-        reduces.put(tId, containerId);
+        reduces.put(tId, container);
       }
     }
 
@@ -1017,9 +1066,9 @@
     boolean remove(TaskAttemptId tId) {
       ContainerId containerId = null;
       if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
-        containerId = maps.remove(tId);
+        containerId = maps.remove(tId).getId();
       } else {
-        containerId = reduces.remove(tId);
+        containerId = reduces.remove(tId).getId();
         if (containerId != null) {
           boolean preempted = preemptionWaitingReduces.remove(tId);
           if (preempted) {
@@ -1038,12 +1087,20 @@
     TaskAttemptId get(ContainerId cId) {
       return containerToAttemptMap.get(cId);
     }
+    
+    NodeId getNodeId(TaskAttemptId tId) {
+      if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
+        return maps.get(tId).getNodeId();
+      } else {
+        return reduces.get(tId).getNodeId();
+      }
+    }
 
     ContainerId get(TaskAttemptId tId) {
       if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
-        return maps.get(tId);
+        return maps.get(tId).getId();
       } else {
-        return reduces.get(tId);
+        return reduces.get(tId).getId();
       }
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
index f324b53..d7afcd8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
@@ -28,9 +28,6 @@
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // Counters page is a summary. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     String tid = $(TASK_ID);
     String activeNav = "3";
     if(tid == null || tid.isEmpty()) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
index fd98549..00f4750 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
@@ -33,9 +33,6 @@
                : join("MapReduce Job ", $(JOB_ID)));
     commonPreHead(html);
 
-    // This is a job-summary page. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index 69e114f..e83a957 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -126,10 +126,6 @@
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // This page is a list of all attempts which are limited in number. Okay to
-    // refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:3}");
     set(DATATABLES_ID, "attempts");
     set(initID(DATATABLES, "attempts"), attemptsTableInit());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index 81af358..7f1d820 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -232,6 +233,11 @@
     diags.add(DIAGS.next());
     return new TaskAttempt() {
       @Override
+      public NodeId getNodeId() throws UnsupportedOperationException {
+        throw new UnsupportedOperationException();
+      }
+      
+      @Override
       public TaskAttemptId getID() {
         return taid;
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
index 3ca9c24..a0d8e77 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
@@ -22,6 +22,7 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+import java.util.ArrayList;
 import java.util.Iterator;
 
 import junit.framework.Assert;
@@ -29,17 +30,26 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.junit.Test;
 
 /**
@@ -160,6 +170,159 @@
     
     app.waitForState(job, JobState.SUCCEEDED);
   }
+  
+  /**
+   * The test verifies that the AM re-runs maps that have run on bad nodes. It
+   * also verifies that the AM records all success/killed events so that reduces
+   * are notified about map output status changes, and that the re-run
+   * information is preserved across an AM restart.
+   */
+  @Test
+  public void testUpdatedNodes() throws Exception {
+    int runCount = 0;
+    MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
+        true, ++runCount);
+    Configuration conf = new Configuration();
+    // after half of the map completion, reduce will start
+    conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
+    // uberization forces full slowstart (1.0), so disable that
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+    Job job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
+    Iterator<Task> it = job.getTasks().values().iterator();
+    Task mapTask1 = it.next();
+    Task mapTask2 = it.next();
+
+    // all maps must be running
+    app.waitForState(mapTask1, TaskState.RUNNING);
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator()
+        .next();
+    TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator()
+        .next();
+    NodeId node1 = task1Attempt.getNodeId();
+    NodeId node2 = task2Attempt.getNodeId();
+    Assert.assertEquals(node1, node2);
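+    // MRApp places both map attempts on the same node, so one bad node report
+    // should affect both succeeded maps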
+
+    // send the done signal to the task
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task1Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task2Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+
+    // all maps must be succeeded
+    app.waitForState(mapTask1, TaskState.SUCCEEDED);
+    app.waitForState(mapTask2, TaskState.SUCCEEDED);
+
+    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0,
+        100);
+    Assert.assertEquals("Expecting 2 completion events for success", 2,
+        events.length);
+
+    // send updated nodes info
+    ArrayList<NodeReport> updatedNodes = new ArrayList<NodeReport>();
+    NodeReport nr = RecordFactoryProvider.getRecordFactory(null)
+        .newRecordInstance(NodeReport.class);
+    nr.setNodeId(node1);
+    nr.setNodeState(NodeState.UNHEALTHY);
+    updatedNodes.add(nr);
+    app.getContext().getEventHandler()
+        .handle(new JobUpdatedNodesEvent(job.getID(), updatedNodes));
+
+    app.waitForState(task1Attempt, TaskAttemptState.KILLED);
+    app.waitForState(task2Attempt, TaskAttemptState.KILLED);
+
+    events = job.getTaskAttemptCompletionEvents(0, 100);
+    Assert.assertEquals("Expecting 2 more completion events for killed", 4,
+        events.length);
+
+    // all maps must be back to running
+    app.waitForState(mapTask1, TaskState.RUNNING);
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
+    itr.next();
+    task1Attempt = itr.next();
+
+    // send the done signal to the task
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task1Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+
+    // map1 must have succeeded; map2 must still be running
+    app.waitForState(mapTask1, TaskState.SUCCEEDED);
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    events = job.getTaskAttemptCompletionEvents(0, 100);
+    Assert.assertEquals("Expecting 1 more completion events for success", 5,
+        events.length);
+
+    // Crash the app again.
+    app.stop();
+
+    // rerun
+    // in the rerun, the 1st map will be recovered from the previous run
+    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
+        ++runCount);
+    conf = new Configuration();
+    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+    job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
+    it = job.getTasks().values().iterator();
+    mapTask1 = it.next();
+    mapTask2 = it.next();
+    Task reduceTask = it.next();
+
+    // map 1 will be recovered, no need to send done
+    app.waitForState(mapTask1, TaskState.SUCCEEDED);
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    events = job.getTaskAttemptCompletionEvents(0, 100);
+    Assert.assertEquals(
+        "Expecting 2 completion events for killed & success of map1", 2,
+        events.length);
+
+    task2Attempt = mapTask2.getAttempts().values().iterator().next();
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task2Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+    app.waitForState(mapTask2, TaskState.SUCCEEDED);
+
+    events = job.getTaskAttemptCompletionEvents(0, 100);
+    Assert.assertEquals("Expecting 1 more completion events for success", 3,
+        events.length);
+
+    app.waitForState(reduceTask, TaskState.RUNNING);
+    TaskAttempt task3Attempt = reduceTask.getAttempts().values().iterator()
+        .next();
+    app.getContext()
+        .getEventHandler()
+        .handle(
+            new TaskAttemptEvent(task3Attempt.getID(),
+                TaskAttemptEventType.TA_DONE));
+    app.waitForState(reduceTask, TaskState.SUCCEEDED);
+
+    events = job.getTaskAttemptCompletionEvents(0, 100);
+    Assert.assertEquals("Expecting 1 more completion events for success", 4,
+        events.length);
+
+    // job succeeds
+    app.waitForState(job, JobState.SUCCEEDED);
+  }
 
   @Test
   public void testJobError() throws Exception {
@@ -194,10 +357,6 @@
       ((AppContext) getContext()).getAllJobs().put(spiedJob.getID(), spiedJob);
       return spiedJob;
     }
-
-    JobImpl getSpiedJob() {
-      return this.spiedJob;
-    }
   }
 
   @Test
@@ -232,6 +391,21 @@
       TypeConverter.fromYarn(state);
     }
   }
+  
+  private final class MRAppWithHistory extends MRApp {
+    public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
+        String testName, boolean cleanOnStart, int startCount) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
+    }
+
+    @Override
+    protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+        AppContext context) {
+      JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context, 
+          getStartCount());
+      return eventHandler;
+    }
+  }
 
   public static void main(String[] args) throws Exception {
     TestMRApp t = new TestMRApp();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
index a7f42ee..33306f4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
@@ -18,15 +18,24 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
+import static org.mockito.Matchers.anyFloat;
+import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.isA;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import junit.framework.Assert;
@@ -46,9 +55,11 @@
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
@@ -63,9 +74,10 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.Event;
@@ -74,13 +86,11 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -426,29 +436,21 @@
 
     // Finish off 1 map.
     Iterator<Task> it = job.getTasks().values().iterator();
-    finishNextNTasks(mrApp, it, 1);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.095f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.095f, rmApp.getProgress(), 0.001f);
 
     // Finish off 7 more so that map-progress is 80%
-    finishNextNTasks(mrApp, it, 7);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.41f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.41f, rmApp.getProgress(), 0.001f);
 
     // Finish off the 2 remaining maps
-    finishNextNTasks(mrApp, it, 2);
-
-    // Wait till all reduce-attempts request for containers
-    for (Task t : job.getTasks().values()) {
-      if (t.getType() == TaskType.REDUCE) {
-        mrApp.waitForState(t.getAttempts().values().iterator().next(),
-          TaskAttemptState.UNASSIGNED);
-      }
-    }
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
 
     allocator.schedule();
     rmDispatcher.await();
@@ -465,7 +467,7 @@
     }
 
     // Finish off 2 reduces
-    finishNextNTasks(mrApp, it, 2);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
 
     allocator.schedule();
     rmDispatcher.await();
@@ -473,7 +475,7 @@
     Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
 
     // Finish off the remaining 8 reduces.
-    finishNextNTasks(mrApp, it, 8);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8);
     allocator.schedule();
     rmDispatcher.await();
     // Remaining is JobCleanup
@@ -481,19 +483,28 @@
     Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
   }
 
-  private void finishNextNTasks(MRApp mrApp, Iterator<Task> it, int nextN)
-      throws Exception {
+  private void finishNextNTasks(DrainDispatcher rmDispatcher, MockNM node,
+      MRApp mrApp, Iterator<Task> it, int nextN) throws Exception {
     Task task;
     for (int i=0; i<nextN; i++) {
       task = it.next();
-      finishTask(mrApp, task);
+      finishTask(rmDispatcher, node, mrApp, task);
     }
   }
 
-  private void finishTask(MRApp mrApp, Task task) throws Exception {
+  private void finishTask(DrainDispatcher rmDispatcher, MockNM node,
+      MRApp mrApp, Task task) throws Exception {
     TaskAttempt attempt = task.getAttempts().values().iterator().next();
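+    // report the attempt's container as COMPLETE through a node heartbeat so
+    // the RM releases it before the AM is told the attempt is done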
+    List<ContainerStatus> contStatus = new ArrayList<ContainerStatus>(1);
+    contStatus.add(BuilderUtils.newContainerStatus(attempt.getAssignedContainerID(),
+        ContainerState.COMPLETE, "", 0));
+    Map<ApplicationId,List<ContainerStatus>> statusUpdate =
+        new HashMap<ApplicationId,List<ContainerStatus>>(1);
+    statusUpdate.put(mrApp.getAppID(), contStatus);
+    node.nodeHeartbeat(statusUpdate, true);
+    rmDispatcher.await();
     mrApp.getContext().getEventHandler().handle(
-        new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
+          new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
     mrApp.waitForState(task, TaskState.SUCCEEDED);
   }
 
@@ -574,26 +585,108 @@
     Iterator<Task> it = job.getTasks().values().iterator();
 
     // Finish off 1 map so that map-progress is 10%
-    finishNextNTasks(mrApp, it, 1);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
 
     // Finish off 5 more map so that map-progress is 60%
-    finishNextNTasks(mrApp, it, 5);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
 
     // Finish off remaining map so that map-progress is 100%
-    finishNextNTasks(mrApp, it, 4);
+    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
     allocator.schedule();
     rmDispatcher.await();
     Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
     Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
   }
+  
+  @Test
+  public void testUpdatedNodes() throws Exception {
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+    
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, mockJob);
+
+    // add resources to scheduler
+    MockNM nm1 = rm.registerNode("h1:1234", 10240);
+    MockNM nm2 = rm.registerNode("h2:1234", 10240);
+    dispatcher.await();
+
+    // create the map container request
+    ContainerRequestEvent event = createReq(jobId, 1, 1024,
+        new String[] { "h1" });
+    allocator.sendRequest(event);
+    TaskAttemptId attemptId = event.getAttemptID();
+    
+    TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
+    when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
+    Task mockTask = mock(Task.class);
+    when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
+    when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
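+    // these mocks let the allocator resolve the outstanding attempt back to
+    // nm1, the node that is later reported as unusable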
+
+    // this tells the scheduler about the requests
+    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+    dispatcher.await();
+
+    nm1.nodeHeartbeat(true);
+    dispatcher.await();
+    // get the assignment
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(1, assigned.size());
+    Assert.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId());
+    // no updated nodes reported
+    Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
+    Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
+    
+    // mark nodes bad
+    nm1.nodeHeartbeat(false);
+    nm2.nodeHeartbeat(false);
+    dispatcher.await();
+    
+    // schedule response returns updated nodes
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(0, assigned.size());
+    // updated nodes are reported
+    Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
+    Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
+    Assert.assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
+    Assert.assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
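+    // clear the captured events so the next allocate cycle is verified in isolation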
+    allocator.getJobUpdatedNodeEvents().clear();
+    allocator.getTaskAttemptKillEvents().clear();
+    
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(0, assigned.size());
+    // no updated nodes reported
+    Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
+    Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
+  }
 
   @Test
   public void testBlackListedNodes() throws Exception {
@@ -1100,7 +1193,10 @@
   private static class MyContainerAllocator extends RMContainerAllocator {
     static final List<TaskAttemptContainerAssignedEvent> events
       = new ArrayList<TaskAttemptContainerAssignedEvent>();
-
+    static final List<TaskAttemptKillEvent> taskAttemptKillEvents
+      = new ArrayList<TaskAttemptKillEvent>();
+    static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents
+      = new ArrayList<JobUpdatedNodesEvent>();
     private MyResourceManager rm;
 
     private static AppContext createAppContext(
@@ -1119,6 +1215,10 @@
           // Only capture interesting events.
           if (event instanceof TaskAttemptContainerAssignedEvent) {
             events.add((TaskAttemptContainerAssignedEvent) event);
+          } else if (event instanceof TaskAttemptKillEvent) {
+            taskAttemptKillEvents.add((TaskAttemptKillEvent)event);
+          } else if (event instanceof JobUpdatedNodesEvent) {
+            jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event);
           }
         }
       });
@@ -1202,6 +1302,14 @@
       events.clear();
       return result;
     }
+    
+    List<TaskAttemptKillEvent> getTaskAttemptKillEvents() {
+      return taskAttemptKillEvents;
+    }
+    
+    List<JobUpdatedNodesEvent> getJobUpdatedNodeEvents() {
+      return jobUpdatedNodeEvents;
+    }
 
     @Override
     protected void startAllocatorThread() {
@@ -1239,6 +1347,18 @@
         maxReduceRampupLimit, reduceSlowStart);
     verify(allocator, never()).setIsReduceStarted(true);
     
+    // verify slow-start still in effect when no more maps need to
+    // be scheduled but some have yet to complete
+    allocator.scheduleReduces(
+        totalMaps, succeededMaps,
+        0, scheduledReduces,
+        totalMaps - succeededMaps, assignedReduces,
+        mapResourceReqt, reduceResourceReqt,
+        numPendingReduces,
+        maxReduceRampupLimit, reduceSlowStart);
+    verify(allocator, never()).setIsReduceStarted(true);
+    verify(allocator, never()).scheduleAllReduces();
+
     succeededMaps = 3;
     allocator.scheduleReduces(
         totalMaps, succeededMaps, 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index a5dae84..80c4823 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -66,6 +66,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -529,6 +530,11 @@
     }
 
     @Override
+    public NodeId getNodeId() throws UnsupportedOperationException {
+      throw new UnsupportedOperationException();
+    }
+    
+    @Override
     public TaskAttemptId getID() {
       return myAttemptID;
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index 2fec814..670959b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -23,6 +23,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.MalformedURLException;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLClassLoader;
@@ -45,6 +46,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRConfig;
@@ -72,6 +74,8 @@
   private List<String> localFiles = new ArrayList<String>();
   private List<String> localClasspaths = new ArrayList<String>();
   
+  private List<File> symlinksCreated = new ArrayList<File>();
+  
   private boolean setupCalled = false;
   
   /**
@@ -172,18 +176,51 @@
               .size()])));
     }
     if (DistributedCache.getSymlink(conf)) {
-      // This is not supported largely because, 
-      // for a Child subprocess, the cwd in LocalJobRunner
-      // is not a fresh slate, but rather the user's working directory.
-      // This is further complicated because the logic in
-      // setupWorkDir only creates symlinks if there's a jarfile
-      // in the configuration.
-      LOG.warn("LocalJobRunner does not support " +
-          "symlinking into current working dir.");
+      File workDir = new File(System.getProperty("user.dir"));
+      URI[] archives = DistributedCache.getCacheArchives(conf);
+      URI[] files = DistributedCache.getCacheFiles(conf);
+      Path[] localArchives = DistributedCache.getLocalCacheArchives(conf);
+      Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
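+      // for every cached archive/file whose URI has a fragment, create a
+      // symlink of that name in the working directory pointing at the localized copy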
+      if (archives != null) {
+        for (int i = 0; i < archives.length; i++) {
+          String link = archives[i].getFragment();
+          String target = new File(localArchives[i].toUri()).getPath();
+          symlink(workDir, target, link);
+        }
+      }
+      if (files != null) {
+        for (int i = 0; i < files.length; i++) {
+          String link = files[i].getFragment();
+          String target = new File(localFiles[i].toUri()).getPath();
+          symlink(workDir, target, link);
+        }
+      }
     }
     setupCalled = true;
   }
   
+  /**
+   * Utility method for creating a symlink and warning on errors.
+   *
+   * If link is null, does nothing.
+   */
+  private void symlink(File workDir, String target, String link)
+      throws IOException {
+    if (link != null) {
+      link = workDir.toString() + Path.SEPARATOR + link;
+      File flink = new File(link);
+      if (!flink.exists()) {
+        LOG.info(String.format("Creating symlink: %s <- %s", target, link));
+        if (0 != FileUtil.symLink(target, link)) {
+          LOG.warn(String.format("Failed to create symlink: %s <- %s", target,
+              link));
+        } else {
+          symlinksCreated.add(new File(link));
+        }
+      }
+    }
+  }
+  
   /** 
    * Are the resources that should be added to the classpath? 
    * Should be called after setup().
@@ -217,6 +254,12 @@
   }
 
   public void close() throws IOException {
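+    // remove the symlinks created during setup before deleting the localized files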
+    for (File symlink : symlinksCreated) {
+      if (!symlink.delete()) {
+        LOG.warn("Failed to delete symlink created by the local job runner: " +
+            symlink);
+      }
+    }
     FileContext localFSFileContext = FileContext.getLocalFSFileContext();
     for (String archive : localArchives) {
       localFSFileContext.delete(new Path(archive), true);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index ddabb4c..12dce4b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
@@ -376,22 +377,27 @@
     }
     return reports;
   }
-
-  public static JobStatus.State fromYarn(YarnApplicationState state) {
-    switch (state) {
+  
+  public static State fromYarn(YarnApplicationState yarnApplicationState,
+      FinalApplicationStatus finalApplicationStatus) {
+    switch (yarnApplicationState) {
     case NEW:
     case SUBMITTED:
       return State.PREP;
     case RUNNING:
       return State.RUNNING;
     case FINISHED:
-      return State.SUCCEEDED;
+      if (finalApplicationStatus == FinalApplicationStatus.SUCCEEDED) {
+        return State.SUCCEEDED;
+      } else if (finalApplicationStatus == FinalApplicationStatus.KILLED) {
+        return State.KILLED;
+      }
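+      // neither SUCCEEDED nor KILLED: fall through and report the job as FAILED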
     case FAILED:
       return State.FAILED;
     case KILLED:
       return State.KILLED;
     }
-    throw new YarnException("Unrecognized application state: " + state);
+    throw new YarnException("Unrecognized application state: " + yarnApplicationState);
   }
 
   private static final String TT_NAME_PREFIX = "tracker_";
@@ -417,7 +423,7 @@
       new JobStatus(
           TypeConverter.fromYarn(application.getApplicationId()),
           0.0f, 0.0f, 0.0f, 0.0f,
-          TypeConverter.fromYarn(application.getYarnApplicationState()),
+          TypeConverter.fromYarn(application.getYarnApplicationState(), application.getFinalApplicationStatus()),
           org.apache.hadoop.mapreduce.JobPriority.NORMAL,
           application.getUser(), application.getName(),
           application.getQueue(), jobFile, trackingUrl, false
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
index 6798831..4336807 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Arrays;
 import java.util.jar.JarOutputStream;
 import java.util.zip.ZipEntry;
 
@@ -61,6 +62,9 @@
 public class TestMRWithDistributedCache extends TestCase {
   private static Path TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"));
+  private static File symlinkFile = new File("distributed.first.symlink");
+  private static File expectedAbsentSymlinkFile =
+    new File("distributed.second.jar");
   private static Configuration conf = new Configuration();
   private static FileSystem localFs;
   static {
@@ -107,20 +111,17 @@
       TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
       TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
 
-
       // Check that the symlink for the renaming was created in the cwd;
-      // This only happens for real for non-local jobtrackers.
-      // (The symlinks exist in "localRunner/" for local Jobtrackers,
-      // but the user has no way to get at them.
-      if (!"local".equals(
-          context.getConfiguration().get(JTConfig.JT_IPC_ADDRESS))) {
-        File symlinkFile = new File("distributed.first.symlink");
-        TestCase.assertTrue("symlink distributed.first.symlink doesn't exist", symlinkFile.exists());
-        TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1, symlinkFile.length());
-      }
+      TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
+          symlinkFile.exists());
+      TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
+          symlinkFile.length());
+      
+      TestCase.assertFalse("second file should not be symlinked",
+          expectedAbsentSymlinkFile.exists());
     }
   }
-
+  
   private void testWithConf(Configuration conf) throws IOException,
       InterruptedException, ClassNotFoundException, URISyntaxException {
     // Create a temporary file of length 1.
@@ -144,11 +145,7 @@
     job.addFileToClassPath(second);
     job.addArchiveToClassPath(third);
     job.addCacheArchive(fourth.toUri());
-    
-    // don't create symlink for LocalJobRunner
-    if (!"local".equals(conf.get(JTConfig.JT_IPC_ADDRESS))) {
-      job.createSymlink();
-    }
+    job.createSymlink();
     job.setMaxMapAttempts(1); // speed up failures
 
     job.submit();
@@ -157,10 +154,17 @@
 
   /** Tests using the local job runner. */
   public void testLocalJobRunner() throws Exception {
+    symlinkFile.delete(); // ensure symlink is not present (e.g. if test is
+                          // killed part way through)
+    
     Configuration c = new Configuration();
     c.set(JTConfig.JT_IPC_ADDRESS, "local");
     c.set("fs.defaultFS", "file:///");
     testWithConf(c);
+    
+    assertFalse("Symlink not removed by local job runner",
+            // Symlink target will have gone so can't use File.exists()
+            Arrays.asList(new File(".").list()).contains(symlinkFile.getName()));
   }
 
   private Path createTempFile(String filename, String contents)
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
index a9a2c0a9..49dec4a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
@@ -45,7 +46,7 @@
   @Test
   public void testEnums() throws Exception {
     for (YarnApplicationState applicationState : YarnApplicationState.values()) {
-      TypeConverter.fromYarn(applicationState);
+      TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
     }
     
     for (TaskType taskType : TaskType.values()) {
@@ -63,8 +64,6 @@
     for (TaskState taskState : TaskState.values()) {
       TypeConverter.fromYarn(taskState);
     }
-    
-    
   }
   
   @Test
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 51bac98..2fd666e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -18,30 +18,19 @@
 
 package org.apache.hadoop.mapreduce;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.net.URL;
-import java.net.URLConnection;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
@@ -1367,14 +1356,6 @@
       Job.TaskStatusFilter filter, boolean profiling, IntegerRanges mapRanges,
       IntegerRanges reduceRanges) throws IOException, InterruptedException {
     for (TaskCompletionEvent event : events) {
-      TaskCompletionEvent.Status status = event.getStatus();
-      if (profiling && shouldDownloadProfile() &&
-         (status == TaskCompletionEvent.Status.SUCCEEDED ||
-            status == TaskCompletionEvent.Status.FAILED) &&
-            (event.isMapTask() ? mapRanges : reduceRanges).
-              isIncluded(event.idWithinJob())) {
-        downloadProfile(event);
-      }
       switch (filter) {
       case NONE:
         break;
@@ -1382,7 +1363,6 @@
         if (event.getStatus() == 
           TaskCompletionEvent.Status.SUCCEEDED) {
           LOG.info(event.toString());
-          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         }
         break; 
       case FAILED:
@@ -1397,8 +1377,6 @@
               System.err.println(diagnostics);
             }
           }
-          // Displaying the task logs
-          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         }
         break; 
       case KILLED:
@@ -1408,67 +1386,10 @@
         break; 
       case ALL:
         LOG.info(event.toString());
-        displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
         break;
       }
     }
   }
-  
-  private void downloadProfile(TaskCompletionEvent e) throws IOException  {
-    URLConnection connection = new URL(
-      getTaskLogURL(e.getTaskAttemptId(), e.getTaskTrackerHttp()) + 
-      "&filter=profile").openConnection();
-    InputStream in = connection.getInputStream();
-    OutputStream out = new FileOutputStream(e.getTaskAttemptId() + ".profile");
-    IOUtils.copyBytes(in, out, 64 * 1024, true);
-  }
-  
-  private void displayTaskLogs(TaskAttemptID taskId, String baseUrl)
-      throws IOException {
-    // The tasktracker for a 'failed/killed' job might not be around...
-    if (baseUrl != null) {
-      // Construct the url for the tasklogs
-      String taskLogUrl = getTaskLogURL(taskId, baseUrl);
-      
-      // Copy tasks's stdout of the JobClient
-      getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stdout"), System.out);
-        
-      // Copy task's stderr to stderr of the JobClient 
-      getTaskLogs(taskId, new URL(taskLogUrl+"&filter=stderr"), System.err);
-    }
-  }
-    
-  private void getTaskLogs(TaskAttemptID taskId, URL taskLogUrl, 
-                           OutputStream out) {
-    try {
-      int tasklogtimeout = cluster.getConf().getInt(
-        TASKLOG_PULL_TIMEOUT_KEY, DEFAULT_TASKLOG_TIMEOUT);
-      URLConnection connection = taskLogUrl.openConnection();
-      connection.setReadTimeout(tasklogtimeout);
-      connection.setConnectTimeout(tasklogtimeout);
-      BufferedReader input = 
-        new BufferedReader(new InputStreamReader(connection.getInputStream()));
-      BufferedWriter output = 
-        new BufferedWriter(new OutputStreamWriter(out));
-      try {
-        String logData = null;
-        while ((logData = input.readLine()) != null) {
-          if (logData.length() > 0) {
-            output.write(taskId + ": " + logData + "\n");
-            output.flush();
-          }
-        }
-      } finally {
-        input.close();
-      }
-    } catch(IOException ioe) {
-      LOG.warn("Error reading task output " + ioe.getMessage()); 
-    }
-  }
-  
-  private String getTaskLogURL(TaskAttemptID taskId, String baseUrl) {
-    return (baseUrl + "/tasklog?plaintext=true&attemptid=" + taskId); 
-  }
 
   /** The interval at which monitorAndPrintJob() prints status */
   public static int getProgressPollInterval(Configuration conf) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
index b084d1c..a4ea1d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
@@ -27,12 +27,18 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 /**
  * A utility to manage job submission files.
  */
 @InterfaceAudience.Private
 public class JobSubmissionFiles {
 
+  private final static Log LOG = LogFactory.getLog(JobSubmissionFiles.class);
+
   // job submission directory is private!
   final public static FsPermission JOB_DIR_PERMISSION =
     FsPermission.createImmutable((short) 0700); // rwx--------
@@ -102,14 +108,18 @@
     if (fs.exists(stagingArea)) {
       FileStatus fsStatus = fs.getFileStatus(stagingArea);
       String owner = fsStatus.getOwner();
-      if (!(owner.equals(currentUser) || owner.equals(realUser)) || 
-          !fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
-         throw new IOException("The ownership/permissions on the staging " +
-                      "directory " + stagingArea + " is not as expected. " + 
-                      "It is owned by " + owner + " and permissions are "+ 
-                      fsStatus.getPermission() + ". The directory must " +
+      if (!(owner.equals(currentUser) || owner.equals(realUser))) {
+         throw new IOException("The ownership on the staging directory " +
+                      stagingArea + " is not as expected. " +
+                      "It is owned by " + owner + ". The directory must " +
                       "be owned by the submitter " + currentUser + " or " +
-                      "by " + realUser + " and permissions must be rwx------");
+                      "by " + realUser);
+      }
+      if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
+        LOG.info("Permissions on staging directory " + stagingArea + " are " +
+          "incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
+          "to correct value " + JOB_DIR_PERMISSION);
+        fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
       }
     } else {
       fs.mkdirs(stagingArea, 
@@ -118,4 +128,4 @@
     return stagingArea;
   }
   
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index 34eb594..32240f8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -282,9 +282,12 @@
       if(attemptInfo.getAttemptId().equals(taskInfo.getSuccessfulAttemptId()))
       {
         // the failed attempt is the one that made this task successful
-        // so its no longer successful
+        // so it's no longer successful. Reset fields set in
+        // handleTaskFinishedEvent()
+        taskInfo.counters = null;
+        taskInfo.finishTime = -1;
         taskInfo.status = null;
-        // not resetting the other fields set in handleTaskFinishedEvent()
+        taskInfo.successfulAttemptId = null;
       }
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
index df8c6b3..8d5bc3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
@@ -137,24 +137,26 @@
 
       // update the status
       totalBytesShuffledTillNow += bytes;
-      float mbs = (float) totalBytesShuffledTillNow / (1024 * 1024);
-      int mapsDone = totalMaps - remainingMaps;
-      long secsSinceStart = 
-        (System.currentTimeMillis()-startTime)/1000+1;
-
-      float transferRate = mbs/secsSinceStart;
-      progress.set((float) mapsDone / totalMaps);
-      String statusString = mapsDone + " / " + totalMaps + " copied.";
-      status.setStateString(statusString);
-      progress.setStatus("copy(" + mapsDone + " of " + totalMaps 
-          + " at " +
-          mbpsFormat.format(transferRate) +  " MB/s)");
-      
+      updateStatus();
       reduceShuffleBytes.increment(bytes);
       lastProgressTime = System.currentTimeMillis();
-      LOG.debug("map " + mapId + " done " + statusString);
+      LOG.debug("map " + mapId + " done " + status.getStateString());
     }
   }
+  
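+  /**
+   * Recompute shuffle progress and the copy-rate status string from the
+   * bytes shuffled and maps completed so far.
+   */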
+  private void updateStatus() {
+    float mbs = (float) totalBytesShuffledTillNow / (1024 * 1024);
+    int mapsDone = totalMaps - remainingMaps;
+    long secsSinceStart = (System.currentTimeMillis() - startTime) / 1000 + 1;
+
+    float transferRate = mbs / secsSinceStart;
+    progress.set((float) mapsDone / totalMaps);
+    String statusString = mapsDone + " / " + totalMaps + " copied.";
+    status.setStateString(statusString);
+
+    progress.setStatus("copy(" + mapsDone + " of " + totalMaps + " at "
+        + mbpsFormat.format(transferRate) + " MB/s)");
+  }
 
   public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
                                       boolean readError) {
@@ -256,7 +258,13 @@
   }
   
   public synchronized void tipFailed(TaskID taskId) {
-    finishedMaps[taskId.getId()] = true;
+    if (!finishedMaps[taskId.getId()]) {
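+      // count each failed map only once so remainingMaps is not decremented twice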
+      finishedMaps[taskId.getId()] = true;
+      if (--remainingMaps == 0) {
+        notifyAll();
+      }
+      updateStatus();
+    }
   }
   
   public synchronized void addKnownMapOutput(String hostName, 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
new file mode 100644
index 0000000..f4ed330
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.task.reduce;
+
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.TaskAttemptID;
+import org.apache.hadoop.mapred.TaskStatus;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.util.Progress;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestShuffleScheduler {
+
+  @SuppressWarnings("rawtypes")
+  @Test
+  public void testTipFailed() throws Exception {
+    JobConf job = new JobConf();
+    job.setNumMapTasks(2);
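+    // with two maps, each failed tip should advance reduce-side progress by 0.5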
+
+    TaskStatus status = new TaskStatus() {
+      @Override
+      public boolean getIsMap() {
+        return false;
+      }
+
+      @Override
+      public void addFetchFailedMap(TaskAttemptID mapTaskId) {
+      }
+    };
+    Progress progress = new Progress();
+
+    ShuffleScheduler scheduler = new ShuffleScheduler(job, status, null,
+        progress, null, null, null);
+
+    JobID jobId = new JobID();
+    TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
+    scheduler.tipFailed(taskId1);
+
+    Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
+        0.0f);
+    Assert.assertFalse(scheduler.waitUntilDone(1));
+
+    TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
+    scheduler.tipFailed(taskId0);
+    Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
+        0.0f);
+    Assert.assertTrue(scheduler.waitUntilDone(1));
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
index 84ec23e..8f8e776 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.util.Records;
 
 public class CompletedTaskAttempt implements TaskAttempt {
@@ -58,6 +59,11 @@
   }
 
   @Override
+  public NodeId getNodeId() throws UnsupportedOperationException {
+    throw new UnsupportedOperationException();
+  }
+  
+  @Override
   public ContainerId getAssignedContainerID() {
     return attemptInfo.getContainerId();
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
index 5334f29..ad1ebc9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
@@ -19,13 +19,26 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
@@ -35,7 +48,7 @@
   /**
    * Tests that getRootQueues makes a request for the (recursive) child queues
    */
-@Test
+  @Test
   public void testGetRootQueues() throws IOException, InterruptedException {
     ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
     GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
@@ -60,4 +73,56 @@
       argument.getValue().getRecursive());
   }
 
+  @Test
+  public void testAllJobs() throws Exception {
+    ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
+    GetAllApplicationsResponse allApplicationsResponse = Records
+        .newRecord(GetAllApplicationsResponse.class);
+    List<ApplicationReport> applications = new ArrayList<ApplicationReport>();
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.FAILED));
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.SUCCEEDED));
+    applications.add(getApplicationReport(YarnApplicationState.FINISHED,
+        FinalApplicationStatus.KILLED));
+    applications.add(getApplicationReport(YarnApplicationState.FAILED,
+        FinalApplicationStatus.FAILED));
+    allApplicationsResponse.setApplicationList(applications);
+    Mockito.when(
+        applicationsManager.getAllApplications(Mockito
+            .any(GetAllApplicationsRequest.class))).thenReturn(
+        allApplicationsResponse);
+    ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(
+        new YarnConfiguration(), applicationsManager);
+    JobStatus[] allJobs = resourceMgrDelegate.getAllJobs();
+
+    Assert.assertEquals(State.FAILED, allJobs[0].getState());
+    Assert.assertEquals(State.SUCCEEDED, allJobs[1].getState());
+    Assert.assertEquals(State.KILLED, allJobs[2].getState());
+    Assert.assertEquals(State.FAILED, allJobs[3].getState());
+  }
+
+  private ApplicationReport getApplicationReport(
+      YarnApplicationState yarnApplicationState,
+      FinalApplicationStatus finalApplicationStatus) {
+    ApplicationReport appReport = Mockito.mock(ApplicationReport.class);
+    ApplicationResourceUsageReport appResources = Mockito
+        .mock(ApplicationResourceUsageReport.class);
+    Mockito.when(appReport.getApplicationId()).thenReturn(
+        Records.newRecord(ApplicationId.class));
+    Mockito.when(appResources.getNeededResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appResources.getReservedResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appResources.getUsedResources()).thenReturn(
+        Records.newRecord(Resource.class));
+    Mockito.when(appReport.getApplicationResourceUsageReport()).thenReturn(
+        appResources);
+    Mockito.when(appReport.getYarnApplicationState()).thenReturn(
+        yarnApplicationState);
+    Mockito.when(appReport.getFinalApplicationStatus()).thenReturn(
+        finalApplicationStatus);
+
+    return appReport;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
index 31916c6..7345c25 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
@@ -38,5 +38,9 @@
   LOST, 
   
   /** Node has rebooted */
-  REBOOTED
-}
\ No newline at end of file
+  REBOOTED;
+  
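+  /** @return whether the node is in a state that makes it unusable for running containers */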
+  public boolean isUnusable() {
+    return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST);
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b790e3c..592ab03 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.AMRMProtocol;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ContainerManager;
@@ -635,12 +634,10 @@
       ctx.setContainerId(container.getId());
       ctx.setResource(container.getResource());
 
-      try {
-        ctx.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
-      } catch (IOException e) {
-        LOG.info("Getting current user info failed when trying to launch the container"
-            + e.getMessage());
-      }
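+      // pick up the job user from the AM container's environment rather than
+      // the process owner of the AM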
+      String jobUserName = System.getenv(ApplicationConstants.Environment.USER
+          .name());
+      ctx.setUser(jobUserName);
+      LOG.info("Setting user in ContainerLaunchContext to: " + jobUserName);
 
       // Set the environment 
       ctx.setEnvironment(shellEnv);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 41e8e6d..bfea87a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -95,7 +95,7 @@
  * 
  * <p> For the actual job submission, the client first has to create an {@link ApplicationSubmissionContext}. 
  * The {@link ApplicationSubmissionContext} defines the application details such as {@link ApplicationId} 
- * and application name, user submitting the application, the priority assigned to the application and the queue 
+ * and application name, the priority assigned to the application and the queue
  * to which this application needs to be assigned. In addition to this, the {@link ApplicationSubmissionContext}
  * also defines the {@link ContainerLaunchContext} which describes the <code>Container</code> with which 
  * the {@link ApplicationMaster} is launched. </p>
@@ -132,8 +132,6 @@
   private int amPriority = 0;
   // Queue for App master
   private String amQueue = "";
-  // User to run app master as
-  private String amUser = "";
   // Amt. of memory resource to request for to run the App Master
   private int amMemory = 10; 
 
@@ -221,6 +219,7 @@
    * Parse command line options
    * @param args Parsed command line options 
    * @return Whether the init was successful to run the client
+   * @throws ParseException
    */
   public boolean init(String[] args) throws ParseException {
 
@@ -228,7 +227,6 @@
     opts.addOption("appname", true, "Application Name. Default value - DistributedShell");
     opts.addOption("priority", true, "Application Priority. Default 0");
     opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
-    opts.addOption("user", true, "User to run the application as");
     opts.addOption("timeout", true, "Application timeout in milliseconds");
     opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
     opts.addOption("jar", true, "Jar file containing the application master");
@@ -262,8 +260,7 @@
 
     appName = cliParser.getOptionValue("appname", "DistributedShell");
     amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
-    amQueue = cliParser.getOptionValue("queue", "");
-    amUser = cliParser.getOptionValue("user", "");
+    amQueue = cliParser.getOptionValue("queue", "default");
     amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10"));		
 
     if (amMemory < 0) {
@@ -356,6 +353,7 @@
     }
 
     GetQueueInfoRequest queueInfoReq = Records.newRecord(GetQueueInfoRequest.class);
+    queueInfoReq.setQueueName(this.amQueue);
     GetQueueInfoResponse queueInfoResp = applicationsManager.getQueueInfo(queueInfoReq);		
     QueueInfo queueInfo = queueInfoResp.getQueueInfo();
     LOG.info("Queue info"
@@ -567,10 +565,6 @@
     commands.add(command.toString());		
     amContainer.setCommands(commands);
 
-    // For launching an AM Container, setting user here is not needed
-    // Set user in ApplicationSubmissionContext
-    // amContainer.setUser(amUser);
-
     // Set up resource type requirements
     // For now, only memory is supported so we set memory requirements
     Resource capability = Records.newRecord(Resource.class);
@@ -594,9 +588,6 @@
 
     // Set the queue to which this application is to be submitted in the RM
     appContext.setQueue(amQueue);
-    // Set the user submitting this application 
-    // TODO can it be empty? 
-    appContext.setUser(amUser);
 
     // Create the request to send to the applications manager 
     SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 1b3a76a..c8f325d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -28,6 +28,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.service.AbstractService;
 
@@ -127,7 +128,8 @@
     catch (Throwable t) {
       //TODO Maybe log the state of the queue
       LOG.fatal("Error in dispatcher thread", t);
-      if (exitOnDispatchException) {
+      if (exitOnDispatchException
+          && !ShutdownHookManager.get().isShutdownInProgress()) {
         LOG.info("Exiting, bbye..");
         System.exit(-1);
       }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index fb3c97b..f865b2d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -47,47 +47,37 @@
       <build>
         <plugins>
           <plugin>
-            <groupId>org.codehaus.mojo</groupId>
-            <artifactId>make-maven-plugin</artifactId>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
             <executions>
               <execution>
-                <id>compile</id>
+                <id>make</id>
                 <phase>compile</phase>
-                <goals>
-                  <goal>autoreconf</goal>
-                  <goal>configure</goal>
-                  <goal>make-install</goal>
-                </goals>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <mkdir dir="${project.build.directory}/native/target"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="${basedir}/src/ -DHADOOP_CONF_DIR=${container-executor.conf.dir} -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                      <env key="CFLAGS" value="${container-executor.additional_cflags}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
               <execution>
-                <id>test</id>
+                <id>native_tests</id>
                 <phase>test</phase>
-                <goals>
-                  <goal>test</goal>
-                </goals>
+                <configuration>
+                  <target>
+                    <exec executable="test-container-executor" dir="${project.build.directory}/native" failonerror="true">
+                    </exec>
+                  </target>
+                </configuration>
               </execution>
             </executions>
-            <configuration>
-              <!-- autoreconf settings -->
-              <workDir>${project.build.directory}/native/container-executor</workDir>
-              <arguments>
-                <argument>-i</argument>
-              </arguments>
-
-              <!-- configure settings -->
-              <configureEnvironment>
-                <property>
-                  <name>CFLAGS</name>
-                  <value>-DHADOOP_CONF_DIR=${container-executor.conf.dir} ${container-executor.additional_cflags}</value>
-                </property>
-              </configureEnvironment>
-              <configureWorkDir>${project.build.directory}/native/container-executor</configureWorkDir>
-              <prefix>/usr/local</prefix>
-
-              <!-- configure & make settings -->
-              <destDir>${project.build.directory}/native/target</destDir>
-
-            </configuration>
           </plugin>
         </plugins>
       </build>
@@ -172,14 +162,6 @@
             <goals>
               <goal>run</goal>
             </goals>
-            <configuration>
-              <target>
-                <mkdir dir="${project.build.directory}/native"/>
-                <copy toDir="${project.build.directory}/native">
-                  <fileset dir="${basedir}/src/main/native"/>
-                </copy>
-              </target>
-            </configuration>
           </execution>
         </executions>
       </plugin>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
new file mode 100644
index 0000000..ace151a
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -0,0 +1,69 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+
+set(CMAKE_BUILD_TYPE Release)
+
+if (JVM_ARCH_DATA_MODEL EQUAL 32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+    set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+        set(CMAKE_SYSTEM_PROCESSOR "i686")
+    endif ()
+endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+
+function(output_directory TGT DIR)
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+endfunction(output_directory TGT DIR)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2 -D_GNU_SOURCE")
+# note: can't enable -D_LARGEFILE: see MAPREDUCE-4258
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT")
+
+include_directories(
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_BINARY_DIR}
+    main/native/container-executor
+    main/native/container-executor/impl
+)
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+add_library(container
+    main/native/container-executor/impl/configuration.c
+    main/native/container-executor/impl/container-executor.c
+)
+
+add_executable(container-executor
+    main/native/container-executor/impl/main.c
+)
+target_link_libraries(container-executor
+    container
+)
+output_directory(container-executor target/usr/local/bin)
+
+add_executable(test-container-executor
+    main/native/container-executor/test/test-container-executor.c
+)
+target_link_libraries(test-container-executor
+    container
+)
+output_directory(test-container-executor target/usr/local/bin)
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
new file mode 100644
index 0000000..1fff361
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake
@@ -0,0 +1,6 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#cmakedefine HADOOP_CONF_DIR "@HADOOP_CONF_DIR@"
+
+#endif
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
index df891d8..1a92491 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java
@@ -45,9 +45,6 @@
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // Per-app information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(DATATABLES_ID, "containers");
     set(initID(DATATABLES, "containers"), containersTableInit());
     setTableStyles(html, "containers");
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
index 8e11729..060d72a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java
@@ -41,9 +41,6 @@
   protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
 
-    // Per-container information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     setTitle("Container " + $(CONTAINER_ID));
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
index db13a90..9eb3599f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java
@@ -42,9 +42,6 @@
   protected void commonPreHead(HTML<_> html) {
     super.commonPreHead(html);
 
-    // Node summary page. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
-
     set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.autom4te.cfg b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.autom4te.cfg
deleted file mode 100644
index d21d1c9..0000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop utils library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.deps/container-executor.Po b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.deps/container-executor.Po
deleted file mode 100644
index 9ce06a8..0000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/.deps/container-executor.Po
+++ /dev/null
@@ -1 +0,0 @@
-# dummy
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/Makefile.am b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/Makefile.am
deleted file mode 100644
index 4938bb2..0000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/Makefile.am
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-AM_CFLAGS=-I$(srcdir)/impl -Wall -g -Werror
-
-# Define the programs that need to be built
-bin_PROGRAMS = container-executor
-check_PROGRAMS = test-container-executor
-
-TESTS = test-container-executor
-
-# Define the sources for the common files
-common_SOURCES = impl/configuration.c impl/container-executor.c
-
-# Define the sources for the real executable
-container_executor_SOURCES = $(common_SOURCES) impl/main.c
-
-# Define the sources for the test executable
-test_container_executor_SOURCES = $(common_SOURCES) test/test-container-executor.c
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/configure.ac b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/configure.ac
deleted file mode 100644
index db8af88..0000000
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/configure.ac
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-AC_PREREQ(2.59)
-AC_INIT(linux-container-executor, 1.0.0, mapreduce-dev@hadoop.apache.org)
-AC_GNU_SOURCE
-#AC_SYS_LARGEFILE
-
-AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
-
-AC_CONFIG_SRCDIR([impl/container-executor.c])
-AC_CONFIG_FILES([Makefile])
-
-AC_PREFIX_DEFAULT(`pwd`/../install)
-
-CHECK_INSTALL_CFLAG
-HADOOP_UTILS_SETUP
-
-# Checks for programs.
-AC_PROG_CC
-AM_PROG_CC_C_O
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_LANG(C)
-AC_CHECK_HEADERS([unistd.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_HEADER_STDBOOL
-AC_C_CONST
-AC_TYPE_OFF_T
-AC_TYPE_SIZE_T
-AC_FUNC_STRERROR_R
-
-# Checks for library functions.
-AC_CHECK_FUNCS([mkdir uname])
-AC_OUTPUT
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index cd8caab..d6ce5aa 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "config.h"
 #include "configuration.h"
 #include "container-executor.h"
 
@@ -29,8 +30,6 @@
 #include <string.h>
 #include <sys/stat.h>
 
-#define _STRINGIFY(X) #X
-#define STRINGIFY(X) _STRINGIFY(X)
 #define CONF_FILENAME "container-executor.cfg"
 
 // When building as part of a Maven build this value gets defined by using
@@ -101,7 +100,7 @@
 
   char *executable_file = get_executable();
 
-  char *orig_conf_file = STRINGIFY(HADOOP_CONF_DIR) "/" CONF_FILENAME;
+  char *orig_conf_file = HADOOP_CONF_DIR "/" CONF_FILENAME;
   char *conf_file = resolve_config_path(orig_conf_file, argv[0]);
   char *local_dirs, *log_dirs;
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index abf3ff9..9950b93 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -20,7 +20,6 @@
 
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -48,8 +47,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -327,7 +326,8 @@
           } catch (Throwable t) {
             LOG.fatal("Error in handling event type " + event.getType()
                 + " to the scheduler", t);
-            if (shouldExitOnError) {
+            if (shouldExitOnError
+                && !ShutdownHookManager.get().isShutdownInProgress()) {
               LOG.info("Exiting, bbye..");
               System.exit(-1);
             }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index e13a14d..86e2dd3 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -80,13 +80,13 @@
   DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT = 0.1f;
   
   @Private
-  public static final int UNDEFINED = -1;
+  public static final float UNDEFINED = -1;
   
   @Private
-  public static final int MINIMUM_CAPACITY_VALUE = 1;
+  public static final float MINIMUM_CAPACITY_VALUE = 1;
   
   @Private
-  public static final int MAXIMUM_CAPACITY_VALUE = 100;
+  public static final float MAXIMUM_CAPACITY_VALUE = 100;
   
   @Private
   public static final int DEFAULT_USER_LIMIT = 100;
@@ -132,8 +132,8 @@
         DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT);
   }
   
-  public int getCapacity(String queue) {
-    int capacity = getInt(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
+  public float getCapacity(String queue) {
+    float capacity = getFloat(getQueuePrefix(queue) + CAPACITY, UNDEFINED);
     if (capacity < MINIMUM_CAPACITY_VALUE || capacity > MAXIMUM_CAPACITY_VALUE) {
       throw new IllegalArgumentException("Illegal " +
       		"capacity of " + capacity + " for queue " + queue);
@@ -143,31 +143,31 @@
     return capacity;
   }
   
-  public void setCapacity(String queue, int capacity) {
-    setInt(getQueuePrefix(queue) + CAPACITY, capacity);
+  public void setCapacity(String queue, float capacity) {
+    setFloat(getQueuePrefix(queue) + CAPACITY, capacity);
     LOG.debug("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) + 
         ", capacity=" + capacity);
   }
 
-  public int getMaximumCapacity(String queue) {
-    int maxCapacity = 
-      getInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, MAXIMUM_CAPACITY_VALUE);
+  public float getMaximumCapacity(String queue) {
+    float maxCapacity = getFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY,
+        MAXIMUM_CAPACITY_VALUE);
     return maxCapacity;
   }
   
-  public void setMaximumCapacity(String queue, int maxCapacity) {
+  public void setMaximumCapacity(String queue, float maxCapacity) {
     if (maxCapacity > MAXIMUM_CAPACITY_VALUE) {
       throw new IllegalArgumentException("Illegal " +
           "maximum-capacity of " + maxCapacity + " for queue " + queue);
     }
-    setInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
+    setFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
     LOG.debug("CSConf - setMaxCapacity: queuePrefix=" + getQueuePrefix(queue) + 
         ", maxCapacity=" + maxCapacity);
   }
   
   public int getUserLimit(String queue) {
-    int userLimit = 
-      getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
+    int userLimit = getInt(getQueuePrefix(queue) + USER_LIMIT,
+        DEFAULT_USER_LIMIT);
     return userLimit;
   }
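
With the capacity getters and setters above now reading and writing floats (via getFloat/setFloat), fractional queue capacities can be expressed directly in capacity-scheduler.xml. A rough sketch only -- the queue name and values below are illustrative, echoing the 10.5/50 figures used in the updated tests, and are not part of this patch:

----
<!-- Illustrative leaf-queue entry with a fractional capacity. -->
<property>
  <name>yarn.scheduler.capacity.root.a.capacity</name>
  <value>10.5</value>
</property>
<!-- maximum-capacity is likewise read as a float. -->
<property>
  <name>yarn.scheduler.capacity.root.a.maximum-capacity</name>
  <value>50</value>
</property>
----

getCapacity("root.a") would then return 10.5f through getFloat, a value the previous getInt-based parsing could not accept.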
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 75d5249..aed6c90 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1180,9 +1180,16 @@
     if (UserGroupInformation.isSecurityEnabled()) {
       ContainerTokenIdentifier tokenIdentifier = new ContainerTokenIdentifier(
           containerId, nodeId.toString(), capability);
-      containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
-          .wrap(containerTokenSecretManager
-              .createPassword(tokenIdentifier)), tokenIdentifier);
+      try {
+        containerToken = BuilderUtils.newContainerToken(nodeId, ByteBuffer
+            .wrap(containerTokenSecretManager
+                .createPassword(tokenIdentifier)), tokenIdentifier);
+      } catch (IllegalArgumentException e) {
+         // this could be because DNS is down - in which case we just want
+         // to retry and not bring RM down
+         LOG.error("Error trying to create new container", e);
+         return null;
+      }
     }
 
     // Create the container
@@ -1211,6 +1218,11 @@
     // Create the container if necessary
     Container container = 
         getContainer(rmContainer, application, node, capability, priority);
+  
+    // something went wrong getting/creating the container 
+    if (container == null) {
+      return Resources.none();
+    }
 
     // Can we allocate a container on this node?
     int availableContainers = 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index cdb1060..bd7e988 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -111,7 +111,7 @@
 			      cs.getConfiguration().getEnableUserMetrics(),
 			      cs.getConf());
 
-    int rawCapacity = cs.getConfiguration().getCapacity(getQueuePath());
+    float rawCapacity = cs.getConfiguration().getCapacity(getQueuePath());
 
     if (rootQueue &&
         (rawCapacity != CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE)) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
index f2abe5d..a55c62f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java
@@ -24,8 +24,6 @@
 
   @Override protected void preHead(Page.HTML<_> html) {
     commonPreHead(html);
-    // App page is per-app information. Helps to refresh automatically.
-    html.meta_http("refresh", "10");
   }
 
   @Override protected Class<? extends SubView> content() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 40d44aa..d3ec035 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -59,13 +59,13 @@
   private static final String B1 = B + ".b1";
   private static final String B2 = B + ".b2";
   private static final String B3 = B + ".b3";
-  private static int A_CAPACITY = 10;
-  private static int B_CAPACITY = 90;
-  private static int A1_CAPACITY = 30;
-  private static int A2_CAPACITY = 70;
-  private static int B1_CAPACITY = 50;
-  private static int B2_CAPACITY = 30;
-  private static int B3_CAPACITY = 20;
+  private static float A_CAPACITY = 10.5f;
+  private static float B_CAPACITY = 89.5f;
+  private static float A1_CAPACITY = 30;
+  private static float A2_CAPACITY = 70;
+  private static float B1_CAPACITY = 50;
+  private static float B2_CAPACITY = 30;
+  private static float B3_CAPACITY = 20;
 
   private ResourceManager resourceManager = null;
   
@@ -250,14 +250,14 @@
     cs.reinitialize(conf, null, null);
     checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
 
-    conf.setCapacity(A, 80);
-    conf.setCapacity(B, 20);
+    conf.setCapacity(A, 80f);
+    conf.setCapacity(B, 20f);
     cs.reinitialize(conf, null,null);
-    checkQueueCapacities(cs, 80, 20);
+    checkQueueCapacities(cs, 80f, 20f);
   }
 
   private void checkQueueCapacities(CapacityScheduler cs,
-      int capacityA, int capacityB) {
+      float capacityA, float capacityB) {
     CSQueue rootQueue = cs.getRootQueue();
     CSQueue queueA = findQueue(rootQueue, A);
     CSQueue queueB = findQueue(rootQueue, B);
@@ -274,13 +274,13 @@
     checkQueueCapacity(queueB, capB, capB, 1.0f, 1.0f);
     checkQueueCapacity(queueA1, A1_CAPACITY / 100.0f,
         (A1_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
-    checkQueueCapacity(queueA2, (float)A2_CAPACITY / 100.0f,
+    checkQueueCapacity(queueA2, A2_CAPACITY / 100.0f,
         (A2_CAPACITY/100.0f) * capA, 1.0f, 1.0f);
-    checkQueueCapacity(queueB1, (float)B1_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB1, B1_CAPACITY / 100.0f,
         (B1_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
-    checkQueueCapacity(queueB2, (float)B2_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB2, B2_CAPACITY / 100.0f,
         (B2_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
-    checkQueueCapacity(queueB3, (float)B3_CAPACITY / 100.0f,
+    checkQueueCapacity(queueB3, B3_CAPACITY / 100.0f,
         (B3_CAPACITY/100.0f) * capB, 1.0f, 1.0f);
   }
 
@@ -340,7 +340,7 @@
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
     setupQueueConfiguration(conf);
     conf.setQueues(CapacitySchedulerConfiguration.ROOT + ".a.a1", new String[] {"b1"} );
-    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100);
+    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f);
     conf.setUserLimitFactor(CapacitySchedulerConfiguration.ROOT + ".a.a1.b1", 100.0f);
 
     cs.reinitialize(conf, null, null);
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 7571a0f..f74dab4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -135,7 +135,7 @@
     conf.setAcl(CapacitySchedulerConfiguration.ROOT, QueueACL.SUBMIT_APPLICATIONS, " ");
     
     final String Q_A = CapacitySchedulerConfiguration.ROOT + "." + A;
-    conf.setCapacity(Q_A, 9);
+    conf.setCapacity(Q_A, 8.5f);
     conf.setMaximumCapacity(Q_A, 20);
     conf.setAcl(Q_A, QueueACL.SUBMIT_APPLICATIONS, "*");
     
@@ -145,7 +145,7 @@
     conf.setAcl(Q_B, QueueACL.SUBMIT_APPLICATIONS, "*");
 
     final String Q_C = CapacitySchedulerConfiguration.ROOT + "." + C;
-    conf.setCapacity(Q_C, 1);
+    conf.setCapacity(Q_C, 1.5f);
     conf.setMaximumCapacity(Q_C, 10);
     conf.setAcl(Q_C, QueueACL.SUBMIT_APPLICATIONS, " ");
     
@@ -208,8 +208,8 @@
 	  //can add more sturdy test with 3-layer queues 
 	  //once MAPREDUCE:3410 is resolved
 	  LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
-	  assertEquals(0.09, a.getCapacity(), epsilon);
-	  assertEquals(0.09, a.getAbsoluteCapacity(), epsilon);
+	  assertEquals(0.085, a.getCapacity(), epsilon);
+	  assertEquals(0.085, a.getAbsoluteCapacity(), epsilon);
 	  assertEquals(0.2, a.getMaximumCapacity(), epsilon);
 	  assertEquals(0.2, a.getAbsoluteMaximumCapacity(), epsilon);
 	  
@@ -220,8 +220,8 @@
 	  assertEquals(0.99, b.getAbsoluteMaximumCapacity(), epsilon);
 
 	  ParentQueue c = (ParentQueue)queues.get(C);
-	  assertEquals(0.01, c.getCapacity(), epsilon);
-	  assertEquals(0.01, c.getAbsoluteCapacity(), epsilon);
+	  assertEquals(0.015, c.getCapacity(), epsilon);
+	  assertEquals(0.015, c.getAbsoluteCapacity(), epsilon);
 	  assertEquals(0.1, c.getMaximumCapacity(), epsilon);
 	  assertEquals(0.1, c.getAbsoluteMaximumCapacity(), epsilon);
   }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 9e80b79..c4c4849 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -18,16 +18,27 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.when;
 
 import java.util.HashMap;
-import java.util.Map;
-
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -35,10 +46,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -289,10 +296,10 @@
     conf.setCapacity(Q_B, 50);
     
     final String Q_C = CapacitySchedulerConfiguration.ROOT + "." + C;
-    conf.setCapacity(Q_C, 20);
+    conf.setCapacity(Q_C, 19.5f);
     
     final String Q_D = CapacitySchedulerConfiguration.ROOT + "." + D;
-    conf.setCapacity(Q_D, 20);
+    conf.setCapacity(Q_D, 20.5f);
     
     // Define 2-nd level queues
     conf.setQueues(Q_A, new String[] {A1, A2});
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index b8dc072..c0b5861 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -124,11 +124,11 @@
     conf.setCapacity(CapacitySchedulerConfiguration.ROOT, 100);
 
     final String A = CapacitySchedulerConfiguration.ROOT + ".a";
-    conf.setCapacity(A, 10);
+    conf.setCapacity(A, 10.5f);
     conf.setMaximumCapacity(A, 50);
 
     final String B = CapacitySchedulerConfiguration.ROOT + ".b";
-    conf.setCapacity(B, 90);
+    conf.setCapacity(B, 89.5f);
 
     // Define 2nd-level queues
     final String A1 = A + ".a1";
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
index 0ee8220..01a9f60 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
@@ -197,14 +197,14 @@
 || Property                            || Description                         |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.capacity>>> | |
-| | Queue <capacity> in percentage (%). | 
+| | Queue <capacity> in percentage (%) as a float (e.g. 12.5).| 
 | | The sum of capacities for all queues, at each level, must be equal |
 | | to 100. | 
 | | Applications in the queue may consume more resources than the queue's | 
 | | capacity if there are free resources, providing elasticity. |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.maximum-capacity>>> |   | 
-| | Maximum queue capacity in percentage (%). |
+| | Maximum queue capacity in percentage (%) as a float. |
 | | This limits the <elasticity> for applications in the queue. |
 | | Defaults to -1 which disables it. |
 *--------------------------------------+--------------------------------------+
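
For instance, because the per-level capacities must still sum to 100, fractional values let sibling queues split a level more finely. A rough illustration (queue names are hypothetical; the 10.5/89.5 split mirrors the values used in the scheduler tests earlier in this patch):

----
<!-- Two illustrative sibling queues under root; their float capacities sum to 100. -->
<property>
  <name>yarn.scheduler.capacity.root.a.capacity</name>
  <value>10.5</value>
</property>
<property>
  <name>yarn.scheduler.capacity.root.b.capacity</name>
  <value>89.5</value>
</property>
----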
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
index 67d4232..7e7cb66 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailability.apt.vm
@@ -712,4 +712,155 @@
 
   Even if automatic failover is configured, you may initiate a manual failover
   using the same <<<hdfs haadmin>>> command. It will perform a coordinated
-  failover.
\ No newline at end of file
+  failover.
+
+ 
+* BookKeeper as a Shared storage (EXPERIMENTAL)
+
+   One option for shared storage for the NameNode is BookKeeper. 
+  BookKeeper achieves high availability and strong durability guarantees by replicating
+  edit log entries across multiple storage nodes. The edit log can be striped across 
+  the storage nodes for high performance. Fencing is supported in the protocol, i.e.,
+  BookKeeper will not allow two writers to write to the same edit log.
+
+  The metadata for BookKeeper is stored in ZooKeeper.
+  In the current HA architecture, a ZooKeeper cluster is required for ZKFC. The same cluster can be
+  used for BookKeeper metadata.
+
+  For more details on building a BookKeeper cluster, please refer to the 
+   {{{http://zookeeper.apache.org/bookkeeper/docs/trunk/bookkeeperConfig.html }BookKeeper documentation}}
+
+ The BookKeeperJournalManager is an implementation of the HDFS JournalManager interface, which allows custom write-ahead logging implementations to be plugged into the HDFS NameNode.
+ 
+ **<<BookKeeper Journal Manager>>
+
+   To use BookKeeperJournalManager, add the following to hdfs-site.xml.
+
+----
+    <property>
+      <name>dfs.namenode.shared.edits.dir</name>
+      <value>bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal</value>
+    </property>
+
+    <property>
+      <name>dfs.namenode.edits.journal-plugin.bookkeeper</name>
+      <value>org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager</value>
+    </property>
+----
+
+   The URI format for bookkeeper is <<<bookkeeper://[zkEnsemble]/[rootZnode]>>>.
+   <<<[zkEnsemble]>>> is a semicolon-separated list of ZooKeeper host:port
+   pairs. In the example above there are 3 servers in the ensemble,
+   zk1, zk2 & zk3, each one listening on port 2181.
+
+   <<<[rootZnode]>>> is the path of the ZooKeeper znode under which the edit log
+   information will be stored.
+
+   The class specified for the journal-plugin must be available in the NameNode's
+   classpath. We explain how to generate a jar file with the journal manager and
+   its dependencies, and how to put it into the classpath below.
+
+ *** <<More configuration options>> 
+
+     * <<dfs.namenode.bookkeeperjournal.output-buffer-size>> - 
+       Number of bytes a bookkeeper journal stream will buffer before
+       forcing a flush. Default is 1024.
+     
+----
+       <property>
+         <name>dfs.namenode.bookkeeperjournal.output-buffer-size</name>
+         <value>1024</value>
+       </property>
+----
+
+     * <<dfs.namenode.bookkeeperjournal.ensemble-size>> - 
+       Number of bookkeeper servers in edit log ensembles. This
+       is the number of bookkeeper servers which need to be available
+       for the edit log to be writable. Default is 3.
+
+----
+       <property>
+         <name>dfs.namenode.bookkeeperjournal.ensemble-size</name>
+         <value>3</value>
+       </property>
+----
+
+     * <<dfs.namenode.bookkeeperjournal.quorum-size>> - 
+       Number of bookkeeper servers in the write quorum. This is the
+       number of bookkeeper servers which must have acknowledged the
+       write of an entry before it is considered written. Default is 2.
+
+----
+       <property>
+         <name>dfs.namenode.bookkeeperjournal.quorum-size</name>
+         <value>2</value>
+       </property>
+----
+
+     * <<dfs.namenode.bookkeeperjournal.digestPw>> - 
+       Password to use when creating edit log segments.
+
+----
+       <property>
+        <name>dfs.namenode.bookkeeperjournal.digestPw</name>
+        <value>myPassword</value>
+       </property>
+----
+
+     * <<dfs.namenode.bookkeeperjournal.zk.session.timeout>> - 
+       Session timeout for the ZooKeeper client used by the BookKeeper Journal Manager.
+       It is recommended that this value be less than the ZKFC
+       session timeout value. Default value is 3000.
+
+----
+       <property>
+         <name>dfs.namenode.bookkeeperjournal.zk.session.timeout</name>
+         <value>3000</value>
+       </property>
+----
+
+ *** <<Building BookKeeper Journal Manager plugin jar>>
+
+     To generate the distribution packages for BK journal, do the
+     following.
+
+     $ mvn clean package -Pdist
+
+     This will generate a jar containing the BookKeeperJournalManager and all the
+     dependencies needed by the journal manager, at
+     hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar
+
+     Note that the -Pdist part of the build command is important, as otherwise
+     the dependencies would not be packaged in the jar. The dependencies included in
+     the jar are {{{http://maven.apache.org/plugins/maven-shade-plugin/}shaded}} to
+     avoid conflicts with other dependencies of the NameNode.
+
+ *** <<Putting the BookKeeperJournalManager in the NameNode classpath>>
+
+    To run an HDFS NameNode using BookKeeper as a backend, copy the bkjournal
+    jar, generated above, into the lib directory of HDFS. In the standard
+    distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
+
+    cp hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
+
+ *** <<Current limitations>> 
+
+      1) The NameNode format command will not format the BookKeeper data automatically.
+         We have to clean the data manually from the BookKeeper cluster
+         and create the /ledgers/available path in ZooKeeper.
+----
+$ zkCli.sh create /ledgers 0
+$ zkCli.sh create /ledgers/available 0
+----
+         Note:
+          bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal
+          The final part /hdfsjournal specifies the znode in ZooKeeper where
+          ledger metadata will be stored. Administrators may set this to anything
+          they wish.
+
+      2) Security in BookKeeper. BookKeeper supports neither SASL nor SSL for
+         connections between the NameNode and BookKeeper storage nodes.
+
+      3) Auto-recovery of storage node failures. Work in progress; see
+         {{{https://issues.apache.org/jira/browse/BOOKKEEPER-237 }BOOKKEEPER-237}}.
+         Currently there are tools to manually recover the data from failed storage nodes.
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/src/c++/pipes/.autom4te.cfg b/hadoop-mapreduce-project/src/c++/pipes/.autom4te.cfg
deleted file mode 100644
index d21d1c9..0000000
--- a/hadoop-mapreduce-project/src/c++/pipes/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop utils library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-
diff --git a/hadoop-mapreduce-project/src/c++/pipes/Makefile.am b/hadoop-mapreduce-project/src/c++/pipes/Makefile.am
deleted file mode 100644
index 2c91d7f..0000000
--- a/hadoop-mapreduce-project/src/c++/pipes/Makefile.am
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ACLOCAL_AMFLAGS = -I ../utils/m4
-AM_CXXFLAGS=-I$(srcdir)/api -Wall -I$(HADOOP_UTILS_PREFIX)/include
-
-# List the api header files and where they will be installed
-apidir = $(includedir)/hadoop
-api_HEADERS = \
-	api/hadoop/Pipes.hh \
-	api/hadoop/TemplateFactory.hh
-
-# Define the libaries that need to be built
-lib_LIBRARIES = libhadooppipes.a
-
-# Define the sources for lib 
-libhadooppipes_a_SOURCES = \
-	impl/HadoopPipes.cc
-
diff --git a/hadoop-mapreduce-project/src/c++/pipes/configure.ac b/hadoop-mapreduce-project/src/c++/pipes/configure.ac
deleted file mode 100644
index ac05ece0..0000000
--- a/hadoop-mapreduce-project/src/c++/pipes/configure.ac
+++ /dev/null
@@ -1,57 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-AC_PREREQ(2.59)
-AC_INIT(hadoop-pipes, 0.13.0, omalley@apache.org)
-
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([../utils/m4])
-
-AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
-
-AC_CONFIG_SRCDIR([impl/HadoopPipes.cc])
-AC_CONFIG_HEADER([impl/config.h])
-AC_CONFIG_FILES([Makefile])
-
-AC_PREFIX_DEFAULT(`pwd`/../install)
-
-USE_HADOOP_UTILS
-HADOOP_PIPES_SETUP
-CHECK_INSTALL_CFLAG
-
-# Checks for programs.
-AC_PROG_CXX
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_LANG(C++)
-AC_CHECK_HEADERS([unistd.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_HEADER_STDBOOL
-AC_C_CONST
-AC_TYPE_OFF_T
-AC_TYPE_SIZE_T
-AC_FUNC_STRERROR_R
-
-# Checks for library functions.
-AC_CHECK_FUNCS([mkdir uname])
-AC_OUTPUT
diff --git a/hadoop-mapreduce-project/src/c++/utils/.autom4te.cfg b/hadoop-mapreduce-project/src/c++/utils/.autom4te.cfg
deleted file mode 100644
index d21d1c9..0000000
--- a/hadoop-mapreduce-project/src/c++/utils/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop utils library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-
diff --git a/hadoop-mapreduce-project/src/c++/utils/Makefile.am b/hadoop-mapreduce-project/src/c++/utils/Makefile.am
deleted file mode 100644
index d99ea14..0000000
--- a/hadoop-mapreduce-project/src/c++/utils/Makefile.am
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ACLOCAL_AMFLAGS = -I m4
-AM_CXXFLAGS=-I$(srcdir)/api -Wall
-
-# List the api header files and where they will be installed
-apidir = $(includedir)/hadoop
-api_HEADERS = \
-	api/hadoop/StringUtils.hh \
-	api/hadoop/SerialUtils.hh
-
-
-# Define the libaries that need to be built
-lib_LIBRARIES = libhadooputils.a
-
-# Define the sources for lib 
-libhadooputils_a_SOURCES = \
-	impl/StringUtils.cc \
-	impl/SerialUtils.cc
-
diff --git a/hadoop-mapreduce-project/src/c++/utils/configure.ac b/hadoop-mapreduce-project/src/c++/utils/configure.ac
deleted file mode 100644
index 9283181..0000000
--- a/hadoop-mapreduce-project/src/c++/utils/configure.ac
+++ /dev/null
@@ -1,56 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-AC_PREREQ(2.59)
-AC_INIT(hadoop-utils, 0.13.0, omalley@apache.org)
-
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([m4])
-
-AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
-
-AC_CONFIG_SRCDIR([impl/SerialUtils.cc])
-AC_CONFIG_HEADER([impl/config.h])
-AC_CONFIG_FILES([Makefile])
-
-AC_PREFIX_DEFAULT(`pwd`/../install)
-
-CHECK_INSTALL_CFLAG
-HADOOP_UTILS_SETUP
-
-# Checks for programs.
-AC_PROG_CXX
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_LANG(C++)
-AC_CHECK_HEADERS([unistd.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_HEADER_STDBOOL
-AC_C_CONST
-AC_TYPE_OFF_T
-AC_TYPE_SIZE_T
-AC_FUNC_STRERROR_R
-
-# Checks for library functions.
-AC_CHECK_FUNCS([mkdir uname])
-AC_OUTPUT
diff --git a/hadoop-mapreduce-project/src/c++/utils/m4/hadoop_utils.m4 b/hadoop-mapreduce-project/src/c++/utils/m4/hadoop_utils.m4
deleted file mode 100644
index d0ed6c4..0000000
--- a/hadoop-mapreduce-project/src/c++/utils/m4/hadoop_utils.m4
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# hadoop_utils.m4
-
-# Check to see if the install program supports -C
-# If so, use "install -C" for the headers. Otherwise, every install
-# updates the timestamps on the installed headers, which causes a recompilation
-# of any downstream libraries.
-AC_DEFUN([CHECK_INSTALL_CFLAG],[
-AC_REQUIRE([AC_PROG_INSTALL])
-touch foo
-if $INSTALL -C foo bar; then
-  INSTALL_DATA="$INSTALL_DATA -C"
-fi
-rm -f foo bar
-])
-
-# Set up the things we need for compiling hadoop utils
-AC_DEFUN([HADOOP_UTILS_SETUP],[
-AC_REQUIRE([AC_GNU_SOURCE])
-AC_REQUIRE([AC_SYS_LARGEFILE])
-])
-
-# define a macro for using hadoop utils
-AC_DEFUN([USE_HADOOP_UTILS],[
-AC_REQUIRE([HADOOP_UTILS_SETUP])
-AC_ARG_WITH([hadoop-utils],
-            AS_HELP_STRING([--with-hadoop-utils=<dir>],
-                           [directory to get hadoop_utils from]),
-            [HADOOP_UTILS_PREFIX="$withval"],
-            [HADOOP_UTILS_PREFIX="\${prefix}"])
-AC_SUBST(HADOOP_UTILS_PREFIX)
-])
-
-AC_DEFUN([HADOOP_PIPES_SETUP],[
-AC_CHECK_HEADERS([pthread.h], [], 
-  AC_MSG_ERROR(Please check if you have installed the pthread library)) 
-AC_CHECK_LIB([pthread], [pthread_create], [], 
-  AC_MSG_ERROR(Cannot find libpthread.so, please check))
-AC_CHECK_LIB([crypto], [HMAC_Init], [], 
-  AC_MSG_ERROR(Cannot find libcrypto.so, please check))
-])
-
-# define a macro for using hadoop pipes
-AC_DEFUN([USE_HADOOP_PIPES],[
-AC_REQUIRE([USE_HADOOP_UTILS])
-AC_REQUIRE([HADOOP_PIPES_SETUP])
-AC_ARG_WITH([hadoop-pipes],
-            AS_HELP_STRING([--with-hadoop-pipes=<dir>],
-                           [directory to get hadoop pipes from]),
-            [HADOOP_PIPES_PREFIX="$withval"],
-            [HADOOP_PIPES_PREFIX="\${prefix}"])
-AC_SUBST(HADOOP_PIPES_PREFIX)
-])
diff --git a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java b/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
deleted file mode 100644
index 639e5373..0000000
--- a/hadoop-mapreduce-project/src/contrib/raid/src/java/org/apache/hadoop/raid/ReedSolomonCode.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.raid;
-
-public class ReedSolomonCode implements ErasureCode {
-
-  private final int stripeSize;
-  private final int paritySize;
-  private final int[] generatingPolynomial;
-  private final int PRIMITIVE_ROOT = 2;
-  private final int[] primitivePower;
-  private final GaloisField GF = GaloisField.getInstance();
-  private int[] errSignature;
-  private final int[] paritySymbolLocations;
-  private final int[] dataBuff;
-
-  public ReedSolomonCode(int stripeSize, int paritySize) {
-    assert(stripeSize + paritySize < GF.getFieldSize());
-    this.stripeSize = stripeSize;
-    this.paritySize = paritySize;
-    this.errSignature = new int[paritySize];
-    this.paritySymbolLocations = new int[paritySize];
-    this.dataBuff = new int[paritySize + stripeSize];
-    for (int i = 0; i < paritySize; i++) {
-      paritySymbolLocations[i] = i;
-    }
-
-    this.primitivePower = new int[stripeSize + paritySize];
-    // compute powers of the primitive root
-    for (int i = 0; i < stripeSize + paritySize; i++) {
-      primitivePower[i] = GF.power(PRIMITIVE_ROOT, i);
-    }
-    // compute generating polynomial
-    int[] gen = {1};
-    int[] poly = new int[2];
-    for (int i = 0; i < paritySize; i++) {
-      poly[0] = primitivePower[i];
-      poly[1] = 1;
-      gen = GF.multiply(gen, poly);
-    }
-    // generating polynomial has all generating roots
-    generatingPolynomial = gen;
-  }
-
-  @Override
-  public void encode(int[] message, int[] parity) {
-    assert(message.length == stripeSize && parity.length == paritySize);
-    for (int i = 0; i < paritySize; i++) {
-      dataBuff[i] = 0;
-    }
-    for (int i = 0; i < stripeSize; i++) {
-      dataBuff[i + paritySize] = message[i];
-    }
-    GF.remainder(dataBuff, generatingPolynomial);
-    for (int i = 0; i < paritySize; i++) {
-      parity[i] = dataBuff[i];
-    }
-  }
-
-  @Override
-  public void decode(int[] data, int[] erasedLocation, int[] erasedValue) {
-    if (erasedLocation.length == 0) {
-      return;
-    }
-    assert(erasedLocation.length == erasedValue.length);
-    for (int i = 0; i < erasedLocation.length; i++) {
-      data[erasedLocation[i]] = 0;
-    }
-    for (int i = 0; i < erasedLocation.length; i++) {
-      errSignature[i] = primitivePower[erasedLocation[i]];
-      erasedValue[i] = GF.substitute(data, primitivePower[i]);
-    }
-    GF.solveVandermondeSystem(errSignature, erasedValue, erasedLocation.length);
-  }
-
-  @Override
-  public int stripeSize() {
-    return this.stripeSize;
-  }
-
-  @Override
-  public int paritySize() {
-    return this.paritySize;
-  }
-
-  @Override
-  public int symbolSize() {
-    return (int) Math.round(Math.log(GF.getFieldSize()) / Math.log(2));
-  }
-}
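The class deleted above is the Reed-Solomon erasure coder used by the RAID contrib code, which appears to be superseded by the hadoop-hdfs-raid artifact added to hadoop-project/pom.xml further down in this patch. Its ErasureCode surface is small; the following is a rough, hypothetical Java usage sketch (a made-up (10, 4) configuration, and a parity-first codeword layout inferred from how encode() fills dataBuff), not code from this change:

  // Hypothetical illustration only; the stripe and parity sizes are made up.
  int stripeSize = 10, paritySize = 4;
  ReedSolomonCode rs = new ReedSolomonCode(stripeSize, paritySize);

  int[] message = new int[stripeSize];   // data symbols to protect
  int[] parity  = new int[paritySize];   // filled in by encode()
  rs.encode(message, parity);

  // Recovery: pass the full codeword (parity symbols first, then data,
  // mirroring dataBuff in encode()) together with the erased positions.
  int[] codeword = new int[paritySize + stripeSize];
  System.arraycopy(parity,  0, codeword, 0,          paritySize);
  System.arraycopy(message, 0, codeword, paritySize, stripeSize);
  int[] erasedLocation = {0};            // say the first symbol was lost
  int[] erasedValue    = new int[erasedLocation.length];
  rs.decode(codeword, erasedLocation, erasedValue);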
diff --git a/hadoop-mapreduce-project/src/examples/pipes/.autom4te.cfg b/hadoop-mapreduce-project/src/examples/pipes/.autom4te.cfg
deleted file mode 100644
index d21d1c9..0000000
--- a/hadoop-mapreduce-project/src/examples/pipes/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop utils library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache 
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache 
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache 
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache 
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache 
-end-language: "Autoconf"
-
diff --git a/hadoop-mapreduce-project/src/examples/pipes/Makefile.am b/hadoop-mapreduce-project/src/examples/pipes/Makefile.am
deleted file mode 100644
index 731ab1e..0000000
--- a/hadoop-mapreduce-project/src/examples/pipes/Makefile.am
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ACLOCAL_AMFLAGS = -I ../../c++/utils/m4
-AM_CXXFLAGS=-Wall -I$(HADOOP_UTILS_PREFIX)/include \
-            -I$(HADOOP_PIPES_PREFIX)/include
-LDADD=-L$(HADOOP_UTILS_PREFIX)/lib -L$(HADOOP_PIPES_PREFIX)/lib \
-      -lhadooppipes -lhadooputils
-
-bin_PROGRAMS= wordcount-simple wordcount-part wordcount-nopipe pipes-sort
-
-# Define the sources for each program
-wordcount_simple_SOURCES = \
-	impl/wordcount-simple.cc
-
-wordcount_part_SOURCES = \
-	impl/wordcount-part.cc
-
-wordcount_nopipe_SOURCES = \
-	impl/wordcount-nopipe.cc
-
-pipes_sort_SOURCES = \
-        impl/sort.cc
-
diff --git a/hadoop-mapreduce-project/src/examples/pipes/README.txt b/hadoop-mapreduce-project/src/examples/pipes/README.txt
deleted file mode 100644
index 4685304..0000000
--- a/hadoop-mapreduce-project/src/examples/pipes/README.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-To run the examples, first compile them:
-
-% ant -Dcompile.c++=yes examples
-
-and then copy the binaries to dfs:
-
-% bin/hadoop fs -put build/c++-examples/Linux-i386-32/bin /examples/bin
-
-create an input directory with text files:
-
-% bin/hadoop fs -put my-data in-dir
-
-and run the word count example:
-
-% bin/hadoop pipes -conf src/examples/pipes/conf/word.xml \
-                   -input in-dir -output out-dir
diff --git a/hadoop-mapreduce-project/src/examples/pipes/configure.ac b/hadoop-mapreduce-project/src/examples/pipes/configure.ac
deleted file mode 100644
index 7959ba6..0000000
--- a/hadoop-mapreduce-project/src/examples/pipes/configure.ac
+++ /dev/null
@@ -1,58 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-AC_PREREQ(2.59)
-AC_INIT(hadoop-pipes-examples, 0.13.0, omalley@apache.org)
-
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([../../c++/utils/m4])
-
-AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
-
-AC_CONFIG_SRCDIR([impl/wordcount-simple.cc])
-AC_CONFIG_HEADER([impl/config.h])
-AC_CONFIG_FILES([Makefile])
-
-AC_PREFIX_DEFAULT(`pwd`/../install)
-
-USE_HADOOP_PIPES
-
-# Checks for programs.
-AC_PROG_CXX
-AC_PROG_INSTALL
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_LANG(C++)
-AC_CHECK_HEADERS([unistd.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_HEADER_STDBOOL
-AC_C_CONST
-AC_TYPE_OFF_T
-AC_TYPE_SIZE_T
-AC_FUNC_STRERROR_R
-
-# Checks for library functions.
-AC_CHECK_FUNCS([mkdir uname])
-AC_CHECK_LIB([socket],[shutdown])
-AC_CHECK_LIB([nsl],[xdr_float])
-AC_OUTPUT
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 1aa3e40..ace8f75 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -259,6 +259,12 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs-raid</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-minicluster</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -380,7 +386,7 @@
       <dependency>
         <groupId>org.jboss.netty</groupId>
         <artifactId>netty</artifactId>
-        <version>3.2.3.Final</version>
+        <version>3.2.4.Final</version>
       </dependency>
 
       <dependency>
diff --git a/hadoop-project/src/site/apt/index.apt.vm b/hadoop-project/src/site/apt/index.apt.vm
index fe05eeb..32e708e 100644
--- a/hadoop-project/src/site/apt/index.apt.vm
+++ b/hadoop-project/src/site/apt/index.apt.vm
@@ -16,10 +16,10 @@
   ---
   ${maven.build.timestamp}
   
-Apache Hadoop 0.23
+Apache Hadoop ${project.version}
 
-  Apache Hadoop 0.23 consists of significant improvements over the previous 
-  stable release (hadoop-0.20.205).
+  Apache Hadoop ${project.version} consists of significant
+  improvements over the previous stable release (hadoop-1.x).
 
   Here is a short overview of the improvements to both HDFS and MapReduce.
 
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/README.txt b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/README.txt
similarity index 100%
rename from hadoop-tools/hadoop-datajoin/src/test/java/README.txt
rename to hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/README.txt
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinMapper.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
similarity index 100%
rename from hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinMapper.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinMapper.java
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinReducer.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
similarity index 100%
rename from hadoop-tools/hadoop-datajoin/src/test/java/SampleDataJoinReducer.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleDataJoinReducer.java
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/SampleTaggedMapOutput.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
similarity index 100%
rename from hadoop-tools/hadoop-datajoin/src/test/java/SampleTaggedMapOutput.java
rename to hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/SampleTaggedMapOutput.java
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml
index 9537883..15ed600 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -121,7 +121,7 @@
          <configuration>
           <archive>
            <manifest>
-            <mainClass>org.apache.hadoop.tools.HadoopArchives</mainClass>
+            <mainClass>org.apache.hadoop.mapred.gridmix.Gridmix</mainClass>
            </manifest>
          </archive>
         </configuration>
diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml
new file mode 100644
index 0000000..70875f2
--- /dev/null
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-pipes</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop Pipes</description>
+  <name>Apache Hadoop Pipes</name>
+  <packaging>pom</packaging>
+
+  <properties>
+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
+  </properties>
+
+  <profiles>
+    <profile>
+      <id>native</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>make</id>
+                <phase>compile</phase>
+                <goals><goal>run</goal></goals>
+                <configuration>
+                  <target>
+                    <mkdir dir="${project.build.directory}/native"/>
+                    <exec executable="cmake" dir="${project.build.directory}/native" 
+                        failonerror="true">
+                      <arg line="${basedir}/src/ -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model}"/>
+                    </exec>
+                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
+                      <arg line="VERBOSE=1"/>
+                    </exec>
+                  </target>
+                </configuration>
+              </execution>
+              <!-- TODO wire here native testcases
+              <execution>
+                <id>test</id>
+                <phase>test</phase>
+                <goals>
+                  <goal>test</goal>
+                </goals>
+                <configuration>
+                  <destDir>${project.build.directory}/native/target</destDir>
+                </configuration>
+              </execution>
+              -->
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles> 
+
+<!--
+  <build>
+    <plugins>
+     <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>compile</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="${basedir}/../target/native"/>
+                <copy toDir="${basedir}/../target/native">
+                  <fileset dir="${basedir}/src/main/native"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+-->
+</project>
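A usage note: the native profile above is not active by default, so the C++ pipes build would typically be requested explicitly, e.g. mvn compile -Pnative from this module; per the antrun execution it creates ${project.build.directory}/native, runs cmake against src/ with -DJVM_ARCH_DATA_MODEL, and then runs make VERBOSE=1.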
diff --git a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
new file mode 100644
index 0000000..8ab7d27
--- /dev/null
+++ b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
@@ -0,0 +1,99 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+find_package(OpenSSL REQUIRED)
+
+set(CMAKE_BUILD_TYPE Release)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -Wall -O2")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_REENTRANT -D_FILE_OFFSET_BITS=64")
+
+if (JVM_ARCH_DATA_MODEL EQUAL 32)
+    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
+    set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+        set(CMAKE_SYSTEM_PROCESSOR "i686")
+    endif ()
+endif (JVM_ARCH_DATA_MODEL EQUAL 32)
+
+function(output_directory TGT DIR)
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+    SET_TARGET_PROPERTIES(${TGT} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DIR}")
+endfunction(output_directory TGT DIR)
+
+include_directories(
+    main/native/utils/api
+    main/native/pipes/api
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${OPENSSL_INCLUDE_DIR}
+)
+
+# Example programs
+add_executable(wordcount-simple main/native/examples/impl/wordcount-simple.cc)
+target_link_libraries(wordcount-simple hadooppipes hadooputils)
+
+add_executable(wordcount-part main/native/examples/impl/wordcount-part.cc)
+target_link_libraries(wordcount-part hadooppipes hadooputils)
+
+add_executable(wordcount-nopipe main/native/examples/impl/wordcount-nopipe.cc)
+target_link_libraries(wordcount-nopipe hadooppipes hadooputils)
+
+add_executable(pipes-sort main/native/examples/impl/sort.cc)
+target_link_libraries(pipes-sort hadooppipes hadooputils)
+
+install(TARGETS wordcount-simple wordcount-part wordcount-nopipe pipes-sort 
+    RUNTIME DESTINATION bin
+)
+
+add_library(hadooputils STATIC
+    main/native/utils/impl/StringUtils.cc
+    main/native/utils/impl/SerialUtils.cc
+)
+
+install(FILES
+    main/native/utils/api/hadoop/SerialUtils.hh
+    main/native/utils/api/hadoop/StringUtils.hh
+    DESTINATION api/hadoop
+    COMPONENT headers
+)
+install(TARGETS hadooputils DESTINATION lib)
+
+add_library(hadooppipes STATIC
+    main/native/pipes/impl/HadoopPipes.cc
+)
+target_link_libraries(hadooppipes
+    ${JAVA_JVM_LIBRARY}
+    ${OPENSSL_LIBRARIES}
+    pthread
+)
+
+install(FILES
+    main/native/pipes/api/hadoop/Pipes.hh
+    main/native/pipes/api/hadoop/TemplateFactory.hh
+    DESTINATION api/hadoop
+    COMPONENT headers
+)
+install(TARGETS hadooppipes DESTINATION lib)
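This CMake build takes over from the automake/autoconf machinery deleted earlier in this patch: it yields the same static libraries (libhadooputils.a from StringUtils.cc/SerialUtils.cc, libhadooppipes.a from HadoopPipes.cc) and the same four example binaries (wordcount-simple, wordcount-part, wordcount-nopipe, pipes-sort), with the pthread and libcrypto checks from the removed hadoop_utils.m4 now expressed via find_package(OpenSSL REQUIRED) and target_link_libraries on hadooppipes.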
diff --git a/hadoop-tools/hadoop-pipes/src/main/native/examples/README.txt b/hadoop-tools/hadoop-pipes/src/main/native/examples/README.txt
new file mode 100644
index 0000000..b9448f5
--- /dev/null
+++ b/hadoop-tools/hadoop-pipes/src/main/native/examples/README.txt
@@ -0,0 +1,16 @@
+To run the examples, first compile them:
+
+% mvn install 
+
+and then copy the binaries to dfs:
+
+% hadoop fs -put target/native/wordcount-simple /examples/bin/
+
+create an input directory with text files:
+
+% hadoop fs -put my-data in-dir
+
+and run the word count example:
+
+% hadoop pipes -conf src/main/native/examples/conf/word.xml \
+                   -input in-dir -output out-dir
diff --git a/hadoop-mapreduce-project/src/examples/pipes/conf/word-part.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/conf/word-part.xml
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml
diff --git a/hadoop-mapreduce-project/src/examples/pipes/conf/word.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/conf/word.xml
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml
diff --git a/hadoop-mapreduce-project/src/examples/pipes/impl/sort.cc b/hadoop-tools/hadoop-pipes/src/main/native/examples/impl/sort.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/impl/sort.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/impl/sort.cc
diff --git a/hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-nopipe.cc b/hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-nopipe.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-nopipe.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-nopipe.cc
diff --git a/hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-part.cc b/hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-part.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-part.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-part.cc
diff --git a/hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-simple.cc b/hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-simple.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/examples/pipes/impl/wordcount-simple.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/examples/impl/wordcount-simple.cc
diff --git a/hadoop-mapreduce-project/src/c++/pipes/api/hadoop/Pipes.hh b/hadoop-tools/hadoop-pipes/src/main/native/pipes/api/hadoop/Pipes.hh
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/pipes/api/hadoop/Pipes.hh
rename to hadoop-tools/hadoop-pipes/src/main/native/pipes/api/hadoop/Pipes.hh
diff --git a/hadoop-mapreduce-project/src/c++/pipes/api/hadoop/TemplateFactory.hh b/hadoop-tools/hadoop-pipes/src/main/native/pipes/api/hadoop/TemplateFactory.hh
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/pipes/api/hadoop/TemplateFactory.hh
rename to hadoop-tools/hadoop-pipes/src/main/native/pipes/api/hadoop/TemplateFactory.hh
diff --git a/hadoop-mapreduce-project/src/c++/pipes/debug/pipes-default-gdb-commands.txt b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/pipes/debug/pipes-default-gdb-commands.txt
rename to hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt
diff --git a/hadoop-mapreduce-project/src/c++/pipes/debug/pipes-default-script b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/pipes/debug/pipes-default-script
rename to hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script
diff --git a/hadoop-mapreduce-project/src/c++/pipes/impl/HadoopPipes.cc b/hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/pipes/impl/HadoopPipes.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/pipes/impl/HadoopPipes.cc
diff --git a/hadoop-mapreduce-project/src/c++/utils/api/hadoop/SerialUtils.hh b/hadoop-tools/hadoop-pipes/src/main/native/utils/api/hadoop/SerialUtils.hh
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/utils/api/hadoop/SerialUtils.hh
rename to hadoop-tools/hadoop-pipes/src/main/native/utils/api/hadoop/SerialUtils.hh
diff --git a/hadoop-mapreduce-project/src/c++/utils/api/hadoop/StringUtils.hh b/hadoop-tools/hadoop-pipes/src/main/native/utils/api/hadoop/StringUtils.hh
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/utils/api/hadoop/StringUtils.hh
rename to hadoop-tools/hadoop-pipes/src/main/native/utils/api/hadoop/StringUtils.hh
diff --git a/hadoop-mapreduce-project/src/c++/utils/impl/SerialUtils.cc b/hadoop-tools/hadoop-pipes/src/main/native/utils/impl/SerialUtils.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/utils/impl/SerialUtils.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/utils/impl/SerialUtils.cc
diff --git a/hadoop-mapreduce-project/src/c++/utils/impl/StringUtils.cc b/hadoop-tools/hadoop-pipes/src/main/native/utils/impl/StringUtils.cc
similarity index 100%
rename from hadoop-mapreduce-project/src/c++/utils/impl/StringUtils.cc
rename to hadoop-tools/hadoop-pipes/src/main/native/utils/impl/StringUtils.cc
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTask.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTask.java
index 90eebd6..11cf12d 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTask.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/ParsedTask.java
@@ -91,7 +91,10 @@
     return failedDueToAttempt;
   }
 
-  List<ParsedTaskAttempt> obtainTaskAttempts() {
+  /**
+   * @return the list of attempts of this task.
+   */
+  public List<ParsedTaskAttempt> obtainTaskAttempts() {
     List<LoggedTaskAttempt> attempts = getAttempts();
     return convertTaskAttempts(attempts);
   }
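With obtainTaskAttempts() made public here, downstream trace-analysis code can walk a task's attempts directly instead of relying on package-private access. A minimal sketch, assuming task is a ParsedTask obtained from a parsed Rumen job trace:

  List<ParsedTaskAttempt> attempts = task.obtainTaskAttempts();
  for (ParsedTaskAttempt attempt : attempts) {
    // inspect each attempt here, e.g. gather per-attempt counters or timings
  }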
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 330cfd3..951eab1 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -70,6 +70,13 @@
       <artifactId>hadoop-gridmix</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-pipes</artifactId>
+      <scope>compile</scope>
+      <type>pom</type>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
   <build>
@@ -92,4 +99,44 @@
     </plugins>
   </build>
 
+  <profiles>
+    <profile>
+      <id>dist</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+         <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-assembly-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-assemblies</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <id>dist</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>single</goal>
+                </goals>
+                <configuration>
+                  <appendAssemblyId>false</appendAssemblyId>
+                  <attach>false</attach>
+                  <finalName>${project.artifactId}-${project.version}</finalName>
+                  <descriptorRefs>
+                    <descriptorRef>hadoop-tools</descriptorRef>
+                  </descriptorRefs>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>
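As in the pipes module, the dist profile here is off by default; when packaging is run with -Pdist, the maven-assembly-plugin resolves the hadoop-tools descriptor from the hadoop-assemblies dependency and builds the tools distribution layout under the configured final name, without attaching the result as an extra artifact.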
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index 3166b9e..bc75e2b 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -39,6 +39,7 @@
     <module>hadoop-datajoin</module>
     <module>hadoop-tools-dist</module>
     <module>hadoop-extras</module>
+    <module>hadoop-pipes</module>
   </modules>
 
   <build>