Merge remote-tracking branch 'origin/trunk' into HDFS-8966
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
index 1a5d7d0..85899e5 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
@@ -14,7 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-distro</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
index 89e8771..0edfdeb 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-hdfs-nfs-dist</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
index 6468a8a..4d508ee 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-httpfs-dist</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
index 9bbd0eb..5830bba 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-kms-dist</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
index df08c6c..5157edc 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-mapreduce-dist</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml
index 9271239..cb3d9cd 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml
@@ -12,7 +12,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-nfs-dist</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
index 8bdab7b..95b263a 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
@@ -15,7 +15,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-sls</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index 41c4fb6..b1e039f 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-src</id>
   <formats>
     <format>tar.gz</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
index 3f646e6..15c2572 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-tools</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 6d386f1..219ed81 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -14,9 +14,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>hadoop-yarn-dist</id>
   <formats>
     <format>dir</format>
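The assembly descriptor changes above all do the same thing: declare the maven-assembly-plugin 1.1.3 schema so each descriptor can be validated against the published XSD (HADOOP-12133 in the change log below). A minimal sketch of such a validation using the standard javax.xml.validation API, assuming a local copy of the XSD saved as assembly-1.1.3.xsd:

import java.io.File;
import javax.xml.XMLConstants;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;

public class AssemblyXsdCheck {
  public static void main(String[] args) throws Exception {
    // Assumed local copy of http://maven.apache.org/xsd/assembly-1.1.3.xsd
    Schema schema = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI)
        .newSchema(new File("assembly-1.1.3.xsd"));
    Validator validator = schema.newValidator();
    // validate() throws SAXException if the descriptor does not conform.
    validator.validate(new StreamSource(new File(
        "hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml")));
    System.out.println("hadoop-dist.xml is valid against assembly-1.1.3.xsd");
  }
}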
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 99d4782..0d1bce2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -249,6 +249,10 @@
     HADOOP-12385. Include nested stack trace in SaslRpcClient.getServerToken()
     (stevel)
 
+    HADOOP-12133. Add schemas to Maven Assembly XMLs (Gábor Lipták via aw)
+
+    HADOOP-12541. make re2j dependency consistent (Matthew Paduano via aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
@@ -603,6 +607,12 @@
 
       HADOOP-11921. Enhance tests for erasure coders. (Kai Zheng)
 
+      HADOOP-12327. Initialize output buffers with ZERO bytes in erasure coder.
+      (Kai Zheng via waltersu4549)
+
+      HADOOP-12047. Indicate preference not to affect input buffers during
+      coding in erasure coder. (Kai Zheng via waltersu4549)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1296,6 +1306,14 @@
     HADOOP-12519. hadoop-azure tests should avoid creating a metrics
     configuration file in the module root directory. (cnauroth)
 
+    HADOOP-12533. Introduce FileNotFoundException in WASB for read and seek API.
+    (Dushyanth via cnauroth)
+
+    HADOOP-12508. delete fails with exception when lease is held on blob.
+    (Gaurav Kanade via cnauroth)
+
+    HADOOP-12542. TestDNS fails on Windows after HADOOP-12437. (cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
@@ -2153,7 +2171,19 @@
     HADOOP-11837. AuthenticationFilter should destroy SignerSecretProvider in
     Tomcat deployments. (Bowen Zhang via wheat9)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 4e47a3f..4735c6b 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -194,7 +194,6 @@
     <dependency>
       <groupId>com.google.re2j</groupId>
       <artifactId>re2j</artifactId>
-      <version>${re2j.version}</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index bd8aa2a..d3eb0ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2079,16 +2079,17 @@
     CACHE.remove(this.key, this);
   }
 
-  /** Return the total size of all files in the filesystem.*/
-  public long getUsed() throws IOException{
-    long used = 0;
-    RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
-    while (files.hasNext()) {
-      used += files.next().getLen();
-    }
-    return used;
+  /** Return the total size of all files in the filesystem. */
+  public long getUsed() throws IOException {
+    Path path = new Path("/");
+    return getUsed(path);
   }
-  
+
+  /** Return the total size of all files under the specified path. */
+  public long getUsed(Path path) throws IOException {
+    return getContentSummary(path).getLength();
+  }
+
   /**
    * Get the block size for a particular file.
    * @param f the filename
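The FileSystem change above turns getUsed() into a wrapper around a new getUsed(Path) overload that delegates to getContentSummary(path).getLength(). A short usage sketch, assuming a configured default filesystem and a hypothetical /user directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class UsedSpaceExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Whole filesystem, as before (now computed from a content summary
    // instead of a recursive file listing).
    long totalUsed = fs.getUsed();
    // New overload: total size of all files under a specific path.
    long userUsed = fs.getUsed(new Path("/user"));
    System.out.println("total=" + totalUsed + ", /user=" + userUsed);
    fs.close();
  }
}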
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f862c74..00f6778 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -389,7 +389,13 @@
   public long getUsed() throws IOException{
     return fs.getUsed();
   }
-  
+
+  /** Return the total size of all files under the specified path. */
+  @Override
+  public long getUsed(Path path) throws IOException {
+    return fs.getUsed(path);
+  }
+
   @Override
   public long getDefaultBlockSize() {
     return fs.getDefaultBlockSize();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index 868b8dc..ea5e6a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -1237,6 +1237,12 @@
     return fs.getUsed();
   }
 
+  /** Return the total size of all files under the specified path. */
+  @Override
+  public long getUsed(Path path) throws IOException {
+    return fs.getUsed(path);
+  }
+
   @SuppressWarnings("deprecation")
   @Override
   public long getDefaultBlockSize() {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 35e9492..b195216 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -22,6 +22,8 @@
 import org.apache.hadoop.conf.Configured;
 
 import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -32,14 +34,60 @@
 public abstract class AbstractRawErasureCoder
     extends Configured implements RawErasureCoder {
 
+  private static byte[] emptyChunk = new byte[4096];
   private final int numDataUnits;
   private final int numParityUnits;
   private final int numAllUnits;
+  private final Map<CoderOption, Object> coderOptions;
 
   public AbstractRawErasureCoder(int numDataUnits, int numParityUnits) {
     this.numDataUnits = numDataUnits;
     this.numParityUnits = numParityUnits;
     this.numAllUnits = numDataUnits + numParityUnits;
+    this.coderOptions = new HashMap<>(3);
+
+    coderOptions.put(CoderOption.PREFER_DIRECT_BUFFER, preferDirectBuffer());
+    coderOptions.put(CoderOption.ALLOW_CHANGE_INPUTS, false);
+    coderOptions.put(CoderOption.ALLOW_VERBOSE_DUMP, false);
+  }
+
+  @Override
+  public Object getCoderOption(CoderOption option) {
+    if (option == null) {
+      throw new HadoopIllegalArgumentException("Invalid option");
+    }
+    return coderOptions.get(option);
+  }
+
+  @Override
+  public void setCoderOption(CoderOption option, Object value) {
+    if (option == null || value == null) {
+      throw new HadoopIllegalArgumentException(
+          "Invalid option or option value");
+    }
+    if (option.isReadOnly()) {
+      throw new HadoopIllegalArgumentException(
+          "The option is read-only: " + option.name());
+    }
+
+    coderOptions.put(option, value);
+  }
+
+  /**
+   * Return a shared chunk buffer of all-zero bytes that is at least the
+   * desired length, growing the shared buffer if necessary.
+   * @param leastLength the minimum length required
+   * @return an all-zero chunk of at least leastLength bytes
+   */
+  protected static byte[] getEmptyChunk(int leastLength) {
+    if (emptyChunk.length >= leastLength) {
+      return emptyChunk; // Common case, no allocation needed
+    }
+
+    synchronized (AbstractRawErasureCoder.class) {
+      emptyChunk = new byte[leastLength];
+    }
+
+    return emptyChunk;
   }
 
   @Override
@@ -57,13 +105,35 @@
   }
 
   @Override
-  public boolean preferDirectBuffer() {
+  public void release() {
+    // Nothing to do by default
+  }
+
+  /**
+   * Tell if a direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * byte arrays. It returns false by default.
+   * @return true if a native (direct) buffer is preferred for performance,
+   * otherwise false.
+   */
+  protected boolean preferDirectBuffer() {
     return false;
   }
 
-  @Override
-  public void release() {
-    // Nothing to do by default
+  protected boolean isAllowingChangeInputs() {
+    Object value = getCoderOption(CoderOption.ALLOW_CHANGE_INPUTS);
+    if (value != null && value instanceof Boolean) {
+      return (boolean) value;
+    }
+    return false;
+  }
+
+  protected boolean isAllowingVerboseDump() {
+    Object value = getCoderOption(CoderOption.ALLOW_VERBOSE_DUMP);
+    if (value != null && value instanceof Boolean) {
+      return (boolean) value;
+    }
+    return false;
   }
 
   /**
@@ -73,11 +143,9 @@
    * @return the buffer itself, with ZERO bytes written, the position and limit
    *         are not changed after the call
    */
-  protected ByteBuffer resetBuffer(ByteBuffer buffer) {
+  protected ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
     int pos = buffer.position();
-    for (int i = pos; i < buffer.limit(); ++i) {
-      buffer.put((byte) 0);
-    }
+    buffer.put(getEmptyChunk(len), 0, len);
     buffer.position(pos);
 
     return buffer;
@@ -90,9 +158,8 @@
    * @return the buffer itself
    */
   protected byte[] resetBuffer(byte[] buffer, int offset, int len) {
-    for (int i = offset; i < len; ++i) {
-      buffer[i] = (byte) 0;
-    }
+    byte[] empty = getEmptyChunk(len);
+    System.arraycopy(empty, 0, buffer, offset, len);
 
     return buffer;
   }
@@ -104,9 +171,10 @@
    * @param allowNull whether to allow any element to be null or not
    * @param dataLen the length of data available in the buffer to ensure with
    * @param isDirectBuffer is direct buffer or not to ensure with
+   * @param isOutputs whether the buffers are output buffers
    */
-  protected void ensureLengthAndType(ByteBuffer[] buffers, boolean allowNull,
-                                     int dataLen, boolean isDirectBuffer) {
+  protected void checkParameterBuffers(ByteBuffer[] buffers, boolean
+      allowNull, int dataLen, boolean isDirectBuffer, boolean isOutputs) {
     for (ByteBuffer buffer : buffers) {
       if (buffer == null && !allowNull) {
         throw new HadoopIllegalArgumentException(
@@ -120,18 +188,23 @@
           throw new HadoopIllegalArgumentException(
               "Invalid buffer, isDirect should be " + isDirectBuffer);
         }
+        if (isOutputs) {
+          resetBuffer(buffer, dataLen);
+        }
       }
     }
   }
 
   /**
-   * Check and ensure the buffers are of the length specified by dataLen.
+   * Check and ensure the buffers are of the length specified by dataLen. If
+   * they are output buffers, ensure they are zeroed.
    * @param buffers the buffers to check
    * @param allowNull whether to allow any element to be null or not
    * @param dataLen the length of data available in the buffer to ensure with
+   * @param isOutputs whether the buffers are output buffers
    */
-  protected void ensureLength(byte[][] buffers,
-                              boolean allowNull, int dataLen) {
+  protected void checkParameterBuffers(byte[][] buffers, boolean allowNull,
+                                       int dataLen, boolean isOutputs) {
     for (byte[] buffer : buffers) {
       if (buffer == null && !allowNull) {
         throw new HadoopIllegalArgumentException(
@@ -139,6 +212,8 @@
       } else if (buffer != null && buffer.length != dataLen) {
         throw new HadoopIllegalArgumentException(
             "Invalid buffer not of length " + dataLen);
+      } else if (isOutputs) {
+        resetBuffer(buffer, 0, dataLen);
       }
     }
   }
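Per HADOOP-12327, the coder above now zeroes output buffers with bulk copies from a shared, lazily grown zero chunk instead of per-byte loops. A standalone sketch of that pattern (an illustration only, not the Hadoop class itself):

import java.util.Arrays;

public final class ZeroChunks {
  private static byte[] zeroChunk = new byte[4096];

  /** Return a shared all-zero array of at least leastLength bytes, growing it if needed. */
  static byte[] getZeroChunk(int leastLength) {
    if (zeroChunk.length >= leastLength) {
      return zeroChunk; // common case, no allocation
    }
    synchronized (ZeroChunks.class) {
      if (zeroChunk.length < leastLength) {
        zeroChunk = new byte[leastLength];
      }
    }
    return zeroChunk;
  }

  /** Zero buffer[offset, offset + len) with one bulk copy instead of a byte loop. */
  static void reset(byte[] buffer, int offset, int len) {
    System.arraycopy(getZeroChunk(len), 0, buffer, offset, len);
  }

  public static void main(String[] args) {
    byte[] output = new byte[16];
    Arrays.fill(output, (byte) 7);
    reset(output, 4, 8);
    System.out.println(Arrays.toString(output)); // middle eight bytes are now zero
  }
}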
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java
index a99730d..2cfb57c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java
@@ -48,8 +48,8 @@
     if (dataLen == 0) {
       return;
     }
-    ensureLengthAndType(inputs, true, dataLen, usingDirectBuffer);
-    ensureLengthAndType(outputs, false, dataLen, usingDirectBuffer);
+    checkParameterBuffers(inputs, true, dataLen, usingDirectBuffer, false);
+    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
 
     if (usingDirectBuffer) {
       doDecode(inputs, erasedIndexes, outputs);
@@ -106,8 +106,8 @@
     if (dataLen == 0) {
       return;
     }
-    ensureLength(inputs, true, dataLen);
-    ensureLength(outputs, false, dataLen);
+    checkParameterBuffers(inputs, true, dataLen, false);
+    checkParameterBuffers(outputs, false, dataLen, true);
 
     int[] inputOffsets = new int[inputs.length]; // ALL ZERO
     int[] outputOffsets = new int[outputs.length]; // ALL ZERO
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java
index 99c754e..13c895c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java
@@ -45,8 +45,8 @@
     if (dataLen == 0) {
       return;
     }
-    ensureLengthAndType(inputs, false, dataLen, usingDirectBuffer);
-    ensureLengthAndType(outputs, false, dataLen, usingDirectBuffer);
+    checkParameterBuffers(inputs, false, dataLen, usingDirectBuffer, false);
+    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
 
     if (usingDirectBuffer) {
       doEncode(inputs, outputs);
@@ -93,8 +93,8 @@
     if (dataLen == 0) {
       return;
     }
-    ensureLength(inputs, false, dataLen);
-    ensureLength(outputs, false, dataLen);
+    checkParameterBuffers(inputs, false, dataLen, false);
+    checkParameterBuffers(outputs, false, dataLen, true);
 
     int[] inputOffsets = new int[inputs.length]; // ALL ZERO
     int[] outputOffsets = new int[outputs.length]; // ALL ZERO
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderOption.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderOption.java
new file mode 100644
index 0000000..e4d97ca
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderOption.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * Supported erasure coder options.
+ */
+public enum CoderOption {
+  /* Whether a direct buffer is preferred, for performance reasons */
+  PREFER_DIRECT_BUFFER(true),    // READ-ONLY
+  /**
+   * Allow changing input buffer content (not positions).
+   * May give better performance if allowed.
+   */
+  ALLOW_CHANGE_INPUTS(false),    // READ-WRITE
+  /* Whether to allow dumping verbose debug info */
+  ALLOW_VERBOSE_DUMP(false);     // READ-WRITE
+
+  private boolean isReadOnly = false;
+
+  CoderOption(boolean isReadOnly) {
+    this.isReadOnly = isReadOnly;
+  }
+
+  public boolean isReadOnly() {
+    return isReadOnly;
+  }
+};
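With CoderOption in place, callers tune a raw coder through the generic getCoderOption/setCoderOption pair instead of dedicated accessors. A hedged usage sketch; the two-argument RSRawEncoder constructor is assumed from the existing code rather than shown in this diff:

import org.apache.hadoop.io.erasurecode.rawcoder.CoderOption;
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class CoderOptionExample {
  public static void main(String[] args) {
    // Assumed (numDataUnits, numParityUnits) constructor, e.g. RS(6,3).
    RawErasureEncoder encoder = new RSRawEncoder(6, 3);

    // Read-only option: tells the caller how to allocate coding buffers.
    Object preferDirect = encoder.getCoderOption(CoderOption.PREFER_DIRECT_BUFFER);
    System.out.println("prefer direct buffers: " + preferDirect);

    // Read-write option: let the coder modify input buffers for speed.
    encoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, true);

    // Writing a read-only option is rejected with HadoopIllegalArgumentException.
    try {
      encoder.setCoderOption(CoderOption.PREFER_DIRECT_BUFFER, true);
    } catch (Exception e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}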
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java
index 1acaab9..87347c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java
@@ -206,7 +206,7 @@
         if (erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
           found = true;
           adjustedDirectBufferOutputsParameter[j] =
-              resetBuffer(outputs[outputIdx++]);
+              resetBuffer(outputs[outputIdx++], dataLen);
         }
       }
       if (!found) {
@@ -220,7 +220,7 @@
         ByteBuffer buffer = checkGetDirectBuffer(bufferIdx, dataLen);
         buffer.position(0);
         buffer.limit(dataLen);
-        adjustedDirectBufferOutputsParameter[i] = resetBuffer(buffer);
+        adjustedDirectBufferOutputsParameter[i] = resetBuffer(buffer, dataLen);
         bufferIdx++;
       }
     }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java
index 813a312..0f25b2a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
 
 import java.nio.ByteBuffer;
+import java.util.Arrays;
 
 /**
  * A raw erasure encoder in RS code scheme in pure Java in case native one
@@ -54,8 +55,26 @@
   protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
     // parity units + data units
     ByteBuffer[] all = new ByteBuffer[outputs.length + inputs.length];
-    System.arraycopy(outputs, 0, all, 0, outputs.length);
-    System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+
+    if (isAllowingChangeInputs()) {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    } else {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+
+      /**
+       * Note: if this coder were ever used heavily in a production system,
+       * this could be optimized to cache and reuse the newly allocated
+       * buffers instead of reallocating them on every call.
+       */
+      ByteBuffer tmp;
+      for (int i = 0; i < inputs.length; i++) {
+        tmp = ByteBuffer.allocate(inputs[i].remaining());
+        tmp.put(inputs[i]);
+        tmp.flip();
+        all[outputs.length + i] = tmp;
+      }
+    }
 
     // Compute the remainder
     RSUtil.GF.remainder(all, generatingPolynomial);
@@ -67,15 +86,26 @@
                           int[] outputOffsets) {
     // parity units + data units
     byte[][] all = new byte[outputs.length + inputs.length][];
-    System.arraycopy(outputs, 0, all, 0, outputs.length);
-    System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    int[] allOffsets = new int[outputOffsets.length + inputOffsets.length];
 
-    int[] offsets = new int[inputOffsets.length + outputOffsets.length];
-    System.arraycopy(outputOffsets, 0, offsets, 0, outputOffsets.length);
-    System.arraycopy(inputOffsets, 0, offsets,
-        outputOffsets.length, inputOffsets.length);
+    if (isAllowingChangeInputs()) {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+
+      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
+      System.arraycopy(inputOffsets, 0, allOffsets,
+          outputOffsets.length, inputOffsets.length);
+    } else {
+      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
+
+      for (int i = 0; i < inputs.length; i++) {
+        all[outputs.length + i] = Arrays.copyOfRange(inputs[i],
+            inputOffsets[i], inputOffsets[i] + dataLen);
+      }
+    }
 
     // Compute the remainder
-    RSUtil.GF.remainder(all, offsets, dataLen, generatingPolynomial);
+    RSUtil.GF.remainder(all, allOffsets, dataLen, generatingPolynomial);
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
index 082c9c3..dbe2da9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
@@ -38,6 +38,20 @@
 public interface RawErasureCoder extends Configurable {
 
   /**
+   * Get a coder option value.
+   * @param option the option to query
+   * @return the current value of the option
+   */
+  public Object getCoderOption(CoderOption option);
+
+  /**
+   * Set a coder option value.
+   * @param option the option to set
+   * @param value the value to set for the option
+   */
+  public void setCoderOption(CoderOption option, Object value);
+
+  /**
    * The number of data input units for the coding. A unit can be a byte,
    * chunk or buffer or even a block.
    * @return count of data input units
@@ -52,15 +66,6 @@
   public int getNumParityUnits();
 
   /**
-   * Tell if direct buffer is preferred or not. It's for callers to
-   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
-   * bytes array. It will return false by default.
-   * @return true if native buffer is preferred for performance consideration,
-   * otherwise false.
-   */
-  public boolean preferDirectBuffer();
-
-  /**
    * Should be called when release this coder. Good chance to release encoding
    * or decoding buffers
    */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java
index 7a07b71..ab322fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java
@@ -54,24 +54,27 @@
    * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
    * buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
+   * If the coder option ALLOW_CHANGE_INPUTS is set to true (false by default),
+   * the content of the input buffers may change after the call, depending on
+   * the concrete implementation. In any case, the positions of the input
+   * buffers will have moved forward.
+   *
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready to be read after the call
    */
-  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
+  void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                      ByteBuffer[] outputs);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
-   * @param inputs inputs to read data from, contents may change after the call
+   *
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready to be read after the call
    */
-  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs);
+  void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
@@ -79,12 +82,11 @@
    * Note, for both input and output ECChunks, no mixing of on-heap buffers and
    * direct buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
+   * @param inputs input buffers to read data from
    * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs outputs to write into for data generated according to
-   *                erasedIndexes, ready for reading the result data from after
-   *                the call
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready to be read after the call
    */
-  public void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs);
+  void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs);
 
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java
index d451eb9..91ef714 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java
@@ -38,29 +38,32 @@
    * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
    * buffers are allowed.
    *
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs
+   * If the coder option ALLOW_CHANGE_INPUTS is set to true (false by default),
+   * the content of the input buffers may change after the call, depending on
+   * the concrete implementation. In any case, the positions of the input
+   * buffers will have moved forward.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, ready to be
+   *                read after the call
    */
-  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs);
+  void encode(ByteBuffer[] inputs, ByteBuffer[] outputs);
 
   /**
-   * Encode with inputs and generates outputs
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs outputs to write into for data generated, ready for reading
-   *                the result data from after the call
+   * Encode with inputs and generate outputs. See above for more details.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, ready to be
+   *                read after the call
    */
-  public void encode(byte[][] inputs, byte[][] outputs);
+  void encode(byte[][] inputs, byte[][] outputs);
 
   /**
-   * Encode with inputs and generates outputs.
+   * Encode with inputs and generate outputs. See above for more details.
    *
-   * Note, for both input and output ECChunks, no mixing of on-heap buffers and
-   * direct buffers are allowed.
-   *
-   * @param inputs inputs to read data from, contents may change after the call
-   * @param outputs outputs to write into for data generated, ready for reading
-   *                the result data from after the call
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, ready to be
+   *                read after the call
    */
-  public void encode(ECChunk[] inputs, ECChunk[] outputs);
+  void encode(ECChunk[] inputs, ECChunk[] outputs);
 
 }
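The javadoc rewritten above fixes the buffer contract: input positions always move forward across a call, while input contents stay intact only when ALLOW_CHANGE_INPUTS is left at its default of false. A small caller-side sketch of that contract, mirroring what the tests later in this patch do with markChunks/restoreChunksFromMark:

import java.nio.ByteBuffer;

import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class EncodeInputHandling {
  /**
   * Encode while keeping the caller's view of the inputs readable afterwards.
   * Positions always advance across encode(), so mark them first and reset
   * them after; contents are only guaranteed unchanged when the coder's
   * ALLOW_CHANGE_INPUTS option is left at its default of false.
   */
  static void encodePreservingPositions(RawErasureEncoder encoder,
      ByteBuffer[] inputs, ByteBuffer[] outputs) {
    for (ByteBuffer in : inputs) {
      in.mark();                      // remember the current read position
    }
    encoder.encode(inputs, outputs);  // input positions move forward here
    for (ByteBuffer in : inputs) {
      in.reset();                     // rewind so the data can be read again
    }
  }
}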
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java
index f11dd9f..61017dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java
@@ -39,7 +39,6 @@
   protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
                           ByteBuffer[] outputs) {
     ByteBuffer output = outputs[0];
-    resetBuffer(output);
 
     int erasedIdx = erasedIndexes[0];
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java
index bc1ae90..646fc17 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java
@@ -37,7 +37,6 @@
 
   protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
     ByteBuffer output = outputs[0];
-    resetBuffer(output);
 
     // Get the first buffer's data.
     int iIdx, oIdx;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index f978ae7..171d52a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -139,7 +139,17 @@
       Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
     return new RemoteExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
   }
-  
+
+  /**
+   * A retry policy for exceptions other than RemoteException.
+   */
+  public static final RetryPolicy retryOtherThanRemoteException(
+      RetryPolicy defaultPolicy,
+      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+    return new OtherThanRemoteExceptionDependentRetry(defaultPolicy,
+        exceptionToPolicyMap);
+  }
+
   public static final RetryPolicy failoverOnNetworkException(int maxFailovers) {
     return failoverOnNetworkException(TRY_ONCE_THEN_FAIL, maxFailovers);
   }
@@ -489,7 +499,37 @@
       return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
     }
   }
-  
+
+  static class OtherThanRemoteExceptionDependentRetry implements RetryPolicy {
+
+    private RetryPolicy defaultPolicy;
+    private Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;
+
+    public OtherThanRemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
+        Map<Class<? extends Exception>,
+        RetryPolicy> exceptionToPolicyMap) {
+      this.defaultPolicy = defaultPolicy;
+      this.exceptionToPolicyMap = exceptionToPolicyMap;
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isIdempotentOrAtMostOnce) throws Exception {
+      RetryPolicy policy = null;
+      // RemoteException is not matched; fall through to the default policy
+      if (e instanceof RemoteException) {
+        // do nothing
+      } else {
+        policy = exceptionToPolicyMap.get(e.getClass());
+      }
+      if (policy == null) {
+        policy = defaultPolicy;
+      }
+      return policy.shouldRetry(
+          e, retries, failovers, isIdempotentOrAtMostOnce);
+    }
+  }
+
   static class ExponentialBackoffRetry extends RetryLimited {
     
     public ExponentialBackoffRetry(
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index 8be7e35..b197575 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -44,6 +44,7 @@
    * {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT}
    */
   private final String context;
+
   /** The caller's signature for validation.
    *
    * The signature is optional. The null or empty signature will be abandoned.
@@ -58,10 +59,6 @@
     this.signature = builder.signature;
   }
 
-  public boolean isValid() {
-    return context != null;
-  }
-
   public String getContext() {
     return context;
   }
@@ -71,6 +68,11 @@
         null : Arrays.copyOf(signature, signature.length);
   }
 
+  @InterfaceAudience.Private
+  public boolean isContextValid() {
+    return context != null && !context.isEmpty();
+  }
+
   @Override
   public int hashCode() {
     return new HashCodeBuilder().append(context).toHashCode();
@@ -92,9 +94,10 @@
           .isEquals();
     }
   }
+
   @Override
   public String toString() {
-    if (!isValid()) {
+    if (!isContextValid()) {
       return "";
     }
     String str = context;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
index 4bfcd66..1a5acba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
@@ -180,7 +180,7 @@
 
     // Add caller context if it is not null
     CallerContext callerContext = CallerContext.getCurrent();
-    if (callerContext != null && callerContext.isValid()) {
+    if (callerContext != null && callerContext.isContextValid()) {
       RPCCallerContextProto.Builder contextBuilder = RPCCallerContextProto
           .newBuilder().setContext(callerContext.getContext());
       if (callerContext.getSignature() != null) {
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 3e01fe3..2e24aba 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -240,6 +240,7 @@
 | `LockQueueLength` | Number of threads waiting to acquire FSNameSystem lock |
 | `TotalSyncCount` | Total number of sync operations performed by edit log |
 | `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
+| `NameDirSize` | NameNode name directories size in bytes |
 
 JournalNode
 -----------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 0d2b5ad..62a0b9c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -64,6 +64,8 @@
   private static int FIXED_DATA_GENERATOR = 0;
   protected byte[][] fixedData;
 
+  protected boolean allowChangeInputs;
+
   protected int getChunkSize() {
     return chunkSize;
   }
@@ -253,6 +255,22 @@
     }
   }
 
+  protected void markChunks(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().mark();
+      }
+    }
+  }
+
+  protected void restoreChunksFromMark(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().reset();
+      }
+    }
+  }
+
   /**
    * Clone chunks along with copying the associated data. It respects how the
    * chunk buffer is allocated, direct or non-direct. It avoids affecting the
@@ -277,6 +295,10 @@
    * @return a new chunk
    */
   protected ECChunk cloneChunkWithData(ECChunk chunk) {
+    if (chunk == null) {
+      return null;
+    }
+
     ByteBuffer srcBuffer = chunk.getBuffer();
 
     byte[] bytesArr = new byte[srcBuffer.remaining()];
@@ -453,14 +475,16 @@
     byte[][] bytesArr = new byte[chunks.length][];
 
     for (int i = 0; i < chunks.length; i++) {
-      bytesArr[i] = chunks[i].toBytesArray();
+      if (chunks[i] != null) {
+        bytesArr[i] = chunks[i].toBytesArray();
+      }
     }
 
     return bytesArr;
   }
 
   /**
-   * Dump all the settings used in the test case if allowDump is enabled.
+   * Dump all the settings used in the test case if isAllowingVerboseDump is enabled.
    */
   protected void dumpSetting() {
     if (allowDump) {
@@ -473,14 +497,16 @@
               append(Arrays.toString(erasedDataIndexes));
       sb.append(" erasedParityIndexes=").
               append(Arrays.toString(erasedParityIndexes));
-      sb.append(" usingDirectBuffer=").append(usingDirectBuffer).append("\n");
+      sb.append(" usingDirectBuffer=").append(usingDirectBuffer);
+      sb.append(" isAllowingChangeInputs=").append(allowChangeInputs);
+      sb.append("\n");
 
       System.out.println(sb.toString());
     }
   }
 
   /**
-   * Dump chunks prefixed with a header if allowDump is enabled.
+   * Dump chunks prefixed with a header if isAllowingVerboseDump is enabled.
    * @param header
    * @param chunks
    */
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java
index 2b7a3c4..ec93d44 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestRawCoderBase.java
@@ -68,9 +68,9 @@
      * The following runs will use 3 different chunkSize for inputs and outputs,
      * to verify the same encoder/decoder can process variable width of data.
      */
-    performTestCoding(baseChunkSize, true, false, false);
-    performTestCoding(baseChunkSize - 17, false, false, false);
-    performTestCoding(baseChunkSize + 16, true, false, false);
+    performTestCoding(baseChunkSize, true, false, false, false);
+    performTestCoding(baseChunkSize - 17, false, false, false, true);
+    performTestCoding(baseChunkSize + 16, true, false, false, false);
   }
 
   /**
@@ -82,7 +82,7 @@
     prepareCoders();
 
     try {
-      performTestCoding(baseChunkSize, false, true, false);
+      performTestCoding(baseChunkSize, false, true, false, true);
       Assert.fail("Encoding test with bad input should fail");
     } catch (Exception e) {
       // Expected
@@ -98,7 +98,7 @@
     prepareCoders();
 
     try {
-      performTestCoding(baseChunkSize, false, false, true);
+      performTestCoding(baseChunkSize, false, false, true, true);
       Assert.fail("Decoding test with bad output should fail");
     } catch (Exception e) {
       // Expected
@@ -123,9 +123,11 @@
   }
 
   private void performTestCoding(int chunkSize, boolean usingSlicedBuffer,
-                                 boolean useBadInput, boolean useBadOutput) {
+                                 boolean useBadInput, boolean useBadOutput,
+                                 boolean allowChangeInputs) {
     setChunkSize(chunkSize);
     prepareBufferAllocator(usingSlicedBuffer);
+    setAllowChangeInputs(allowChangeInputs);
 
     dumpSetting();
 
@@ -141,10 +143,16 @@
     // Backup all the source chunks for later recovering because some coders
     // may affect the source data.
     ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
+    markChunks(dataChunks);
 
     encoder.encode(dataChunks, parityChunks);
     dumpChunks("Encoded parity chunks", parityChunks);
 
+    if (!allowChangeInputs) {
+      restoreChunksFromMark(dataChunks);
+      compareAndVerify(clonedDataChunks, dataChunks);
+    }
+
     // Backup and erase some chunks
     ECChunk[] backupChunks = backupAndEraseChunks(clonedDataChunks, parityChunks);
 
@@ -160,14 +168,31 @@
       corruptSomeChunk(recoveredChunks);
     }
 
+    ECChunk[] clonedInputChunks = null;
+    if (!allowChangeInputs) {
+      markChunks(inputChunks);
+      clonedInputChunks = cloneChunksWithData(inputChunks);
+    }
+
     dumpChunks("Decoding input chunks", inputChunks);
     decoder.decode(inputChunks, getErasedIndexesForDecoding(), recoveredChunks);
     dumpChunks("Decoded/recovered chunks", recoveredChunks);
 
+    if (!allowChangeInputs) {
+      restoreChunksFromMark(inputChunks);
+      compareAndVerify(clonedInputChunks, inputChunks);
+    }
+
     // Compare
     compareAndVerify(backupChunks, recoveredChunks);
   }
 
+  private void setAllowChangeInputs(boolean allowChangeInputs) {
+    this.allowChangeInputs = allowChangeInputs;
+    encoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, allowChangeInputs);
+    decoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, allowChangeInputs);
+  }
+
   private void prepareCoders() {
     if (encoder == null) {
       encoder = createEncoder();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java
index 48463ad..3b07b24 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java
@@ -29,6 +29,7 @@
   public void setup() {
     this.encoderClass = XORRawEncoder.class;
     this.decoderClass = XORRawDecoder.class;
+    setAllowDump(false);
   }
 
   @Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 81f3a9b..35a45b4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -22,6 +22,7 @@
 import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryOtherThanRemoteException;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
@@ -29,6 +30,7 @@
 import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
 import static org.junit.Assert.*;
 
+import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -213,8 +215,29 @@
     } catch (RemoteException e) {
       // expected
     }
-  }  
-  
+  }
+
+  @Test
+  public void testRetryOtherThanRemoteException() throws Throwable {
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+        Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(
+            IOException.class, RETRY_FOREVER);
+
+    UnreliableInterface unreliable = (UnreliableInterface)
+        RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+            retryOtherThanRemoteException(TRY_ONCE_THEN_FAIL,
+                exceptionToPolicyMap));
+    // should retry with local IOException.
+    unreliable.failsOnceWithIOException();
+    try {
+      // won't get retry on remote exception
+      unreliable.failsOnceWithRemoteException();
+      fail("Should fail");
+    } catch (RemoteException e) {
+      // expected
+    }
+  }
+
   @Test
   public void testRetryInterruptible() throws Throwable {
     final UnreliableInterface unreliable = (UnreliableInterface)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index ce9c16e..9387772 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -26,6 +26,8 @@
 
   private int failsOnceInvocationCount,
     failsOnceWithValueInvocationCount,
+    failsOnceIOExceptionInvocationCount,
+    failsOnceRemoteExceptionInvocationCount,
     failsTenTimesInvocationCount,
     succeedsOnceThenFailsCount,
     succeedsOnceThenFailsIdempotentCount,
@@ -90,6 +92,21 @@
   }
 
   @Override
+  public void failsOnceWithIOException() throws IOException {
+    if (failsOnceIOExceptionInvocationCount++ == 0) {
+      throw new IOException("test exception for failsOnceWithIOException");
+    }
+  }
+
+  @Override
+  public void failsOnceWithRemoteException() throws RemoteException {
+    if (failsOnceRemoteExceptionInvocationCount++ == 0) {
+      throw new RemoteException(IOException.class.getName(),
+          "test exception for failsOnceWithRemoteException");
+    }
+  }
+
+  @Override
   public void failsTenTimesThenSucceeds() throws UnreliableException {
     if (failsTenTimesInvocationCount++ < 10) {
       throw new UnreliableException();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
index 3fbe11a..6c9c153 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
@@ -54,6 +54,9 @@
   void alwaysFailsWithFatalException() throws FatalException;
   void alwaysFailsWithRemoteFatalException() throws RemoteException;
 
+  void failsOnceWithIOException() throws IOException;
+  void failsOnceWithRemoteException() throws RemoteException;
+
   void failsOnceThenSucceeds() throws UnreliableException;
   boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
index b26c7ca..a0bfe73 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
@@ -30,6 +30,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Time;
 
 import org.junit.Test;
@@ -37,6 +38,7 @@
 import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
 /**
  * Test host name and IP resolution and caching.
@@ -185,13 +187,17 @@
    *
    * This test may fail on some misconfigured test machines that don't have
    * an entry for "localhost" in their hosts file. This entry is correctly
-   * configured out of the box on common Linux distributions, OS X and
-   * Windows.
+   * configured out of the box on common Linux distributions and OS X.
+   *
+   * Windows refuses to resolve 127.0.0.1 to "localhost" despite the presence of
+   * this entry in the hosts file.  We skip the test on Windows to avoid
+   * reporting a spurious failure.
    *
    * @throws Exception
    */
   @Test (timeout=60000)
   public void testLookupWithHostsFallback() throws Exception {
+    assumeTrue(!Shell.WINDOWS);
     final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
 
     try {
@@ -231,7 +237,7 @@
 
   private String getLoopbackInterface() throws SocketException {
     return NetworkInterface.getByInetAddress(
-        InetAddress.getLoopbackAddress()).getDisplayName();
+        InetAddress.getLoopbackAddress()).getName();
   }
 
   /**
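A minimal sketch of the platform-skip pattern used in the hunk above: org.junit.Assume turns a known-bad environment into a skipped test rather than a failure. The test body here is illustrative only.

import static org.junit.Assume.assumeTrue;

import org.apache.hadoop.util.Shell;
import org.junit.Test;

public class WindowsSkipSketch {
  @Test(timeout = 60000)
  public void testLocalhostLookup() {
    // Skipped (not failed) on Windows, mirroring TestDNS#testLookupWithHostsFallback.
    assumeTrue(!Shell.WINDOWS);
    // ... assertions that rely on the hosts-file "localhost" entry ...
  }
}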
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 63c16d1..c70b890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -51,6 +51,27 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mock-server</groupId>
+      <artifactId>mockserver-netty</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 7856133..7433256 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -892,7 +892,6 @@
 
     @Override
     void decode() {
-      // TODO no copy for data chunks. this depends on HADOOP-12047
       final int span = (int) alignedStripe.getSpanInBlock();
       for (int i = 0; i < alignedStripe.chunks.length; i++) {
         if (alignedStripe.chunks[i] != null &&
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 884fa42..f5bae2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -221,9 +221,6 @@
     private void clear() {
       for (int i = 0; i< numAllBlocks; i++) {
         buffers[i].clear();
-        if (i >= numDataBlocks) {
-          Arrays.fill(buffers[i].array(), (byte) 0);
-        }
       }
     }
 
@@ -844,6 +841,11 @@
 
   void writeParityCells() throws IOException {
     final ByteBuffer[] buffers = cellBuffers.getBuffers();
+    // Skip encoding and writing parity cells if there are no healthy parity
+    // streamers.
+    if (!checkAnyParityStreamerIsHealthy()) {
+      return;
+    }
     //encode the data cells
     encode(encoder, numDataBlocks, buffers);
     for (int i = numDataBlocks; i < numAllBlocks; i++) {
@@ -852,6 +854,19 @@
     cellBuffers.clear();
   }
 
+  private boolean checkAnyParityStreamerIsHealthy() {
+    for (int i = numDataBlocks; i < numAllBlocks; i++) {
+      if (streamers.get(i).isHealthy()) {
+        return true;
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Skips encoding and writing parity cells as there are "
+          + "no healthy parity data streamers: " + streamers);
+    }
+    return false;
+  }
+
   void writeParity(int index, ByteBuffer buffer, byte[] checksumBuf)
       throws IOException {
     final StripedDataStreamer current = setCurrentStreamer(index);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
index c21a6a5c..7099c28d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
@@ -30,16 +30,49 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ReplaceDatanodeOnFailure {
+  /**
+   * DEFAULT condition:
+   *   Let r be the replication number.
+   *   Let n be the number of existing datanodes.
+   *   Add a new datanode only if r >= 3 and either
+   *   (1) floor(r/2) >= n or (2) the block is hflushed/appended.
+   */
+  private static final Condition CONDITION_DEFAULT = new Condition() {
+    @Override
+    public boolean satisfy(final short replication,
+        final DatanodeInfo[] existings, final int n, final boolean isAppend,
+        final boolean isHflushed) {
+      return replication >= 3 &&
+          (n <= (replication / 2) || isAppend || isHflushed);
+    }
+  };
+  /** Return false unconditionally. */
+  private static final Condition CONDITION_FALSE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return false;
+    }
+  };
+  /** Return true unconditionally. */
+  private static final Condition CONDITION_TRUE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return true;
+    }
+  };
+
   /** The replacement policies */
   public enum Policy {
     /** The feature is disabled in the entire site. */
-    DISABLE(Condition.FALSE),
+    DISABLE(CONDITION_FALSE),
     /** Never add a new datanode. */
-    NEVER(Condition.FALSE),
-    /** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */
-    DEFAULT(Condition.DEFAULT),
+    NEVER(CONDITION_FALSE),
+    /** @see ReplaceDatanodeOnFailure#CONDITION_DEFAULT */
+    DEFAULT(CONDITION_DEFAULT),
     /** Always add a new datanode when an existing datanode is removed. */
-    ALWAYS(Condition.TRUE);
+    ALWAYS(CONDITION_TRUE);
 
     private final Condition condition;
 
@@ -54,41 +87,6 @@
 
   /** Datanode replacement condition */
   private interface Condition {
-    /** Return true unconditionally. */
-    Condition TRUE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return true;
-      }
-    };
-
-    /** Return false unconditionally. */
-    Condition FALSE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return false;
-      }
-    };
-
-    /**
-     * DEFAULT condition:
-     *   Let r be the replication number.
-     *   Let n be the number of existing datanodes.
-     *   Add a new datanode only if r >= 3 and either
-     *   (1) floor(r/2) >= n; or
-     *   (2) r > n and the block is hflushed/appended.
-     */
-    Condition DEFAULT = new Condition() {
-      @Override
-      public boolean satisfy(final short replication,
-          final DatanodeInfo[] existings, final int n, final boolean isAppend,
-          final boolean isHflushed) {
-        return replication >= 3 &&
-            (n <= (replication / 2) || isAppend || isHflushed);
-      }
-    };
 
     /** Is the condition satisfied? */
     boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings,
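The refactor above moves the TRUE/FALSE/DEFAULT conditions out of the Condition interface into private constants of ReplaceDatanodeOnFailure without changing their logic. The following standalone sketch of the DEFAULT rule (r = replication, n = number of existing datanodes) mirrors CONDITION_DEFAULT for sanity-checking the javadoc; it is not the Hadoop class itself.

public class DefaultConditionSketch {
  // Mirrors CONDITION_DEFAULT: add a replacement datanode only if r >= 3 and
  // either floor(r/2) >= n or the block has been hflushed/appended.
  static boolean satisfy(short replication, int nExistings,
      boolean isAppend, boolean isHflushed) {
    return replication >= 3
        && (nExistings <= (replication / 2) || isAppend || isHflushed);
  }

  public static void main(String[] args) {
    System.out.println(satisfy((short) 3, 1, false, false)); // true: floor(3/2) >= 1
    System.out.println(satisfy((short) 3, 2, false, false)); // false: enough datanodes
    System.out.println(satisfy((short) 3, 2, false, true));  // true: stream was hflushed
    System.out.println(satisfy((short) 2, 1, true, false));  // false: r < 3
  }
}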
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/fs/TestXAttr.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index 1d8d289..fc76a07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.junit.Test;
 
 import java.net.InetSocketAddress;
 import java.net.URI;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 
 /** Test NameNode port defaulting code. */
 public class TestDefaultNameNodePort {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
index 243d09b..b24df2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
@@ -17,17 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.collect.HashMultiset;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.net.unix.DomainSocket;
@@ -35,11 +25,20 @@
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.HashMultiset;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 public class TestPeerCache {
-  static final Log LOG = LogFactory.getLog(TestPeerCache.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestPeerCache.class);
 
   private static class FakePeer implements Peer {
     private boolean closed = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
index a4e00d9..eb10e96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
@@ -17,14 +17,9 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertSame;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.base.Supplier;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -35,7 +30,10 @@
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.base.Supplier;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertSame;
 
 public class TestLeaseRenewer {
   private final String FAKE_AUTHORITY="hdfs://nn1/";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
similarity index 93%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
index 7d28736..9d48444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
@@ -17,16 +17,8 @@
  */
 package org.apache.hadoop.hdfs.shortcircuit;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
@@ -34,9 +26,17 @@
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
 
 public class TestShortCircuitShm {
-  public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestShortCircuitShm.class);
   
   private static final File TEST_BASE =
       new File(System.getProperty("test.build.data", "/tmp"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index 35a6d9a..f5dd883 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -17,6 +17,19 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -31,29 +44,16 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.junit.Assert;
-import org.junit.Test;
-
 /**
  * Test {@link ByteArrayManager}.
  */
 public class TestByteArrayManager {
   static {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.ALL);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(ByteArrayManager.class), Level.ALL);
   }
 
-  static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestByteArrayManager.class);
 
   private static final Comparator<Future<Integer>> CMP = new Comparator<Future<Integer>>() {
     @Override
@@ -559,9 +559,8 @@
   }
   
   public static void main(String[] args) throws Exception {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.OFF);
-
+    GenericTestUtils.setLogLevel(LoggerFactory.getLogger(ByteArrayManager.class),
+                                 Level.OFF);
     final int arrayLength = 64 * 1024; //64k
     final int nThreads = 512;
     final int nAllocations = 1 << 15;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
index e2f6230..032fff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,6 +36,8 @@
 import org.mockserver.model.Header;
 import org.mockserver.model.HttpRequest;
 import org.mockserver.model.HttpResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
@@ -58,7 +58,8 @@
 import static org.mockserver.model.HttpResponse.response;
 
 public class TestWebHDFSOAuth2 {
-  public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestWebHDFSOAuth2.class);
 
   private ClientAndServer mockWebHDFS;
   private ClientAndServer mockOAuthServer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7f903b6..fbf211f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -184,6 +184,14 @@
     HDFS-9070. Allow fsck display pending replica location information for
     being-written blocks. (GAO Rui via jing9)
 
+    HDFS-9261. Erasure Coding: Skip encoding the data cells if all the parity
+    data streamers have failed for the current block group. (Rakesh R via umamahesh)
+
+    HDFS-9323. Randomize the DFSStripedOutputStreamWithFailure tests. (szetszwo)
+
+    HDFS-8777. Erasure Coding: add tests for taking snapshots on EC files. 
+    (Rakesh R via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -841,6 +849,9 @@
       HDFS-8438. Erasure Coding: Allow concat striped files if they have the same
       ErasureCodingPolicy. (Walter Su via jing9)
 
+      HDFS-9275. Wait previous ErasureCodingWork to finish before schedule
+      another one. (Walter Su via yliu)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES
@@ -1524,9 +1535,6 @@
     HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
     better efficiency. (Charlie Helin via wang)
 
-    HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-    values() since it creates a temporary array. (Staffan Friberg via yliu)
-
     HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
     BlockManager#excessReplicateMap. (yliu)
 
@@ -1599,6 +1607,17 @@
     HDFS-9255. Consolidate block recovery related implementation into a single
     class. (Walter Su via zhz)
 
+    HDFS-9295. Add a thorough test of the full KMS code path. 
+    (Daniel Templeton via zhz)
+
+    HDFS-8545. Refactor FS#getUsed() to use ContentSummary and add an API to fetch
+    the total file length from a specific path (J.Andreina via vinayakumarb)
+
+    HDFS-9229. Expose size of NameNode directory as a metric.
+    (Surendra Singh Lilhore via zhz)
+
+    HDFS-9339. Extend full test of KMS ACLs. (Daniel Templeton via zhz)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -1638,6 +1657,12 @@
     HDFS-9297. Update TestBlockMissingException to use corruptBlockOnDataNodesByDeletingBlockFile().
     (Tony Wu via lei)
 
+    HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
+
+    HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
+    HDFS-9308. Add truncateMeta() and deleteMeta() to MiniDFSCluster. (Tony Wu via lei)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
@@ -2179,6 +2204,27 @@
     HDFS-9297. Decomissioned capacity should not be considered for 
     configured/used capacity (Contributed by Kuhu Shukla)
 
+    HDFS-9044. Give priority to FavouredNodes, before selecting
+    nodes from FavouredNode's node group. (J.Andreina via vinayakumarb)
+
+    HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
+    saving namespace. (wang)
+
+    HDFS-9343. Empty caller context considered invalid. (Mingliang Liu via
+    Arpit Agarwal)
+
+    HDFS-9329. TestBootstrapStandby#testRateThrottling is flaky because fsimage
+    size is smaller than IO buffer size. (zhz)
+
+    HDFS-9313. Possible NullPointerException in BlockManager if no excess
+    replica can be chosen. (mingma)
+
+    HDFS-9354. Fix TestBalancer#testBalancerWithZeroThreadsForMove on Windows.
+    (Xiaoyu Yao via cnauroth)
+
+    HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
+    endings, fails on Windows. (cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2195,6 +2241,9 @@
     HDFS-8099. Change "DFSInputStream has been closed already" message to
     debug log level (Charles Lamb via Colin P. McCabe)
 
+    HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+    values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
@@ -3273,7 +3322,19 @@
       HDFS-7700. Document quota support for storage types. (Xiaoyu Yao via
       Arpit Agarwal)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 0798248..8625a04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -211,12 +211,6 @@
       <artifactId>leveldbjni-all</artifactId>
       <version>1.8</version>
     </dependency>
-    <dependency>
-      <groupId>org.mock-server</groupId>
-      <artifactId>mockserver-netty</artifactId>
-      <version>3.9.2</version>
-      <scope>test</scope>
-    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 43fbe1d..0a19007 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1624,6 +1624,10 @@
     }
 
     if (block.isStriped()) {
+      if (pendingNum > 0) {
+        // Wait for the previous recovery to finish.
+        return null;
+      }
       short[] indices = new short[liveBlockIndices.size()];
       for (int i = 0 ; i < liveBlockIndices.size(); i++) {
         indices[i] = liveBlockIndices.get(i);
@@ -1679,6 +1683,7 @@
     if (block.isStriped()) {
       assert rw instanceof ErasureCodingWork;
       assert rw.getTargets().length > 0;
+      assert pendingNum == 0: "Should wait for the previous recovery to finish";
       String src = getBlockCollection(block).getName();
       ErasureCodingPolicy ecPolicy = null;
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index be169c3..526a5d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -23,8 +23,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -33,13 +31,17 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /** 
  * This interface is used for choosing the desired number of targets
  * for placing block replicas.
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
+  static final Logger LOG = LoggerFactory.getLogger(
+      BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ad1a739..2723ed9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -138,20 +138,9 @@
       numOfReplicas = maxNodesAndReplicas[0];
       int maxNodesPerRack = maxNodesAndReplicas[1];
 
-      for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
-        DatanodeDescriptor favoredNode = favoredNodes.get(i);
-        // Choose a single node which is local to favoredNode.
-        // 'results' is updated within chooseLocalNode
-        final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
-            favoriteAndExcludedNodes, blocksize, maxNodesPerRack,
-            results, avoidStaleNodes, storageTypes, false);
-        if (target == null) {
-          LOG.warn("Could not find a target for file " + src
-              + " with favored node " + favoredNode); 
-          continue;
-        }
-        favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
-      }
+      chooseFavouredNodes(src, numOfReplicas, favoredNodes,
+          favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
+          avoidStaleNodes, storageTypes);
 
       if (results.size() < numOfReplicas) {
         // Not enough favored nodes, choose other nodes.
@@ -177,6 +166,29 @@
     }
   }
 
+  protected void chooseFavouredNodes(String src, int numOfReplicas,
+      List<DatanodeDescriptor> favoredNodes,
+      Set<Node> favoriteAndExcludedNodes, long blocksize, int maxNodesPerRack,
+      List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
+      EnumMap<StorageType, Integer> storageTypes)
+      throws NotEnoughReplicasException {
+    for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas;
+        i++) {
+      DatanodeDescriptor favoredNode = favoredNodes.get(i);
+      // Choose a single node which is local to favoredNode.
+      // 'results' is updated within chooseLocalNode
+      final DatanodeStorageInfo target =
+          chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize,
+            maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
+      if (target == null) {
+        LOG.warn("Could not find a target for file " + src
+            + " with favored node " + favoredNode);
+        continue;
+      }
+      favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
+    }
+  }
+
   /** This is the implementation. */
   private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
                                     Node writer,
@@ -969,6 +981,12 @@
                 excessTypes);
       }
       firstOne = false;
+      if (cur == null) {
+        LOG.warn("No excess replica can be found. excessTypes: {}." +
+            " moreThanOne: {}. exactlyOne: {}.", excessTypes, moreThanOne,
+            exactlyOne);
+        break;
+      }
 
       // adjust rackmap, moreThanOne, and exactlyOne
       adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
index 89f47ad..187d8d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
@@ -54,16 +54,79 @@
     super.initialize(conf, stats, clusterMap, host2datanodeMap);
   }
 
-  /** choose local node of localMachine as the target.
-   * if localMachine is not available, choose a node on the same nodegroup or 
-   * rack instead.
+  /**
+   * Choose all available favored nodes as targets.
+   * If there are not enough targets, choose one replica from
+   * the node group of each favored node that was not chosen.
+   * @throws NotEnoughReplicasException
+   */
+  @Override
+  protected void chooseFavouredNodes(String src, int numOfReplicas,
+      List<DatanodeDescriptor> favoredNodes,
+      Set<Node> favoriteAndExcludedNodes, long blocksize,
+      int maxNodesPerRack, List<DatanodeStorageInfo> results,
+      boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
+      throws NotEnoughReplicasException {
+    super.chooseFavouredNodes(src, numOfReplicas, favoredNodes,
+        favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results,
+        avoidStaleNodes, storageTypes);
+    if (results.size() < numOfReplicas) {
+      // Not enough replicas, choose from the node groups of unselected favored nodes
+      for (int i = 0;
+          i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
+        DatanodeDescriptor favoredNode = favoredNodes.get(i);
+        boolean chosenNode =
+            isNodeChosen(results, favoredNode);
+        if (chosenNode) {
+          continue;
+        }
+        NetworkTopologyWithNodeGroup clusterMapNodeGroup =
+            (NetworkTopologyWithNodeGroup) clusterMap;
+        // try a node on FavouredNode's node group
+        DatanodeStorageInfo target = null;
+        String scope =
+            clusterMapNodeGroup.getNodeGroup(favoredNode.getNetworkLocation());
+        try {
+          target =
+              chooseRandom(scope, favoriteAndExcludedNodes, blocksize,
+                maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+        } catch (NotEnoughReplicasException e) {
+          // catch Exception and continue with other favored nodes
+          continue;
+        }
+        if (target == null) {
+          LOG.warn("Could not find a target for file "
+              + src + " within nodegroup of favored node " + favoredNode);
+          continue;
+        }
+        favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
+      }
+    }
+  }
+
+  private boolean isNodeChosen(
+      List<DatanodeStorageInfo> results, DatanodeDescriptor favoredNode) {
+    boolean chosenNode = false;
+    for (int j = 0; j < results.size(); j++) {
+      if (results.get(j).getDatanodeDescriptor().equals(favoredNode)) {
+        chosenNode = true;
+        break;
+      }
+    }
+    return chosenNode;
+  }
+
+  /** Choose the local node of <i>localMachine</i> as the target.
+   * If localMachine is not available, fall back to the node group or local rack
+   * when the flag <i>fallbackToNodeGroupAndLocalRack</i> is set.
    * @return the chosen node
    */
   @Override
   protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
       Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
-      EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
+      EnumMap<StorageType, Integer> storageTypes,
+      boolean fallbackToNodeGroupAndLocalRack)
       throws NotEnoughReplicasException {
     DatanodeStorageInfo localStorage = chooseLocalStorage(localMachine,
         excludedNodes, blocksize, maxNodesPerRack, results,
@@ -72,6 +135,9 @@
       return localStorage;
     }
 
+    if (!fallbackToNodeGroupAndLocalRack) {
+      return null;
+    }
     // try a node on local node group
     DatanodeStorageInfo chosenStorage = chooseLocalNodeGroup(
         (NetworkTopologyWithNodeGroup)clusterMap, localMachine, excludedNodes, 
@@ -79,10 +145,6 @@
     if (chosenStorage != null) {
       return chosenStorage;
     }
-
-    if (!fallbackToLocalRack) {
-      return null;
-    }
     // try a node on local rack
     return chooseLocalRack(localMachine, excludedNodes, 
         blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index c630206..7b4b571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -30,6 +30,7 @@
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -310,6 +311,20 @@
       return dirType;
     }    
 
+    /**
+     * Get storage directory size.
+     */
+    public long getDirecorySize() {
+      try {
+        if (!isShared() && root != null && root.exists()) {
+          return FileUtils.sizeOfDirectory(root);
+        }
+      } catch (Exception e) {
+        LOG.warn("Failed to get directory size :" + root, e);
+      }
+      return 0;
+    }
+
     public void read(File from, Storage storage) throws IOException {
       Properties props = readPropertiesFile(from);
       storage.setFieldsFromProperties(props, this);
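The new getDirecorySize() helper above delegates to commons-io. A minimal standalone sketch of the same measurement, assuming a hypothetical local path, is:

import java.io.File;
import org.apache.commons.io.FileUtils;

public class DirSizeSketch {
  public static void main(String[] args) {
    // Hypothetical storage root; the real code uses StorageDirectory#getRoot().
    File root = new File("/tmp/hadoop/dfs/name");
    // Mirror the guard above: report 0 if the directory does not exist.
    long bytes = root.exists() ? FileUtils.sizeOfDirectory(root) : 0L;
    System.out.println(root + " uses " + bytes + " bytes");
  }
}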
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 64afcd0..834bd07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -907,15 +907,10 @@
 
       for (int i = 0; i < targetBuffers.length; i++) {
         if (targetBuffers[i] != null) {
-          cleanBuffer(targetBuffers[i]);
+          targetBuffers[i].clear();
         }
       }
     }
-    
-    private ByteBuffer cleanBuffer(ByteBuffer buffer) {
-      Arrays.fill(buffer.array(), (byte) 0);
-      return (ByteBuffer)buffer.clear();
-    }
 
     // send an empty packet to mark the end of the block
     private void endTargetBlocks(boolean[] targetsStatus) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 93dc097..341dd98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1064,6 +1064,8 @@
     } finally {
       removeFromCheckpointing(imageTxId);
     }
+    // Update NameDirSize metric
+    getStorage().updateNameDirSize();
   }
 
   /**
@@ -1244,6 +1246,8 @@
     // we won't miss this log segment on a restart if the edits directories
     // go missing.
     storage.writeTransactionIdFileToStorage(getEditLog().getCurSegmentTxId());
+    // Update NameDirSize metric
+    getStorage().updateNameDirSize();
     return new CheckpointSignature(this);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7d45a25..dcedcc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3729,9 +3729,7 @@
     public void run() {
       while (fsRunning && shouldRun) {
         try {
-          FSEditLog editLog = getFSImage().getEditLog();
-          long numEdits =
-              editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId();
+          long numEdits = getTransactionsSinceLastLogRoll();
           if (numEdits > rollThreshold) {
             FSNamesystem.LOG.info("NameNode rolling its own edit log because"
                 + " number of edits in open segment exceeds threshold of "
@@ -6407,6 +6405,11 @@
     return VersionInfo.getVersion();
   }
 
+  @Override // NameNodeStatusMXBean
+  public String getNameDirSize() {
+    return getFSImage().getStorage().getNNDirectorySize();
+  }
+
   /**
    * Verifies that the given identifier and password are valid and match.
    * @param identifier Token identifier.
@@ -7500,9 +7503,7 @@
         sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc");
         if (isCallerContextEnabled &&
             callerContext != null &&
-            callerContext.isValid() &&
-            (callerContext.getSignature() == null ||
-                callerContext.getSignature().length <= callerSignatureMaxLen)) {
+            callerContext.isContextValid()) {
           sb.append("\t").append("callerContext=");
           if (callerContext.getContext().length() > callerContextMaxLen) {
             sb.append(callerContext.getContext().substring(0,
@@ -7510,7 +7511,9 @@
           } else {
             sb.append(callerContext.getContext());
           }
-          if (callerContext.getSignature() != null) {
+          if (callerContext.getSignature() != null &&
+              callerContext.getSignature().length > 0 &&
+              callerContext.getSignature().length <= callerSignatureMaxLen) {
             sb.append(":");
             sb.append(new String(callerContext.getSignature(),
                 CallerContext.SIGNATURE_ENCODING));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index d872c03..9b63e72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -29,6 +29,7 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -52,6 +53,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Time;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -149,6 +151,11 @@
   private HashMap<String, String> deprecatedProperties;
 
   /**
+   * Name directories size for metric.
+   */
+  private Map<String, Long> nameDirSizeMap = new HashMap<>();
+
+  /**
    * Construct the NNStorage.
    * @param conf Namenode configuration.
    * @param imageDirs Directories the image can be stored in.
@@ -166,6 +173,8 @@
     setStorageDirectories(imageDirs, 
                           Lists.newArrayList(editsDirs),
                           FSNamesystem.getSharedEditsDirs(conf));
+    // Update NameDirSize metric value after NameNode start
+    updateNameDirSize();
   }
 
   @Override // Storage
@@ -1075,4 +1084,20 @@
         getBlockPoolID(),
         getCTime());
   }
+
+  public String getNNDirectorySize() {
+    return JSON.toString(nameDirSizeMap);
+  }
+
+  public void updateNameDirSize() {
+    Map<String, Long> nnDirSizeMap = new HashMap<>();
+    for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
+      StorageDirectory sd = it.next();
+      if (!sd.isShared()) {
+        nnDirSizeMap.put(sd.getRoot().getAbsolutePath(), sd.getDirecorySize());
+      }
+    }
+    nameDirSizeMap.clear();
+    nameDirSizeMap.putAll(nnDirSizeMap);
+  }
 }
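The NameDirSize metric added above is published through NameNodeMXBean as a JSON map of name directory path to size in bytes. A hedged sketch of reading it over JMX follows; the JMX service URL/port and the MBean name are assumptions for illustration.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class NameDirSizeReader {
  public static void main(String[] args) throws Exception {
    // Assumed JMX endpoint; remote JMX must be enabled on the NameNode JVM.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi");
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = jmxc.getMBeanServerConnection();
      // Assumed name under which the NameNodeMXBean is registered.
      ObjectName nn = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // JSON map: name directory absolute path -> size in bytes.
      String json = (String) mbs.getAttribute(nn, "NameDirSize");
      System.out.println(json);
    }
  }
}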
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 9dcef89..d127c98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -272,4 +272,9 @@
    */
   public Map<String, Integer> getDistinctVersions();
   
+  /**
+   * Get namenode directory size.
+   */
+  String getNameDirSize();
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 59f90a6..6e60dba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -372,6 +372,8 @@
           } finally {
             namesystem.cpUnlock();
           }
+          // Update NameDirSize metric
+          namesystem.getFSImage().getStorage().updateNameDirSize();
         } catch (EditLogInputException elie) {
           LOG.warn("Error while reading edits from disk. Will try again.", elie);
         } catch (InterruptedException ie) {
@@ -463,4 +465,4 @@
       return cachedActiveProxy;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 7ebf333..c81f154 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2117,6 +2117,28 @@
     getMaterializedReplica(i, blk).corruptMeta();
   }
 
+  /**
+   * Corrupt the metadata of a block by deleting the metadata file.
+   * @param i index of the datanode.
+   * @param blk the block.
+   */
+  public void deleteMeta(int i, ExtendedBlock blk)
+      throws IOException {
+    getMaterializedReplica(i, blk).deleteMeta();
+  }
+
+  /**
+   * Corrupt the metadata of a block by truncating the metadata file to a new size.
+   * @param i index of the datanode.
+   * @param blk the block.
+   * @param newSize the new size of the metadata file.
+   * @throws IOException if an I/O error occurs.
+   */
+  public void truncateMeta(int i, ExtendedBlock blk, int newSize)
+      throws IOException {
+    getMaterializedReplica(i, blk).truncateMeta(newSize);
+  }
+
   public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
       long newGenStamp) throws IOException {
     File blockFile = getBlockFile(dnIndex, blk);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index cc6e7d3..9942a2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -61,10 +61,10 @@
   public static final int BLOCK_STRIPED_CELL_SIZE = 64 * 1024;
   public static final int BLOCK_STRIPE_SIZE = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS;
 
-  static final int stripesPerBlock = 4;
-  static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
-  static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
-  static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
+  public static final int stripesPerBlock = 4;
+  public static final int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
+  public static final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
+  public static final int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;
 
 
   static byte[] generateBytes(int cnt) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
new file mode 100644
index 0000000..2b515d0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
@@ -0,0 +1,1657 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.net.URI;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedAction;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;
+import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider;
+import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This class tests the ACLs system through the full code path.  It overlaps
+ * slightly with the ACL tests in common, but the approach is more holistic.
+ *
+ * <b>NOTE:</b> Because of the mechanics of JAXP, when the KMS config files are
+ * written to disk, a config param with a blank value ("") will be written in a
+ * way that the KMS will read as unset, which is different from blank. For this
+ * reason, when testing the effects of blank config params, this test class
+ * sets the values of those config params to a space (" ").  A whitespace value
+ * will be preserved by JAXP when writing out the config files and will be
+ * interpreted by KMS as a blank value. (The KMS strips whitespace from ACL
+ * values before interpreting them.)
+ */
+public class TestAclsEndToEnd {
+  private static final Log LOG =
+      LogFactory.getLog(TestAclsEndToEnd.class.getName());
+  private static final String TEXT =
+      "The blue zone is for loading and unloading only. "
+      + "Please park in the red zone.";
+  private static final Path ZONE1 = new Path("/tmp/BLUEZONE");
+  private static final Path ZONE2 = new Path("/tmp/REDZONE");
+  private static final Path ZONE3 = new Path("/tmp/LOADINGZONE");
+  private static final Path ZONE4 = new Path("/tmp/UNLOADINGZONE");
+  private static final Path FILE1 = new Path(ZONE1, "file1");
+  private static final Path FILE1A = new Path(ZONE1, "file1a");
+  private static final Path FILE2 = new Path(ZONE2, "file2");
+  private static final Path FILE3 = new Path(ZONE3, "file3");
+  private static final Path FILE4 = new Path(ZONE4, "file4");
+  private static final String KEY1 = "key1";
+  private static final String KEY2 = "key2";
+  private static final String KEY3 = "key3";
+  private static UserGroupInformation realUgi;
+  private static String realUser;
+
+  private MiniKMS miniKMS;
+  private File kmsDir;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+
+  @BeforeClass
+  public static void captureUser() throws IOException {
+    realUgi = UserGroupInformation.getCurrentUser();
+    realUser = System.getProperty("user.name");
+  }
+
+  /**
+   * Extract the URI for the miniKMS.
+   *
+   * @return the URI for the miniKMS
+   */
+  private String getKeyProviderURI() {
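+    // MiniKMS.getKMSUrl() returns something like "http://host:port/kms";
+    // rewrite it as "kms://http@host:port/kms", the provider URI form that
+    // KMSClientProvider understands.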
+    return KMSClientProvider.SCHEME_NAME + "://" +
+        miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
+  }
+
+  /**
+   * Write out the config files needed by the miniKMS.  The miniKMS doesn't
+   * provide a way to set the configs directly, so the only way to pass config
+   * parameters is to write them out into config files.
+   *
+   * @param confDir the directory into which to write the configs
+   * @param conf the config to write.
+   * @throws IOException thrown if the config files cannot be written
+   */
+  private void writeConf(File confDir, Configuration conf)
+      throws IOException {
+    URI keystore = new Path(kmsDir.getAbsolutePath(), "kms.keystore").toUri();
+
+    conf.set(KMSConfiguration.KEY_PROVIDER_URI, "jceks://file@" + keystore);
+    conf.set("hadoop.kms.authentication.type", "simple");
+
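+    // The same Configuration is written out as both kms-site.xml and
+    // kms-acls.xml, so any hadoop.kms.acl.* properties a test sets end up in
+    // the ACL file that the KMS reads.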
+    Writer writer =
+        new FileWriter(new File(confDir, KMSConfiguration.KMS_SITE_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_ACLS_XML));
+    conf.writeXml(writer);
+    writer.close();
+
+    //create empty core-site.xml
+    writer = new FileWriter(new File(confDir, "core-site.xml"));
+    new Configuration(false).writeXml(writer);
+    writer.close();
+  }
+
+  /**
+   * Set up a fresh miniKMS and miniDFS.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf) throws Exception {
+    setup(conf, true, true);
+  }
+
+  /**
+   * Set up a fresh miniDFS and a miniKMS.  The resetKms parameter controls
+   * whether the miniKMS will start fresh or reuse the existing data.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @param resetKms whether to start a fresh miniKMS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf, boolean resetKms) throws Exception {
+    setup(conf, resetKms, true);
+  }
+
+  /**
+   * Set up a miniDFS and miniKMS.  The resetKms and resetDfs parameters control
+   * whether the services will start fresh or reuse the existing data.
+   *
+   * @param conf the configuration to use for both the miniKMS and miniDFS
+   * @param resetKms whether to start a fresh miniKMS
+   * @param resetDfs whether to start a fresh miniDFS
+   * @throws Exception thrown if setup fails
+   */
+  private void setup(Configuration conf, boolean resetKms, boolean resetDfs)
+          throws Exception {
+    if (resetKms) {
+      FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+
+      kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+
+      Assert.assertTrue(kmsDir.mkdirs());
+    }
+
+    writeConf(kmsDir, conf);
+
+    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
+
+    miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
+    miniKMS.start();
+
+    conf = new HdfsConfiguration();
+
+    // Allow the real user to proxy the test users and point HDFS at the
+    // MiniKMS key provider.
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".users",
+        "keyadmin,hdfs,user");
+    conf.set(ProxyUsers.CONF_HADOOP_PROXYUSER + "." + realUser + ".hosts",
+        "*");
+    conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
+        getKeyProviderURI());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
+        true);
+
+    MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf);
+
+    cluster = clusterBuilder.numDataNodes(1).format(resetDfs).build();
+    fs = cluster.getFileSystem();
+  }
+
+  /**
+   * Stop the miniKMS and miniDFS.
+   */
+  private void teardown() {
+    // Restore login user
+    UserGroupInformation.setLoginUser(realUgi);
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+
+    miniKMS.stop();
+  }
+
+  /**
+   * Return a new {@link Configuration} with the KMS ACLs set as needed to
+   * pass the full ACL test in {@link #doFullAclTest()}.
+   *
+   * @param hdfsUgi the hdfs user
+   * @param keyadminUgi the keyadmin user
+   * @return the configuration
+   */
+  private static Configuration getBaseConf(UserGroupInformation hdfsUgi,
+      UserGroupInformation keyadminUgi) {
+    Configuration conf = new Configuration();
+
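+    // The hadoop.kms.acl.* entries below control who may call each KMS
+    // operation; the key-level ACLs and blacklists are added separately in
+    // setKeyAcls() and setBlacklistAcls().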
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.ROLLOVER",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_KEYS",
+        keyadminUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.SET_KEY_MATERIAL", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", "*");
+
+    return conf;
+  }
+
+  /**
+   * Set the recommended blacklists.
+   *
+   * @param conf the configuration
+   * @param hdfsUgi the hdfs user
+   */
+  private static void setBlacklistAcls(Configuration conf,
+      UserGroupInformation hdfsUgi) {
+
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DELETE",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.ROLLOVER",
+        hdfsUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GET", "*");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.SET_KEY_MATERIAL",
+        "*");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        hdfsUgi.getUserName());
+  }
+
+  /**
+   * Set the key ACLs appropriate to pass the full ACL test in
+   * {@link #doFullAclTest()} using the specified prefix.  The prefix should
+   * either be "whitelist.key.acl." or "key.acl.key1.".
+   *
+   * @param conf the configuration
+   * @param prefix the ACL prefix
+   * @param hdfsUgi the hdfs user
+   * @param keyadminUgi the keyadmin user
+   * @param userUgi the normal user
+   */
+  private static void setKeyAcls(Configuration conf, String prefix,
+      UserGroupInformation hdfsUgi,
+      UserGroupInformation keyadminUgi,
+      UserGroupInformation userUgi) {
+
+    conf.set(prefix + "MANAGEMENT", keyadminUgi.getUserName());
+    conf.set(prefix + "READ", hdfsUgi.getUserName());
+    conf.set(prefix + "GENERATE_EEK", hdfsUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".DECRYPT_EEK",
+        userUgi.getUserName());
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with whitelist key ACLs.
+   * The configuration used is the correct configuration to pass the full ACL
+   * test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithWhitelist() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setBlacklistAcls(conf, hdfsUgi);
+    setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with key ACLs.
+   * The configuration used is the correct configuration to pass the full ACL
+   * test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithKeyAcls() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setBlacklistAcls(conf, hdfsUgi);
+    setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".",
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with whitelist key ACLs
+   * and without blacklist ACLs.  The configuration used is the correct
+   * configuration to pass the full ACL test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithWhitelistWithoutBlacklist() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Test the full life cycle of a key using a config with key ACLs
+   * and without blacklist ACLs. The configuration used is the correct
+   * configuration to pass the full ACL test in {@link #doFullAclTest()}.
+   *
+   * @throws Exception thrown on test failure
+   */
+  @Test
+  public void testGoodWithKeyAclsWithoutBlacklist() throws Exception {
+    UserGroupInformation hdfsUgi =
+        UserGroupInformation.createProxyUserForTesting("hdfs",
+          realUgi, new String[] {"supergroup"});
+    UserGroupInformation keyadminUgi =
+        UserGroupInformation.createProxyUserForTesting("keyadmin",
+          realUgi, new String[] {"keyadmin"});
+    UserGroupInformation userUgi =
+        UserGroupInformation.createProxyUserForTesting("user",
+          realUgi,  new String[] {"staff"});
+    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
+
+    setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".",
+        hdfsUgi, keyadminUgi, userUgi);
+    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
+  }
+
+  /**
+   * Run a full key life cycle test using the provided configuration and users.
+   *
+   * @param conf the configuration
+   * @param hdfsUgi the user to use as the hdfs user
+   * @param keyadminUgi the user to use as the keyadmin user
+   * @param userUgi the user to use as the normal user
+   * @throws Exception thrown if there is a test failure
+   */
+  private void doFullAclTest(final Configuration conf,
+      final UserGroupInformation hdfsUgi,
+      final UserGroupInformation keyadminUgi,
+      final UserGroupInformation userUgi) throws Exception {
+
+    try {
+      setup(conf);
+
+      // Create a test key
+      assertTrue("Exception during creation of key " + KEY1 + " by "
+          + keyadminUgi.getUserName(), createKey(keyadminUgi, KEY1, conf));
+
+      // Fail to create a test key
+      assertFalse("Allowed creation of key " + KEY2 + " by "
+          + hdfsUgi.getUserName(), createKey(hdfsUgi, KEY2, conf));
+      assertFalse("Allowed creation of key " + KEY2 + " by "
+          + userUgi.getUserName(), createKey(userUgi, KEY2, conf));
+
+      // Create a directory and chown it to the normal user.
+      fs.mkdirs(ZONE1);
+      fs.setOwner(ZONE1, userUgi.getUserName(),
+          userUgi.getPrimaryGroupName());
+
+      // Create an EZ
+      assertTrue("Exception during creation of EZ " + ZONE1 + " by "
+          + hdfsUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(hdfsUgi, KEY1, ZONE1));
+
+      // Fail to create an EZ
+      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
+          + keyadminUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(keyadminUgi, KEY1, ZONE2));
+      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
+          + userUgi.getUserName() + " using key " + KEY1,
+            createEncryptionZone(userUgi, KEY1, ZONE2));
+
+      // Create a file in the zone
+      assertTrue("Exception during creation of file " + FILE1 + " by "
+          + userUgi.getUserName(), createFile(userUgi, FILE1, TEXT));
+
+      // Fail to create a file in the zone
+      assertFalse("Allowed creation of file " + FILE1A + " by "
+          + hdfsUgi.getUserName(), createFile(hdfsUgi, FILE1A, TEXT));
+      assertFalse("Allowed creation of file " + FILE1A + " by "
+          + keyadminUgi.getUserName(), createFile(keyadminUgi, FILE1A, TEXT));
+
+      // Read a file in the zone
+      assertTrue("Exception while reading file " + FILE1 + " by "
+          + userUgi.getUserName(), compareFile(userUgi, FILE1, TEXT));
+
+      // Fail to read a file in the zone
+      assertFalse("Allowed reading of file " + FILE1 + " by "
+          + hdfsUgi.getUserName(), compareFile(hdfsUgi, FILE1, TEXT));
+      assertFalse("Allowed reading of file " + FILE1 + " by "
+          + keyadminUgi.getUserName(), compareFile(keyadminUgi, FILE1, TEXT));
+
+      // Remove the zone
+      fs.delete(ZONE1, true);
+
+      // Fail to remove the key
+      assertFalse("Allowed deletion of file " + FILE1 + " by "
+          + hdfsUgi.getUserName(), deleteKey(hdfsUgi, KEY1));
+      assertFalse("Allowed deletion of file " + FILE1 + " by "
+          + userUgi.getUserName(), deleteKey(userUgi, KEY1));
+
+      // Remove
+      assertTrue("Exception during deletion of file " + FILE1 + " by "
+          + keyadminUgi.getUserName(), deleteKey(keyadminUgi, KEY1));
+    } finally {
+      fs.delete(ZONE1, true);
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+  }
+
+  /**
+   * Test that key creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateKey() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with correct config"
+          + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with correct config"
+          + " using default key ACLs", createKey(realUgi, KEY2, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation with blacklist for CREATE",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation without CREATE KMS ACL",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation without MANAGMENT key ACL",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because setting any key ACL for key3 causes the default key
+    // ACLs to be ignored for that key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY3 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertFalse("Allowed key creation when default key ACL should have been"
+          + " overridden by key ACL", createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because the default setting for KMS ACLs is fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation with default KMS ACLs",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that zone creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory.
+    conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE1);
+
+      assertTrue("Exception during zone creation with correct config using"
+          + " whitelist key ACLs", createEncryptionZone(realUgi, KEY1, ZONE1));
+    } finally {
+      fs.delete(ZONE1, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE2);
+
+      assertTrue("Exception during zone creation with correct config using"
+          + " default key ACLs", createEncryptionZone(realUgi, KEY1, ZONE2));
+    } finally {
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because setting any key ACL for key1 causes the default key
+    // ACLs to be ignored for that key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed creation of zone when default key ACLs should have"
+          + " been overridden by key ACL",
+            createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist for GET_METADATA
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed zone creation of zone with blacklisted GET_METADATA",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist for GENERATE_EEK
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertFalse("Allowed zone creation of zone with blacklisted GENERATE_EEK",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL but works because defaults for KMS ACLs are fully
+    // permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE3);
+
+      assertTrue("Exception during zone creation with default KMS ACLs",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+    } finally {
+      fs.delete(ZONE3, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GET_METADATA KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GET_METADATA KMS ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GENERATE_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GENERATE_EEK KMS ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing READ key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without READ ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing GENERATE_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      fs.mkdirs(ZONE4);
+
+      assertFalse("Allowed zone creation without GENERATE_EEK ACL",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } finally {
+      fs.delete(ZONE4, true);
+      teardown();
+    }
+  }
+
+  /**
+   * Test that in-zone file creation is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testCreateFileInEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    assertTrue(new File(kmsDir, "kms.keystore").length() == 0);
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      fs.mkdirs(ZONE1);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE1));
+      fs.mkdirs(ZONE2);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE2));
+      fs.mkdirs(ZONE3);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE3));
+      fs.mkdirs(ZONE4);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE4));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+      fs.delete(ZONE2, true);
+      fs.delete(ZONE3, true);
+      fs.delete(ZONE4, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory or DFS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with correct config"
+          + " using whitelist ACL", createFile(realUgi, FILE1, TEXT));
+    } finally {
+      fs.delete(ZONE1, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with correct config"
+          + " using whitelist ACL", createFile(realUgi, FILE2, TEXT));
+    } finally {
+      fs.delete(ZONE2, true);
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because setting any key ACL for key1 causes the default key
+    // ACLs to be ignored for that key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation when default key ACLs should have been"
+          + " overridden by key ACL", createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation with blacklist for GENERATE_EEK",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation with blacklist for DECRYPT_EEK",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because default KMS ACLs are fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception during file creation with default KMS ACLs",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing GENERATE_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK", " ");
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without GENERATE_EEK KMS ACL",
+          createFile(realUgi, FILE4, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without DECRYPT_EEK KMS ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing GENERATE_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without GENERATE_EEK key ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file creation without DECRYPT_EEK key ACL",
+          createFile(realUgi, FILE3, TEXT));
+    } catch (Exception ex) {
+      fs.delete(ZONE3, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that in-zone file read is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testReadFileInEncryptionZone() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GET_METADATA",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "READ",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    assertTrue(new File(kmsDir, "kms.keystore").length() == 0);
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      fs.mkdirs(ZONE1);
+      assertTrue("Exception during zone creation",
+          createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue("Exception during file creation",
+              createFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory or DFS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with correct config with"
+          + " whitelist ACLs", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACLs
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with correct config"
+          + " with default ACLs", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because setting any key ACL for key1 causes the default key
+    // ACLs to be ignored for that key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".READ",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read when default key ACLs should have been"
+          + " overridden by key ACL", compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied by blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DECRYPT_EEK",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read with blacklist for DECRYPT_EEK",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because default KMS ACLs are fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertTrue("Exception while reading file with default KMS ACLs",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of missing DECRYPT_EEK KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DECRYPT_EEK", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read without DECRYPT_EEK KMS ACL",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+
+    // Denied because of missing DECRYPT_EEK key ACL
+    conf = new Configuration();
+
+    try {
+      setup(conf, false, false);
+
+      assertFalse("Allowed file read without DECRYPT_EEK key ACL",
+          compareFile(realUgi, FILE1, TEXT));
+    } catch (Throwable ex) {
+      fs.delete(ZONE1, true);
+
+      throw ex;
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Test that key deletion is correctly governed by ACLs.
+   * @throws Exception thrown if setup fails
+   */
+  @Test
+  public void testDeleteKey() throws Exception {
+    Configuration conf = new Configuration();
+
+    // Create a test key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.CREATE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf);
+
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY1, conf));
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY2, conf));
+      assertTrue("Exception during key creation",
+          createKey(realUgi, KEY3, conf));
+    } finally {
+      teardown();
+    }
+
+    // We tear everything down and then restart it with the ACLs we want to
+    // test so that there's no contamination from the ACLs needed for setup.
+    // To make that work, we have to tell the setup() method not to create a
+    // new KMS directory.
+
+    conf = new Configuration();
+
+    // Correct config with whitelist ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with correct config"
+          + " using whitelist key ACLs", deleteKey(realUgi, KEY1));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Correct config with default ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with correct config"
+          + " using default key ACLs", deleteKey(realUgi, KEY2));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because of blacklist
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "blacklist.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion with blacklist for DELETE",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing KMS ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE", " ");
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion without DELETE KMS ACL",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Missing key ACL
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion without MANAGMENT key ACL",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Denied because setting any key ACL for key3 causes the default key
+    // ACLs to be ignored for that key
+    conf.set(KMSConfiguration.CONFIG_PREFIX + "acl.DELETE",
+        realUgi.getUserName());
+    conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + KEY3 + ".DECRYPT_EEK",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertFalse("Allowed key deletion when default key ACL should have been"
+          + " overridden by key ACL", deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+
+    conf = new Configuration();
+
+    // Allowed because the default setting for KMS ACLs is fully permissive
+    conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT",
+        realUgi.getUserName());
+
+    try {
+      setup(conf, false);
+
+      assertTrue("Exception during key deletion with default KMS ACLs",
+          deleteKey(realUgi, KEY3));
+    } finally {
+      teardown();
+    }
+  }
+
+  /**
+   * Create a key as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @param conf the configuration
+   * @return whether the key creation succeeded
+   */
+  private boolean createKey(UserGroupInformation ugi, final String key,
+      final Configuration conf) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        try {
+          DFSTestUtil.createKey(key, cluster, conf);
+        } catch (NoSuchAlgorithmException ex) {
+          throw new IOException(ex);
+        }
+      }
+    });
+  }
+
+  /**
+   * Create a zone as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @param zone the target zone
+   * @return whether the zone creation succeeded
+   */
+  private boolean createEncryptionZone(UserGroupInformation ugi,
+      final String key, final Path zone) {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        cluster.getFileSystem().createEncryptionZone(zone, key);
+      }
+    });
+  }
+
+  /**
+   * Create a file as the specified user.
+   *
+   * @param ugi the target user
+   * @param file the target file
+   * @param text the target file contents
+   * @return whether the file creation succeeded
+   */
+  private boolean createFile(UserGroupInformation ugi,
+      final Path file, final String text) {
+
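+    // Writing into an encryption zone exercises both GENERATE_EEK (to create
+    // the file's EDEK) and DECRYPT_EEK (so the writer can obtain the DEK),
+    // which is why testCreateFileInEncryptionZone() covers both ACLs.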
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        FSDataOutputStream dout = cluster.getFileSystem().create(file);
+        PrintWriter out = new PrintWriter(new OutputStreamWriter(dout));
+
+        out.println(text);
+        out.close();
+      }
+    });
+  }
+
+  /**
+   * Read a file as the specified user and compare the contents to expectations.
+   *
+   * @param ugi the target user
+   * @param file the target file
+   * @param text the expected file contents
+   * @return true if the file read succeeded and the contents were as expected
+   */
+  private boolean compareFile(UserGroupInformation ugi,
+      final Path file, final String text) {
+
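+    // Reading back an encrypted file only requires DECRYPT_EEK for the
+    // file's key, which is what testReadFileInEncryptionZone() probes.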
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        FSDataInputStream din =  cluster.getFileSystem().open(file);
+        BufferedReader in = new BufferedReader(new InputStreamReader(din));
+
+        assertEquals("The text read does not match the text written",
+            text, in.readLine());
+      }
+    });
+  }
+
+  /**
+   * Delete a key as the specified user.
+   *
+   * @param ugi the target user
+   * @param key the target key
+   * @return whether the key deletion succeeded
+   */
+  private boolean deleteKey(UserGroupInformation ugi, final String key)
+      throws IOException, InterruptedException {
+
+    return doUserOp(ugi, new UserOp() {
+      @Override
+      public void execute() throws IOException {
+        cluster.getNameNode().getNamesystem().getProvider().deleteKey(key);
+      }
+    });
+  }
+
+  /**
+   * Perform an operation as the given user.  This method sets the login user
+   * to the given UGI and does not restore the previous login user after the
+   * call completes.
+   *
+   * @param ugi the target user
+   * @param op the operation to perform
+   * @return true if the operation succeeded without throwing an exception
+   */
+  private boolean doUserOp(UserGroupInformation ugi, final UserOp op) {
+    UserGroupInformation.setLoginUser(ugi);
+
+    // Run the operation as the given user and report whether it succeeded.
+    return ugi.doAs(new PrivilegedAction<Boolean>() {
+      @Override
+      public Boolean run() {
+        try {
+          op.execute();
+
+          return true;
+        } catch (IOException ex) {
+          LOG.error("IOException thrown during doAs() operation", ex);
+
+          return false;
+        }
+      }
+    });
+  }
+
+  /**
+   * Simple interface that defines an operation to perform.
+   */
+  private interface UserOp {
+    void execute() throws IOException;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 3850ff2..398bcc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -22,11 +22,8 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -35,12 +32,15 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A JUnit test for corrupted file handling.
@@ -70,6 +70,8 @@
  *     replica was created from the non-corrupted replica.
  */
 public class TestCrcCorruption {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestCrcCorruption.class);
 
   private DFSClientFaultInjector faultInjector;
 
@@ -167,90 +169,26 @@
       // file disallows this Datanode to send data to another datanode.
       // However, a client is alowed access to this block.
       //
-      File storageDir = cluster.getInstanceStorageDir(0, 1);
-      String bpid = cluster.getNamesystem().getBlockPoolId();
-      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      File[] blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-      int num = 0;
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
-            blocks[idx].getName().endsWith(".meta")) {
-          num++;
-          if (num % 3 == 0) {
-            //
-            // remove .meta file
-            //
-            System.out.println("Deliberately removing file " + blocks[idx].getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
-          } else if (num % 3 == 1) {
-            //
-            // shorten .meta file
-            //
-            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-            FileChannel channel = file.getChannel();
-            int newsize = random.nextInt((int)channel.size()/2);
-            System.out.println("Deliberately truncating file " + 
-                               blocks[idx].getName() + 
-                               " to size " + newsize + " bytes.");
-            channel.truncate(newsize);
-            file.close();
-          } else {
-            //
-            // corrupt a few bytes of the metafile
-            //
-            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
-            FileChannel channel = file.getChannel();
-            long position = 0;
-            //
-            // The very first time, corrupt the meta header at offset 0
-            //
-            if (num != 2) {
-              position = (long)random.nextInt((int)channel.size());
-            }
-            int length = random.nextInt((int)(channel.size() - position + 1));
-            byte[] buffer = new byte[length];
-            random.nextBytes(buffer);
-            channel.write(ByteBuffer.wrap(buffer), position);
-            System.out.println("Deliberately corrupting file " + 
-                               blocks[idx].getName() + 
-                               " at offset " + position +
-                               " length " + length);
-            file.close();
-          }
-        }
-      }
-      
-      //
-      // Now deliberately corrupt all meta blocks from the second
-      // directory of the first datanode
-      //
-      storageDir = cluster.getInstanceStorageDir(0, 1);
-      data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      blocks = data_dir.listFiles();
-      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
+      final int dnIdx = 0;
+      final DataNode dn = cluster.getDataNodes().get(dnIdx);
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      List<FinalizedReplica> replicas =
+          dn.getFSDataset().getFinalizedBlocks(bpid);
+      assertTrue("Replicas do not exist", !replicas.isEmpty());
 
-      int count = 0;
-      File previous = null;
-      for (int idx = 0; idx < blocks.length; idx++) {
-        if (blocks[idx].getName().startsWith("blk_") &&
-            blocks[idx].getName().endsWith(".meta")) {
-          //
-          // Move the previous metafile into the current one.
-          //
-          count++;
-          if (count % 2 == 0) {
-            System.out.println("Deliberately insertimg bad crc into files " +
-                                blocks[idx].getName() + " " + previous.getName());
-            assertTrue("Cannot remove file.", blocks[idx].delete());
-            assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
-            assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
-            previous = null;
-          } else {
-            previous = blocks[idx];
-          }
+      for (int idx = 0; idx < replicas.size(); idx++) {
+        FinalizedReplica replica = replicas.get(idx);
+        ExtendedBlock eb = new ExtendedBlock(bpid, replica);
+        if (idx % 3 == 0) {
+          LOG.info("Deliberately removing meta for block " + eb);
+          cluster.deleteMeta(dnIdx, eb);
+        } else if (idx % 3 == 1) {
+          final int newSize = 2;  // bytes
+          LOG.info("Deliberately truncating meta file for block " +
+              eb + " to size " + newSize + " bytes.");
+          cluster.truncateMeta(dnIdx, eb, newSize);
+        } else {
+          cluster.corruptMeta(dnIdx, eb);
         }
       }
 
@@ -260,7 +198,7 @@
       //
       assertTrue("Corrupted replicas not handled properly.",
                  util.checkFiles(fs, "/srcdat"));
-      System.out.println("All File still have a valid replica");
+      LOG.info("All File still have a valid replica");
 
       //
       // set replication factor back to 1. This causes only one replica of
@@ -273,7 +211,7 @@
       //System.out.println("All Files done with removing replicas");
       //assertTrue("Excess replicas deleted. Corrupted replicas found.",
       //           util.checkFiles(fs, "/srcdat"));
-      System.out.println("The excess-corrupted-replica test is disabled " +
+      LOG.info("The excess-corrupted-replica test is disabled " +
                          " pending HADOOP-1557");
 
       util.cleanup(fs, "/srcdat");
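The rewritten loop above no longer walks the datanode's storage directories by hand; it asks the DataNode's dataset for its finalized replicas and then damages the .meta files through the MiniDFSCluster helpers deleteMeta, truncateMeta and corruptMeta. A minimal sketch of that cycle, assuming a running MiniDFSCluster with at least one finalized replica on the chosen datanode (the 2-byte truncation mirrors the hunk):

    import java.util.List;

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;

    public class MetaCorruptionSketch {
      /** Cycle delete / truncate / corrupt over every finalized replica on one DN. */
      static void corruptAllMeta(MiniDFSCluster cluster, int dnIdx) throws Exception {
        DataNode dn = cluster.getDataNodes().get(dnIdx);
        String bpid = cluster.getNamesystem().getBlockPoolId();
        List<FinalizedReplica> replicas = dn.getFSDataset().getFinalizedBlocks(bpid);
        for (int idx = 0; idx < replicas.size(); idx++) {
          ExtendedBlock eb = new ExtendedBlock(bpid, replicas.get(idx));
          if (idx % 3 == 0) {
            cluster.deleteMeta(dnIdx, eb);        // remove the .meta file
          } else if (idx % 3 == 1) {
            cluster.truncateMeta(dnIdx, eb, 2);   // shrink it to 2 bytes
          } else {
            cluster.corruptMeta(dnIdx, eb);       // overwrite bytes inside it
          }
        }
      }
    }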
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index f1ce8ff..76b471a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -57,6 +57,8 @@
     int numDNs = dataBlocks + parityBlocks + 2;
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 7bd976f..b60d0f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -131,15 +131,16 @@
 
   private static final List<Integer> LENGTHS = newLengths();
 
-  static int getLength(int i) {
-    return LENGTHS.get(i);
+  static Integer getLength(int i) {
+    return i >= 0 && i < LENGTHS.size() ? LENGTHS.get(i) : null;
   }
 
+  private static final Random RANDOM = new Random();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
   private final Path dir = new Path("/"
       + TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
-  private final Random random = new Random();
 
   private void setup(Configuration conf) throws IOException {
     final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
@@ -167,19 +168,6 @@
   }
 
   @Test(timeout=240000)
-  public void testDatanodeFailure56() throws Exception {
-    runTest(getLength(56));
-  }
-
-  @Test(timeout=240000)
-  public void testDatanodeFailureRandomLength() throws Exception {
-    int lenIndex = random.nextInt(LENGTHS.size());
-    LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
-        + lenIndex);
-    runTest(getLength(lenIndex));
-  }
-
-  @Test(timeout=240000)
   public void testMultipleDatanodeFailure56() throws Exception {
     runTestWithMultipleFailure(getLength(56));
   }
@@ -190,7 +178,7 @@
    */
   //@Test(timeout=240000)
   public void testMultipleDatanodeFailureRandomLength() throws Exception {
-    int lenIndex = random.nextInt(LENGTHS.size());
+    int lenIndex = RANDOM.nextInt(LENGTHS.size());
     LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
         + lenIndex);
     runTestWithMultipleFailure(getLength(lenIndex));
@@ -484,7 +472,16 @@
         = new TestDFSStripedOutputStreamWithFailure();
     private void run(int offset) {
       final int i = offset + getBase();
-      final int length = getLength(i);
+      final Integer length = getLength(i);
+      if (length == null) {
+        System.out.println("Skip test " + i + " since length=null.");
+        return;
+      }
+      if (RANDOM.nextInt(16) != 0) {
+        System.out.println("Test " + i + ", length=" + length
+            + ", is not chosen to run.");
+        return;
+      }
       System.out.println("Run test " + i + ", length=" + length);
       test.runTest(length);
     }
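getLength() now returns an Integer and yields null for an out-of-range index, and run() additionally samples roughly one in sixteen of the valid indices, which is what keeps each numbered subclass (...020, ...030, and so on) cheap. A standalone sketch of just that selection logic, with LENGTHS and the test body replaced by stand-ins:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    public class LengthSelectionSketch {
      // Stand-in for the test's table of file lengths.
      private static final List<Integer> LENGTHS = Arrays.asList(0, 1, 2, 3);
      private static final Random RANDOM = new Random();

      static Integer getLength(int i) {
        return i >= 0 && i < LENGTHS.size() ? LENGTHS.get(i) : null;
      }

      static void run(int i) {
        final Integer length = getLength(i);
        if (length == null) {
          System.out.println("Skip test " + i + " since length=null.");
          return;                       // index is outside the table
        }
        if (RANDOM.nextInt(16) != 0) {  // run roughly 1 in 16 of the candidates
          System.out.println("Test " + i + ", length=" + length + ", is not chosen to run.");
          return;
        }
        System.out.println("Run test " + i + ", length=" + length);
        // the real test body (runTest(length)) would go here
      }

      public static void main(String[] args) {
        for (int i = 0; i < 8; i++) {
          run(i);
        }
      }
    }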
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
new file mode 100644
index 0000000..21feee6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure020.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure020 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
new file mode 100644
index 0000000..01e6e73
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure030.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure030 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
new file mode 100644
index 0000000..8519fed
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure040.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure040 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
new file mode 100644
index 0000000..d750cbf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure050.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure050 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
new file mode 100644
index 0000000..cb8ce23
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure060.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure060 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
new file mode 100644
index 0000000..ee72c92
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure070.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure070 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
new file mode 100644
index 0000000..90ff587
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure080.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure080 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
new file mode 100644
index 0000000..ce56cd2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure090.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure090 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
new file mode 100644
index 0000000..d63c19d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure100.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure100 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
new file mode 100644
index 0000000..a590623
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure110.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure110 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
new file mode 100644
index 0000000..0e641ff
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure120.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure120 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
new file mode 100644
index 0000000..e84ad1f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure130.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure130 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
new file mode 100644
index 0000000..b128c85
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure140.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure140 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
new file mode 100644
index 0000000..41940af
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure150.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure150 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
new file mode 100644
index 0000000..8e6c39b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure160.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure160 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
new file mode 100644
index 0000000..d54be46
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure170.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure170 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
new file mode 100644
index 0000000..93f00b4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure180.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure180 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
new file mode 100644
index 0000000..04f5e0d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure190.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure190 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
new file mode 100644
index 0000000..196d743
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure200.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure200 extends TestBase {}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
new file mode 100644
index 0000000..ef5d65b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure210.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;
+
+public class TestDFSStripedOutputStreamWithFailure210 extends TestBase {
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 79da7b8..0543e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -1048,4 +1048,32 @@
       cluster.shutdown();
     }
   }
+
+  @Test(timeout = 30000)
+  public void testTotalDfsUsed() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem fs = cluster.getFileSystem();
+      // create file under root
+      FSDataOutputStream file1 = fs.create(new Path("/File1"));
+      file1.write("hi".getBytes());
+      file1.close();
+      // create file under sub-folder
+      FSDataOutputStream file2 = fs.create(new Path("/Folder1/File2"));
+      file2.write("hi".getBytes());
+      file2.close();
+      // getUsed(Path) should return total len of all the files from a path
+      assertEquals(2, fs.getUsed(new Path("/Folder1")));
+      // getUsed() should return total length of all files in filesystem
+      assertEquals(4, fs.getUsed());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
 }
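The new testTotalDfsUsed case exercises the two space-accounting views: getUsed(Path) sums the lengths of the files under a path, while getUsed() sums over the whole filesystem. A small sketch of the same calls against an already-initialized FileSystem (the helper name and paths are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GetUsedSketch {
      /** Write one 2-byte file under dir and print the two space-used views. */
      static void report(FileSystem fs, Path dir) throws IOException {
        try (FSDataOutputStream out = fs.create(new Path(dir, "f1"))) {
          out.write("hi".getBytes());                                      // 2 bytes
        }
        System.out.println("used under " + dir + " = " + fs.getUsed(dir)); // 2
        System.out.println("used for whole FS = " + fs.getUsed());         // sum of all files
      }
    }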
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
new file mode 100644
index 0000000..515763c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestErasureCodingPolicyWithSnapshot {
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private Configuration conf;
+
+  private final static short GROUP_SIZE = StripedFileTestUtil.NUM_DATA_BLOCKS
+      + StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final static int SUCCESS = 0;
+  private final ErasureCodingPolicy sysDefaultPolicy = ErasureCodingPolicyManager
+      .getSystemDefaultPolicy();
+
+  @Before
+  public void setupCluster() throws IOException {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  @After
+  public void shutdownCluster() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test correctness of successive snapshot creation and deletion with erasure
+   * coding policies. Create snapshot of ecDir's parent directory.
+   */
+  @Test(timeout = 120000)
+  public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
+    final int len = 1024;
+    final Path ecDirParent = new Path("/parent");
+    final Path ecDir = new Path(ecDirParent, "ecdir");
+    final Path ecFile = new Path(ecDir, "ecfile");
+    fs.mkdirs(ecDir);
+    fs.allowSnapshot(ecDirParent);
+    // set erasure coding policy
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
+    String contents = DFSTestUtil.readFile(fs, ecFile);
+    final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
+    final Path snap1ECDir = new Path(snap1, ecDir.getName());
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap1ECDir));
+
+    // Now delete the dir which has the erasure coding policy, re-create it,
+    // and take another snapshot
+    fs.delete(ecDir, true);
+    fs.mkdir(ecDir, FsPermission.getDirDefault());
+    final Path snap2 = fs.createSnapshot(ecDirParent, "snap2");
+    final Path snap2ECDir = new Path(snap2, ecDir.getName());
+    assertNull("Expected null erasure coding policy",
+        fs.getErasureCodingPolicy(snap2ECDir));
+
+    // Make dir again with system default ec policy
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    final Path snap3 = fs.createSnapshot(ecDirParent, "snap3");
+    final Path snap3ECDir = new Path(snap3, ecDir.getName());
+    // Check that snap3's ECPolicy has the correct settings
+    ErasureCodingPolicy ezSnap3 = fs.getErasureCodingPolicy(snap3ECDir);
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        ezSnap3);
+
+    // Check that older snapshots still have the old ECPolicy settings
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap1ECDir));
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap2ECDir));
+
+    // Verify contents of the snapshotted file
+    final Path snapshottedECFile = new Path(snap1.toString() + "/"
+        + ecDir.getName() + "/" + ecFile.getName());
+    assertEquals("Contents of snapshotted file have changed unexpectedly",
+        contents, DFSTestUtil.readFile(fs, snapshottedECFile));
+
+    // Now delete the snapshots out of order and verify the EC policy
+    // correctness
+    fs.deleteSnapshot(ecDirParent, snap2.getName());
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap1ECDir));
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap3ECDir));
+    fs.deleteSnapshot(ecDirParent, snap1.getName());
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap3ECDir));
+  }
+
+  /**
+   * Test creating a snapshot of a directory that has an erasure coding policy.
+   */
+  @Test(timeout = 120000)
+  public void testSnapshotsOnErasureCodingDir() throws Exception {
+    final Path ecDir = new Path("/ecdir");
+    fs.mkdirs(ecDir);
+    fs.allowSnapshot(ecDir);
+
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    final Path snap1 = fs.createSnapshot(ecDir, "snap1");
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap1));
+  }
+
+  /**
+   * Test that the erasure coding policy is still present after a NameNode restart.
+   */
+  @Test(timeout = 120000)
+  public void testSnapshotsOnErasureCodingDirAfterNNRestart() throws Exception {
+    final Path ecDir = new Path("/ecdir");
+    fs.mkdirs(ecDir);
+    fs.allowSnapshot(ecDir);
+
+    // set erasure coding policy
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    final Path snap1 = fs.createSnapshot(ecDir, "snap1");
+    ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        ecSnap);
+
+    // save namespace, restart namenode, and check ec policy correctness.
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+
+    ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        ecSnap1);
+    assertEquals("Got unexpected ecSchema", ecSnap.getSchema(),
+        ecSnap1.getSchema());
+  }
+
+  /**
+   * Test that copying a snapshot does not preserve its erasure coding policy info.
+   */
+  @Test(timeout = 120000)
+  public void testCopySnapshotWillNotPreserveErasureCodingPolicy()
+      throws Exception {
+    final int len = 1024;
+    final Path ecDir = new Path("/ecdir");
+    final Path ecFile = new Path(ecDir, "ecFile");
+    fs.mkdirs(ecDir);
+    fs.allowSnapshot(ecDir);
+
+    // set erasure coding policy
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
+    final Path snap1 = fs.createSnapshot(ecDir, "snap1");
+
+    Path snap1Copy = new Path(ecDir.toString() + "-copy");
+    final Path snap1CopyECDir = new Path("/ecdir-copy");
+    String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(),
+        snap1Copy.toUri().toString() };
+    int ret = ToolRunner.run(new FsShell(conf), argv);
+    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+
+    assertNull("Got unexpected erasure coding policy",
+        fs.getErasureCodingPolicy(snap1CopyECDir));
+    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+        fs.getErasureCodingPolicy(snap1));
+  }
+}
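These tests hinge on one behavior: a snapshot records the erasure coding policy its directory had at snapshot time, and later changes to the live directory do not rewrite what older snapshots report. A minimal sketch of the core check, assuming a DistributedFileSystem from a running MiniDFSCluster (class and method names here are illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

    public class EcSnapshotSketch {
      /** Snapshot a directory and read its EC policy back through the snapshot path. */
      static ErasureCodingPolicy policySeenBySnapshot(DistributedFileSystem fs, Path dir)
          throws Exception {
        ErasureCodingPolicy policy =
            ErasureCodingPolicyManager.getSystemDefaultPolicy();
        fs.mkdirs(dir);
        fs.allowSnapshot(dir);
        fs.setErasureCodingPolicy(dir, policy);
        Path snap = fs.createSnapshot(dir, "snap1");
        return fs.getErasureCodingPolicy(snap);  // expected to equal 'policy'
      }
    }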
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index c9f3842..4ae130d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -20,9 +20,7 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
@@ -32,7 +30,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -178,8 +175,9 @@
     Path file = new Path("/testRecoveryFile");
     DistributedFileSystem dfs = cluster.getFileSystem();
     FSDataOutputStream out = dfs.create(file);
+    final int FILE_SIZE = 2 * 1024 * 1024;
     int count = 0;
-    while (count < 2 * 1024 * 1024) {
+    while (count < FILE_SIZE) {
       out.writeBytes("Data");
       count += 4;
     }
@@ -190,15 +188,23 @@
     LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
         file.toString(), 0, count);
     ExtendedBlock block = locations.get(0).getBlock();
-    DataNode dn = cluster.getDataNodes().get(0);
-    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
-    File metafile = new File(localPathInfo.getMetaPath());
-    assertTrue(metafile.exists());
 
-    // reduce the block meta file size
-    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
-    raf.setLength(metafile.length() - 20);
-    raf.close();
+    // Calculate meta file size
+    // From DataNode.java, checksum size is given by:
+    // (length of data + BYTES_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM *
+    // CHECKSUM_SIZE
+    final int CHECKSUM_SIZE = 4; // CRC32 & CRC32C
+    final int bytesPerChecksum = conf.getInt(
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+        DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    final int metaFileSize =
+        (FILE_SIZE + bytesPerChecksum - 1) / bytesPerChecksum * CHECKSUM_SIZE +
+        8; // meta file header is 8 bytes
+    final int newMetaFileSize = metaFileSize - CHECKSUM_SIZE;
+
+    // Corrupt the block meta file by dropping checksum for bytesPerChecksum
+    // bytes. Lease recovery is expected to recover the uncorrupted file length.
+    cluster.truncateMeta(0, block, newMetaFileSize);
 
     // restart DN to make replica to RWR
     DataNodeProperties dnProp = cluster.stopDataNode(0);
@@ -213,6 +219,11 @@
     }
     assertTrue("File should be closed", newdfs.recoverLease(file));
 
+    // Verify file length after lease recovery. The new file length should not
+    // include the bytes with corrupted checksum.
+    final long expectedNewFileLen = FILE_SIZE - bytesPerChecksum;
+    final long newFileLen = newdfs.getFileStatus(file).getLen();
+    assertEquals(expectedNewFileLen, newFileLen);
   }
 
   /**
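The replacement above computes the meta-file size instead of measuring the file on disk: an 8-byte header plus one CHECKSUM_SIZE-byte CRC for every bytesPerChecksum bytes of data, rounded up. With the 2 MiB file written by the test and the usual 512-byte bytes-per-checksum default (assumed here), that gives 8 + (2,097,152 / 512) * 4 = 16,392 bytes; cutting off one 4-byte checksum leaves the final 512-byte chunk unverifiable, so lease recovery should report 2,097,152 - 512 bytes. The arithmetic as a runnable sketch:

    public class MetaFileSizeSketch {
      public static void main(String[] args) {
        final int FILE_SIZE = 2 * 1024 * 1024; // data written by the test
        final int bytesPerChecksum = 512;      // dfs.bytes-per-checksum default (assumed)
        final int CHECKSUM_SIZE = 4;           // CRC32 / CRC32C
        final int HEADER = 8;                  // meta file header

        int metaFileSize =
            (FILE_SIZE + bytesPerChecksum - 1) / bytesPerChecksum * CHECKSUM_SIZE + HEADER;
        int newMetaFileSize = metaFileSize - CHECKSUM_SIZE;   // drop the last checksum
        long expectedNewFileLen = FILE_SIZE - bytesPerChecksum;

        System.out.println("meta file size    = " + metaFileSize);       // 16392
        System.out.println("truncated to      = " + newMetaFileSize);    // 16388
        System.out.println("expected file len = " + expectedNewFileLen); // 2096640
      }
    }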
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index f521d8e..b5ffb38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -41,9 +41,9 @@
 public class TestReadStripedFileWithMissingBlocks {
   public static final Log LOG = LogFactory
       .getLog(TestReadStripedFileWithMissingBlocks.class);
-  private static MiniDFSCluster cluster;
-  private static DistributedFileSystem fs;
-  private static Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private Configuration conf = new HdfsConfiguration();
   private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
   private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private final int fileLength = blockSize * dataBlocks + 123;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
index ec7594f..5b9245b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
@@ -20,9 +20,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.io.ByteArrayOutputStream;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.BitSet;
@@ -41,11 +39,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -80,9 +76,12 @@
   public void setup() throws IOException {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_STRIPED_READ_BUFFER_SIZE_KEY, cellSize - 1);
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_STRIPED_READ_BUFFER_SIZE_KEY,
+        cellSize - 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();;
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();
     cluster.waitActive();
     
     fs = cluster.getFileSystem();
@@ -251,82 +250,56 @@
           lastBlock.getBlockSize(), cellSize, dataBlkNum, indices[toDead[i]]));
       assertTrue(metadatas[i].getName().
           endsWith(blocks[i].getGenerationStamp() + ".meta"));
-      replicaContents[i] = readReplica(replicas[i]);
+      replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
     }
     
     int cellsNum = (fileLen - 1) / cellSize + 1;
     int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;
 
-    try {
-      DatanodeID[] dnIDs = new DatanodeID[toRecoverBlockNum];
-      for (int i = 0; i < toRecoverBlockNum; i++) {
-        /*
-         * Kill the datanode which contains one replica
-         * We need to make sure it dead in namenode: clear its update time and 
-         * trigger NN to check heartbeat.
-         */
-        DataNode dn = cluster.getDataNodes().get(deadDnIndices[i]);
-        dn.shutdown();
-        dnIDs[i] = dn.getDatanodeId();
-      }
-      setDataNodesDead(dnIDs);
-      
-      // Check the locatedBlocks of the file again
-      locatedBlocks = getLocatedBlocks(file);
-      lastBlock = (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
-      storageInfos = lastBlock.getLocations();
-      assertEquals(storageInfos.length, groupSize - toRecoverBlockNum);
-      
-      int[] targetDNs = new int[dnNum - groupSize];
-      n = 0;
-      for (int i = 0; i < dnNum; i++) {
-        if (!bitset.get(i)) { // not contain replica of the block.
-          targetDNs[n++] = i;
-        }
-      }
-      
-      waitForRecoveryFinished(file, groupSize);
-      
-      targetDNs = sortTargetsByReplicas(blocks, targetDNs);
-      
-      // Check the replica on the new target node.
-      for (int i = 0; i < toRecoverBlockNum; i++) {
-        File replicaAfterRecovery = cluster.getBlockFile(targetDNs[i], blocks[i]);
-        File metadataAfterRecovery = 
-            cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
-        assertEquals(replicaAfterRecovery.length(), replicas[i].length());
-        assertTrue(metadataAfterRecovery.getName().
-            endsWith(blocks[i].getGenerationStamp() + ".meta"));
-        byte[] replicaContentAfterRecovery = readReplica(replicaAfterRecovery);
-        
-        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterRecovery);
-      }
-    } finally {
-      for (int i = 0; i < toRecoverBlockNum; i++) {
-        restartDataNode(toDead[i]);
-      }
-      cluster.waitActive();
+    for (int i = 0; i < toRecoverBlockNum; i++) {
+      /*
+       * Kill the datanode which contains one replica.
+       * We need to make sure it is marked dead in the namenode: clear its
+       * update time and trigger the NN to check heartbeats.
+       */
+      DataNode dn = cluster.getDataNodes().get(deadDnIndices[i]);
+      dn.shutdown();
+      cluster.setDataNodeDead(dn.getDatanodeId());
     }
-    fs.delete(file, true);
-  }
-  
-  private void setDataNodesDead(DatanodeID[] dnIDs) throws IOException {
-    for (DatanodeID dn : dnIDs) {
-      DatanodeDescriptor dnd =
-          NameNodeAdapter.getDatanode(cluster.getNamesystem(), dn);
-      DFSTestUtil.setDatanodeDead(dnd);
+
+    // Check the locatedBlocks of the file again
+    locatedBlocks = getLocatedBlocks(file);
+    lastBlock = (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
+    storageInfos = lastBlock.getLocations();
+    assertEquals(storageInfos.length, groupSize - toRecoverBlockNum);
+
+    int[] targetDNs = new int[dnNum - groupSize];
+    n = 0;
+    for (int i = 0; i < dnNum; i++) {
+      if (!bitset.get(i)) { // does not contain a replica of the block.
+        targetDNs[n++] = i;
+      }
     }
     
-    BlockManagerTestUtil.checkHeartbeat(cluster.getNamesystem().getBlockManager());
-  }
-  
-  private void restartDataNode(int dn) {
-    try {
-      cluster.restartDataNode(dn, true, true);
-    } catch (IOException e) {
+    waitForRecoveryFinished(file, groupSize);
+
+    targetDNs = sortTargetsByReplicas(blocks, targetDNs);
+
+    // Check the replica on the new target node.
+    for (int i = 0; i < toRecoverBlockNum; i++) {
+      File replicaAfterRecovery = cluster.getBlockFile(targetDNs[i], blocks[i]);
+      File metadataAfterRecovery =
+          cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
+      assertEquals(replicaAfterRecovery.length(), replicas[i].length());
+      assertTrue(metadataAfterRecovery.getName().
+          endsWith(blocks[i].getGenerationStamp() + ".meta"));
+      byte[] replicaContentAfterRecovery =
+          DFSTestUtil.readFileAsBytes(replicaAfterRecovery);
+
+      Assert.assertArrayEquals(replicaContents[i], replicaContentAfterRecovery);
     }
   }
-  
+
   private int[] sortTargetsByReplicas(ExtendedBlock[] blocks, int[] targetDNs) {
     int[] result = new int[blocks.length];
     for (int i = 0; i < blocks.length; i++) {
@@ -347,31 +320,7 @@
     }
     return result;
   }
-  
-  private byte[] readReplica(File replica) throws IOException {
-    int length = (int)replica.length();
-    ByteArrayOutputStream content = new ByteArrayOutputStream(length);
-    FileInputStream in = new FileInputStream(replica);
-    try {
-      byte[] buffer = new byte[1024];
-      int total = 0;
-      while (total < length) {
-        int n = in.read(buffer);
-        if (n <= 0) {
-          break;
-        }
-        content.write(buffer, 0, n);
-        total += n;
-      }
-      if (total < length) {
-        Assert.fail("Failed to read all content of replica");
-      }
-      return content.toByteArray();
-    } finally {
-      in.close();
-    }
-  }
-  
+
   private LocatedBlocks waitForRecoveryFinished(Path file, int groupSize) 
       throws Exception {
     final int ATTEMPTS = 60;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 6424bc3..d9c96ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -20,22 +20,14 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -50,7 +42,6 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -62,6 +53,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -367,7 +359,7 @@
     for (int i=0; i<buffer.length; i++) {
       buffer[i] = '1';
     }
-    
+
     try {
       Configuration conf = new HdfsConfiguration();
       conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
@@ -387,41 +379,29 @@
       // get first block of the file.
       ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile,
           0, Long.MAX_VALUE).get(0).getBlock();
-      
-      cluster.shutdown();
-      
-      for (int i=0; i<25; i++) {
-        buffer[i] = '0';
+
+      List<MaterializedReplica> replicas = new ArrayList<>();
+      for (int dnIndex=0; dnIndex<3; dnIndex++) {
+        replicas.add(cluster.getMaterializedReplica(dnIndex, block));
       }
-      
+      assertEquals(3, replicas.size());
+
+      cluster.shutdown();
+
       int fileCount = 0;
       // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
-      for (int dnIndex=0; dnIndex<3; dnIndex++) {
-        File blockFile = cluster.getBlockFile(dnIndex, block);
-        LOG.info("Checking for file " + blockFile);
-        
-        if (blockFile != null && blockFile.exists()) {
-          if (fileCount == 0) {
-            LOG.info("Deleting file " + blockFile);
-            assertTrue(blockFile.delete());
-          } else {
-            // corrupt it.
-            LOG.info("Corrupting file " + blockFile);
-            long len = blockFile.length();
-            assertTrue(len > 50);
-            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
-            try {
-              blockOut.seek(len/3);
-              blockOut.write(buffer, 0, 25);
-            } finally {
-              blockOut.close();
-            }
-          }
-          fileCount++;
+      for (MaterializedReplica replica : replicas) {
+        if (fileCount == 0) {
+          LOG.info("Deleting block " + replica);
+          replica.deleteData();
+        } else {
+          // corrupt it.
+          LOG.info("Corrupting file " + replica);
+          replica.corruptData();
         }
+        fileCount++;
       }
-      assertEquals(3, fileCount);
-      
+
       /* Start the MiniDFSCluster with more datanodes since once a writeBlock
        * to a datanode node fails, same block can not be written to it
        * immediately. In our case some replication attempts will fail.
@@ -530,63 +510,28 @@
           DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      FSDataOutputStream create = fs.create(new Path("/test"));
-      fs.setReplication(new Path("/test"), (short) 1);
+      Path filePath = new Path("/test");
+      FSDataOutputStream create = fs.create(filePath);
+      fs.setReplication(filePath, (short) 1);
       create.write(new byte[1024]);
       create.close();
 
-      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
-      File participatedNodeDirs = null;
-      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
-        File storageDir = cluster.getInstanceStorageDir(i, 0);
-        String bpid = cluster.getNamesystem().getBlockPoolId();
-        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        if (data_dir.listFiles().length == 0) {
-          nonParticipatedNodeDirs.add(data_dir);
-        } else {
-          assertNull("participatedNodeDirs has already been set.",
-              participatedNodeDirs);
-          participatedNodeDirs = data_dir;
-        }
-      }
-      assertEquals(2, nonParticipatedNodeDirs.size());
-
-      String blockFile = null;
-      final List<File> listFiles = new ArrayList<>();
-      Files.walkFileTree(participatedNodeDirs.toPath(),
-          new SimpleFileVisitor<java.nio.file.Path>() {
-            @Override
-            public FileVisitResult visitFile(
-                java.nio.file.Path file, BasicFileAttributes attrs)
-                throws IOException {
-              listFiles.add(file.toFile());
-              return FileVisitResult.CONTINUE;
-            }
-          }
-      );
-      assertFalse(listFiles.isEmpty());
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
       int numReplicaCreated = 0;
-      for (File file : listFiles) {
-        if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
-            && !file.getName().endsWith("meta")) {
-          blockFile = file.getName();
-          for (File file1 : nonParticipatedNodeDirs) {
-            file1.mkdirs();
-            new File(file1, blockFile).createNewFile();
-            new File(file1, blockFile + "_1000.meta").createNewFile();
-            numReplicaCreated++;
-          }
-          break;
+      for (final DataNode dn : cluster.getDataNodes()) {
+        if (!dn.getFSDataset().contains(block)) {
+          cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
+          numReplicaCreated++;
         }
       }
       assertEquals(2, numReplicaCreated);
 
-      fs.setReplication(new Path("/test"), (short) 3);
+      fs.setReplication(filePath, (short) 3);
       cluster.restartDataNodes(); // Lets detect all DNs about dummy copied
       // blocks
       cluster.waitActive();
       cluster.triggerBlockReports();
-      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+      DFSTestUtil.waitReplication(fs, filePath, (short) 3);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 9853b8a..6d711d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -46,8 +46,8 @@
   static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   static final int blockSize = cellSize * 2;
 
-  static MiniDFSCluster cluster;
-  static Configuration conf;
+  private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @Before
   public void setup() throws IOException {
@@ -57,7 +57,6 @@
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
     cluster.waitActive();
-
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 4beb01f..3ea6eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -47,11 +47,11 @@
 
 public class TestWriteReadStripedFile {
   public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
-  private static MiniDFSCluster cluster;
-  private static DistributedFileSystem fs;
   private static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private static short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private Configuration conf = new HdfsConfiguration();
 
   static {
     GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.ALL);
@@ -64,6 +64,8 @@
   @Before
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     fs = cluster.getFileSystem();
     fs.mkdirs(new Path("/ec"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 764527d..6dcff69d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -38,9 +38,9 @@
 public class TestWriteStripedFileWithFailure {
   public static final Log LOG = LogFactory
       .getLog(TestWriteStripedFileWithFailure.class);
-  private static MiniDFSCluster cluster;
-  private static FileSystem fs;
-  private static Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+  private FileSystem fs;
+  private Configuration conf = new HdfsConfiguration();
 
   static {
     GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 332ae15..dd54345 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -86,6 +86,7 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.Test;
 
 /**
@@ -106,6 +107,14 @@
   final static Path filePath = new Path(fileName);
   private MiniDFSCluster cluster;
 
+  @After
+  public void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
   ClientProtocol client;
 
   static final long TIMEOUT = 40000L; //msec
@@ -367,44 +376,38 @@
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
         .hosts(hosts).racks(racks).simulatedCapacities(capacities).build();
 
-    try {
-      cluster.waitActive();
-      client = NameNodeProxies.createProxy(conf,
-          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
-      
-      // fill up the cluster to be 80% full
-      long totalCapacity = sum(capacities);
-      long totalUsedSpace = totalCapacity * 8 / 10;
-      InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
-      for (int i = 0; i < favoredNodes.length; i++) {
-        // DFSClient will attempt reverse lookup. In case it resolves
-        // "127.0.0.1" to "localhost", we manually specify the hostname.
-        int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
-        favoredNodes[i] = new InetSocketAddress(hosts[i], port);
-      }
+    cluster.waitActive();
+    client = NameNodeProxies.createProxy(conf,
+        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
 
-      DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
-          totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
-          (short) numOfDatanodes, 0, false, favoredNodes);
-      
-      // start up an empty node with the same capacity
-      cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 },
-          new long[] { CAPACITY });
-      
-      totalCapacity += CAPACITY;
-      
-      // run balancer and validate results
-      waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
-
-      // start rebalancing
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-      int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
-      assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
-      
-    } finally {
-      cluster.shutdown();
+    // fill up the cluster to be 80% full
+    long totalCapacity = sum(capacities);
+    long totalUsedSpace = totalCapacity * 8 / 10;
+    InetSocketAddress[] favoredNodes = new InetSocketAddress[numOfDatanodes];
+    for (int i = 0; i < favoredNodes.length; i++) {
+      // DFSClient will attempt reverse lookup. In case it resolves
+      // "127.0.0.1" to "localhost", we manually specify the hostname.
+      int port = cluster.getDataNodes().get(i).getXferAddress().getPort();
+      favoredNodes[i] = new InetSocketAddress(hosts[i], port);
     }
-    
+
+    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
+        totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
+        (short) numOfDatanodes, 0, false, favoredNodes);
+
+    // start up an empty node with the same capacity
+    cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 },
+        new long[] { CAPACITY });
+
+    totalCapacity += CAPACITY;
+
+    // run balancer and validate results
+    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+
+    // start rebalancing
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
+    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
   }
   
   /**
@@ -588,7 +591,7 @@
   private void doTest(Configuration conf, long[] capacities,
       String[] racks, long newCapacity, String newRack, NewNodeInfo nodes,
       boolean useTool, boolean useFile) throws Exception {
-    LOG.info("capacities = " +  long2String(capacities)); 
+    LOG.info("capacities = " +  long2String(capacities));
     LOG.info("racks      = " +  Arrays.asList(racks)); 
     LOG.info("newCapacity= " +  newCapacity); 
     LOG.info("newRack    = " +  newRack); 
@@ -606,7 +609,7 @@
           ClientProtocol.class).getProxy();
 
       long totalCapacity = sum(capacities);
-      
+
       // fill up the cluster to be 30% full
       long totalUsedSpace = totalCapacity*3/10;
       createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
@@ -830,7 +833,7 @@
   /** one-node cluster test*/
   private void oneNodeTest(Configuration conf, boolean useTool) throws Exception {
     // add an empty node with half of the CAPACITY & the same rack
-    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, 
+    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2,
             RACK0, useTool);
   }
   
@@ -884,31 +887,27 @@
         .racks(racks)
         .simulatedCapacities(capacities)
         .build();
-    try {
-      cluster.waitActive();
-      client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
-          ClientProtocol.class).getProxy();
+    cluster.waitActive();
+    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
+        ClientProtocol.class).getProxy();
 
-      for(int i = 0; i < 3; i++) {
-        cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
-      }
-
-      cluster.startDataNodes(conf, 1, true, null,
-          new String[]{RACK0}, null,new long[]{CAPACITY});
-      cluster.triggerHeartbeats();
-
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-      Set<String>  datanodes = new HashSet<String>();
-      datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
-      BalancerParameters.Builder pBuilder =
-          new BalancerParameters.Builder();
-      pBuilder.setExcludedNodes(datanodes);
-      pBuilder.setRunDuringUpgrade(false);
-      final int r = Balancer.run(namenodes, pBuilder.build(), conf);
-      assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
-    } finally {
-      cluster.shutdown();
+    for(int i = 0; i < 3; i++) {
+      cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
     }
+
+    cluster.startDataNodes(conf, 1, true, null,
+        new String[]{RACK0}, null, new long[]{CAPACITY});
+    cluster.triggerHeartbeats();
+
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    Set<String>  datanodes = new HashSet<String>();
+    datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
+    BalancerParameters.Builder pBuilder =
+        new BalancerParameters.Builder();
+    pBuilder.setExcludedNodes(datanodes);
+    pBuilder.setRunDuringUpgrade(false);
+    final int r = Balancer.run(namenodes, pBuilder.build(), conf);
+    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
   }
 
   /**
@@ -1339,47 +1338,44 @@
       .storageTypes(new StorageType[] { RAM_DISK, DEFAULT })
       .build();
 
-    try {
-      cluster.waitActive();
-      // Create few files on RAM_DISK
-      final String METHOD_NAME = GenericTestUtils.getMethodName();
-      final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
-      final Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
+    cluster.waitActive();
+    // Create few files on RAM_DISK
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
+    final Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
 
-      DistributedFileSystem fs = cluster.getFileSystem();
-      DFSClient client = fs.getClient();
-      DFSTestUtil.createFile(fs, path1, true,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
-      DFSTestUtil.createFile(fs, path2, true,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
+    DistributedFileSystem fs = cluster.getFileSystem();
+    DFSClient client = fs.getClient();
+    DFSTestUtil.createFile(fs, path1, true,
+      DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE,
+      DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
+    DFSTestUtil.createFile(fs, path2, true,
+      DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE,
+      DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
 
-      // Sleep for a short time to allow the lazy writer thread to do its job
-      Thread.sleep(6 * 1000);
+    // Sleep for a short time to allow the lazy writer thread to do its job
+    Thread.sleep(6 * 1000);
 
-      // Add another fresh DN with the same type/capacity without files on RAM_DISK
-      StorageType[][] storageTypes = new StorageType[][] {{RAM_DISK, DEFAULT}};
-      long[][] storageCapacities = new long[][]{{ramDiskStorageLimit, diskStorageLimit}};
-      cluster.startDataNodes(conf, REPL_FACT, storageTypes, true, null,
-        null, null, storageCapacities, null, false, false, false, null);
+    // Add another fresh DN with the same type/capacity without files on RAM_DISK
+    StorageType[][] storageTypes = new StorageType[][] {{RAM_DISK, DEFAULT}};
+    long[][] storageCapacities = new long[][]{{ramDiskStorageLimit,
+        diskStorageLimit}};
+    cluster.startDataNodes(conf, REPL_FACT, storageTypes, true, null,
+      null, null, storageCapacities, null, false, false, false, null);
 
-      cluster.triggerHeartbeats();
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    cluster.triggerHeartbeats();
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
 
-      // Run Balancer
-      final BalancerParameters p = BalancerParameters.DEFAULT;
-      final int r = Balancer.run(namenodes, p, conf);
+    // Run Balancer
+    final BalancerParameters p = BalancerParameters.DEFAULT;
+    final int r = Balancer.run(namenodes, p, conf);
 
-      // Validate no RAM_DISK block should be moved
-      assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
+    // Validate no RAM_DISK block should be moved
+    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
 
-      // Verify files are still on RAM_DISK
-      DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path1, RAM_DISK);
-      DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path2, RAM_DISK);
-    } finally {
-      cluster.shutdown();
-    }
+    // Verify files are still on RAM_DISK
+    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path1, RAM_DISK);
+    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path2, RAM_DISK);
   }
 
   /**
@@ -1403,51 +1399,45 @@
         .storageTypes(new StorageType[] { DEFAULT })
         .storagesPerDatanode(1)
         .build();
+    cluster.waitActive();
+    // Create a file on the single DN
+    final String METHOD_NAME = GenericTestUtils.getMethodName();
+    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
 
-    try {
-      cluster.waitActive();
-      // Create a file on the single DN
-      final String METHOD_NAME = GenericTestUtils.getMethodName();
-      final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
+    DistributedFileSystem fs = cluster.getFileSystem();
+    DFSTestUtil.createFile(fs, path1, BLOCK_SIZE, BLOCK_SIZE * 2, BLOCK_SIZE,
+        (short) 1, SEED);
 
-      DistributedFileSystem fs = cluster.getFileSystem();
-      DFSTestUtil.createFile(fs, path1, BLOCK_SIZE, BLOCK_SIZE * 2, BLOCK_SIZE,
-          (short) 1, SEED);
+    // Add another DN with the same capacity, cluster is now unbalanced
+    cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.triggerHeartbeats();
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
 
-      // Add another DN with the same capacity, cluster is now unbalanced
-      cluster.startDataNodes(conf, 1, true, null, null);
-      cluster.triggerHeartbeats();
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    // Run balancer
+    final BalancerParameters p = BalancerParameters.DEFAULT;
 
-      // Run balancer
-      final BalancerParameters p = BalancerParameters.DEFAULT;
+    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
+    fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
 
-      fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
-      fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
-      fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    // Rolling upgrade should abort the balancer
+    assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(),
+        Balancer.run(namenodes, p, conf));
 
-      // Rolling upgrade should abort the balancer
-      assertEquals(ExitStatus.UNFINALIZED_UPGRADE.getExitCode(),
-          Balancer.run(namenodes, p, conf));
+    // Should work with the -runDuringUpgrade flag.
+    BalancerParameters.Builder b =
+        new BalancerParameters.Builder();
+    b.setRunDuringUpgrade(true);
+    final BalancerParameters runDuringUpgrade = b.build();
+    assertEquals(ExitStatus.SUCCESS.getExitCode(),
+        Balancer.run(namenodes, runDuringUpgrade, conf));
 
-      // Should work with the -runDuringUpgrade flag.
-      BalancerParameters.Builder b =
-          new BalancerParameters.Builder();
-      b.setRunDuringUpgrade(true);
-      final BalancerParameters runDuringUpgrade = b.build();
-      assertEquals(ExitStatus.SUCCESS.getExitCode(),
-          Balancer.run(namenodes, runDuringUpgrade, conf));
+    // Finalize the rolling upgrade
+    fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
 
-      // Finalize the rolling upgrade
-      fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
-
-      // Should also work after finalization.
-      assertEquals(ExitStatus.SUCCESS.getExitCode(),
-          Balancer.run(namenodes, p, conf));
-
-    } finally {
-      cluster.shutdown();
-    }
+    // Should also work after finalization.
+    assertEquals(ExitStatus.SUCCESS.getExitCode(),
+        Balancer.run(namenodes, p, conf));
   }
 
   /**
@@ -1469,7 +1459,7 @@
     conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
 
     int numOfDatanodes =2;
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(2)
         .racks(new String[]{"/default/rack0", "/default/rack0"})
         .storagesPerDatanode(2)
@@ -1480,39 +1470,33 @@
             {100 * blockSize, 20 * blockSize},
             {20 * blockSize, 100 * blockSize}})
         .build();
+    cluster.waitActive();
 
-    try {
-      cluster.waitActive();
+    //set "/bar" directory with ONE_SSD storage policy.
+    DistributedFileSystem fs = cluster.getFileSystem();
+    Path barDir = new Path("/bar");
+    fs.mkdir(barDir, new FsPermission((short) 777));
+    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
 
-      //set "/bar" directory with ONE_SSD storage policy.
-      DistributedFileSystem fs = cluster.getFileSystem();
-      Path barDir = new Path("/bar");
-      fs.mkdir(barDir,new FsPermission((short)777));
-      fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
+    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
+    long fileLen = 30 * blockSize;
+    // fooFile has the ONE_SSD policy, so the 2 replicas of each block are
+    // placed either on (DN0,SSD) and (DN1,DISK),
+    // or on (DN0,DISK) and (DN1,SSD).
+    Path fooFile = new Path(barDir, "foo");
+    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
+    // update space info
+    cluster.triggerHeartbeats();
 
-      // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
-      // and (DN0,SSD) and (DN1,DISK) are about 15% full.
-      long fileLen  = 30 * blockSize;
-      // fooFile has ONE_SSD policy. So
-      // (DN0,SSD) and (DN1,DISK) have 2 replicas belong to same block.
-      // (DN0,DISK) and (DN1,SSD) have 2 replicas belong to same block.
-      Path fooFile = new Path(barDir, "foo");
-      createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
-      // update space info
-      cluster.triggerHeartbeats();
+    BalancerParameters p = BalancerParameters.DEFAULT;
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    final int r = Balancer.run(namenodes, p, conf);
 
-      BalancerParameters p = BalancerParameters.DEFAULT;
-      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-      final int r = Balancer.run(namenodes, p, conf);
-
-      // Replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK)
-      // already has one. Otherwise DN1 will have 2 replicas.
-      // For same reason, no replicas were moved.
-      assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
-
-    } finally {
-      cluster.shutdown();
-    }
+    // Replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK)
+    // already has one. Otherwise DN1 will have 2 replicas.
+    // For same reason, no replicas were moved.
+    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
   }
 
   /**
@@ -1543,50 +1527,46 @@
     int numOfDatanodes = capacities.length;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
         .racks(racks).simulatedCapacities(capacities).build();
-    try {
-      cluster.waitActive();
-      client = NameNodeProxies.createProxy(conf,
-          cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+    cluster.waitActive();
+    client = NameNodeProxies.createProxy(conf,
+        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
 
-      long totalCapacity = sum(capacities);
+    long totalCapacity = sum(capacities);
 
-      // fill up the cluster to be 30% full
-      final long totalUsedSpace = totalCapacity * 3 / 10;
-      createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
-          (short) numOfDatanodes, 0);
-      // start up an empty node with the same capacity and on the same rack
-      cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
-          new long[] { newCapacity });
+    // fill up the cluster to be 30% full
+    final long totalUsedSpace = totalCapacity * 3 / 10;
+    createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
+        (short) numOfDatanodes, 0);
+    // start up an empty node with the same capacity and on the same rack
+    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
+        new long[] { newCapacity });
 
-      // Case1: Simulate first balancer by creating 'balancer.id' file. It
-      // will keep this file until the balancing operation is completed.
-      FileSystem fs = cluster.getFileSystem(0);
-      final FSDataOutputStream out = fs
-          .create(Balancer.BALANCER_ID_PATH, false);
-      out.writeBytes(InetAddress.getLocalHost().getHostName());
-      out.hflush();
-      assertTrue("'balancer.id' file doesn't exist!",
-          fs.exists(Balancer.BALANCER_ID_PATH));
+    // Case1: Simulate first balancer by creating 'balancer.id' file. It
+    // will keep this file until the balancing operation is completed.
+    FileSystem fs = cluster.getFileSystem(0);
+    final FSDataOutputStream out = fs
+        .create(Balancer.BALANCER_ID_PATH, false);
+    out.writeBytes(InetAddress.getLocalHost().getHostName());
+    out.hflush();
+    assertTrue("'balancer.id' file doesn't exist!",
+        fs.exists(Balancer.BALANCER_ID_PATH));
 
-      // start second balancer
-      final String[] args = { "-policy", "datanode" };
-      final Tool tool = new Cli();
-      tool.setConf(conf);
-      int exitCode = tool.run(args); // start balancing
-      assertEquals("Exit status code mismatches",
-          ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
+    // start second balancer
+    final String[] args = { "-policy", "datanode" };
+    final Tool tool = new Cli();
+    tool.setConf(conf);
+    int exitCode = tool.run(args); // start balancing
+    assertEquals("Exit status code mismatches",
+        ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
 
-      // Case2: Release lease so that another balancer would be able to
-      // perform balancing.
-      out.close();
-      assertTrue("'balancer.id' file doesn't exist!",
-          fs.exists(Balancer.BALANCER_ID_PATH));
-      exitCode = tool.run(args); // start balancing
-      assertEquals("Exit status code mismatches",
-          ExitStatus.SUCCESS.getExitCode(), exitCode);
-    } finally {
-      cluster.shutdown();
-    }
+    // Case2: Release lease so that another balancer would be able to
+    // perform balancing.
+    out.close();
+    assertTrue("'balancer.id' file doesn't exist!",
+        fs.exists(Balancer.BALANCER_ID_PATH));
+    exitCode = tool.run(args); // start balancing
+    assertEquals("Exit status code mismatches",
+        ExitStatus.SUCCESS.getExitCode(), exitCode);
   }
 
   /** Balancer should not move blocks with size < minBlockSize. */
@@ -1606,101 +1586,97 @@
         .simulatedCapacities(capacities)
         .build();
     final DistributedFileSystem dfs = cluster.getFileSystem();
+    cluster.waitActive();
+    client = NameNodeProxies.createProxy(conf, dfs.getUri(),
+        ClientProtocol.class).getProxy();
 
-    try {
-      cluster.waitActive();
-      client = NameNodeProxies.createProxy(conf, dfs.getUri(),
-          ClientProtocol.class).getProxy();
-      
-      // fill up the cluster to be 80% full
-      for(int i = 0; i < lengths.length; i++) {
-        final long size = lengths[i];
-        final Path p = new Path("/file" + i + "_size" + size);
-        try(final OutputStream out = dfs.create(p)) {
-          for(int j = 0; j < size; j++) {
-            out.write(j);
-          }
+    // fill up the cluster to be 80% full
+    for(int i = 0; i < lengths.length; i++) {
+      final long size = lengths[i];
+      final Path p = new Path("/file" + i + "_size" + size);
+      try(final OutputStream out = dfs.create(p)) {
+        for(int j = 0; j < size; j++) {
+          out.write(j);
         }
       }
-      
-      // start up an empty node with the same capacity
-      cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
-      LOG.info("capacities    = " + Arrays.toString(capacities));
-      LOG.info("totalUsedSpace= " + totalUsed);
-      LOG.info("lengths       = " + Arrays.toString(lengths) + ", #=" + lengths.length);
-      waitForHeartBeat(totalUsed, 2*capacities[0]*capacities.length, client, cluster);
-      
-      final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+    }
 
-      { // run Balancer with min-block-size=50
-        BalancerParameters.Builder b =
-            new BalancerParameters.Builder();
-        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-        b.setThreshold(1);
-        final BalancerParameters p = b.build();
+    // start up an empty node with the same capacity
+    cluster.startDataNodes(conf, capacities.length, true, null, null, capacities);
+    LOG.info("capacities    = " + Arrays.toString(capacities));
+    LOG.info("totalUsedSpace= " + totalUsed);
+    LOG.info("lengths       = " + Arrays.toString(lengths) + ", #=" + lengths.length);
+    waitForHeartBeat(totalUsed, 2*capacities[0]*capacities.length, client, cluster);
 
-        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
-        final int r = Balancer.run(namenodes, p, conf);
-        assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
+    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+
+    { // run Balancer with min-block-size=50
+      BalancerParameters.Builder b =
+          new BalancerParameters.Builder();
+      b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+      b.setThreshold(1);
+      final BalancerParameters p = b.build();
+
+      conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
+      final int r = Balancer.run(namenodes, p, conf);
+      assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
+    }
+
+    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
+
+    { // run Balancer with empty nodes as source nodes
+      final Set<String> sourceNodes = new HashSet<>();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      for(int i = capacities.length; i < datanodes.size(); i++) {
+        sourceNodes.add(datanodes.get(i).getDisplayName());
       }
-      
+      BalancerParameters.Builder b =
+          new BalancerParameters.Builder();
+      b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+      b.setThreshold(1);
+      b.setSourceNodes(sourceNodes);
+      final BalancerParameters p = b.build();
+
+      conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
+      final int r = Balancer.run(namenodes, p, conf);
+      assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
+    }
+
+    { // run Balancer with a filled node as a source node
+      final Set<String> sourceNodes = new HashSet<>();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      sourceNodes.add(datanodes.get(0).getDisplayName());
+      BalancerParameters.Builder b =
+          new BalancerParameters.Builder();
+      b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+      b.setThreshold(1);
+      b.setSourceNodes(sourceNodes);
+      final BalancerParameters p = b.build();
+
       conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
+      final int r = Balancer.run(namenodes, p, conf);
+      assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
+    }
 
-      { // run Balancer with empty nodes as source nodes
-        final Set<String> sourceNodes = new HashSet<>();
-        final List<DataNode> datanodes = cluster.getDataNodes();
-        for(int i = capacities.length; i < datanodes.size(); i++) {
-          sourceNodes.add(datanodes.get(i).getDisplayName());
-        }
-        BalancerParameters.Builder b =
-            new BalancerParameters.Builder();
-        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-        b.setThreshold(1);
-        b.setSourceNodes(sourceNodes);
-        final BalancerParameters p = b.build();
-
-        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
-        final int r = Balancer.run(namenodes, p, conf);
-        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
+    { // run Balancer with all filled node as source nodes
+      final Set<String> sourceNodes = new HashSet<>();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      for(int i = 0; i < capacities.length; i++) {
+        sourceNodes.add(datanodes.get(i).getDisplayName());
       }
+      BalancerParameters.Builder b =
+          new BalancerParameters.Builder();
+      b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+      b.setThreshold(1);
+      b.setSourceNodes(sourceNodes);
+      final BalancerParameters p = b.build();
 
-      { // run Balancer with a filled node as a source node
-        final Set<String> sourceNodes = new HashSet<>();
-        final List<DataNode> datanodes = cluster.getDataNodes();
-        sourceNodes.add(datanodes.get(0).getDisplayName());
-        BalancerParameters.Builder b =
-            new BalancerParameters.Builder();
-        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-        b.setThreshold(1);
-        b.setSourceNodes(sourceNodes);
-        final BalancerParameters p = b.build();
-
-        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
-        final int r = Balancer.run(namenodes, p, conf);
-        assertEquals(ExitStatus.NO_MOVE_BLOCK.getExitCode(), r);
-      }
-
-      { // run Balancer with all filled node as source nodes
-        final Set<String> sourceNodes = new HashSet<>();
-        final List<DataNode> datanodes = cluster.getDataNodes();
-        for(int i = 0; i < capacities.length; i++) {
-          sourceNodes.add(datanodes.get(i).getDisplayName());
-        }
-        BalancerParameters.Builder b =
-            new BalancerParameters.Builder();
-        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
-        b.setThreshold(1);
-        b.setSourceNodes(sourceNodes);
-        final BalancerParameters p = b.build();
-
-        conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
-        final int r = Balancer.run(namenodes, p, conf);
-        assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
-      }
-    } finally {
-      cluster.shutdown();
+      conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
+      final int r = Balancer.run(namenodes, p, conf);
+      assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
     }
   }
+
   public void integrationTestWithStripedFile(Configuration conf) throws Exception {
     initConfWithStripe(conf);
     doTestBalancerWithStripedFile(conf);
@@ -1778,4 +1754,4 @@
     balancerTest.testBalancer1();
     balancerTest.testBalancer2();
   }
-}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index ae33ffe..a9b2aaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -36,8 +36,8 @@
   private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private final static int stripesPerBlock = 4;
   private final static int numDNs = dataBlocks + parityBlocks + 2;
-  private static MiniDFSCluster cluster;
-  private static Configuration conf;
+  private MiniDFSCluster cluster;
+  private Configuration conf;
 
   {
     BLOCK_SIZE = cellSize * stripesPerBlock;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 4e744b4..8e9ce12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1002,6 +1002,14 @@
     BlockStoragePolicySuite POLICY_SUITE = BlockStoragePolicySuite
         .createDefaultSuite();
     BlockStoragePolicy storagePolicy = POLICY_SUITE.getDefaultPolicy();
+    DatanodeStorageInfo excessSSD = DFSTestUtil.createDatanodeStorageInfo(
+        "Storage-excess-SSD-ID", "localhost",
+        storages[0].getDatanodeDescriptor().getNetworkLocation(),
+        "foo.com", StorageType.SSD);
+    updateHeartbeatWithUsage(excessSSD.getDatanodeDescriptor(),
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0,
+        0);
 
     // use delete hint case.
 
@@ -1024,6 +1032,29 @@
     excessReplicas = replicator.chooseReplicasToDelete(nonExcess, 3,
         excessTypes, storages[3].getDatanodeDescriptor(), null);
     assertTrue(excessReplicas.contains(excessStorage));
+
+
+    // The block was initially created on excessSSD(rack r1),
+    // storages[4](rack r3) and storages[5](rack r3) with
+    // ONESSD_STORAGE_POLICY_NAME storage policy.
+    // Right after balancer moves the block from storages[5] to
+    // storages[3](rack r2), the application changes the storage policy from
+    // ONESSD_STORAGE_POLICY_NAME to HOT_STORAGE_POLICY_ID. In this case,
+    // no replica can be chosen as the excess replica, as
+    // chooseReplicasToDelete only considers storages[4] and storages[5], which
+    // are on the same rack, but neither's storage type is SSD.
+    // TODO BlockPlacementPolicyDefault should be able to delete excessSSD.
+    nonExcess.clear();
+    nonExcess.add(excessSSD);
+    nonExcess.add(storages[3]);
+    nonExcess.add(storages[4]);
+    nonExcess.add(storages[5]);
+    excessTypes = storagePolicy.chooseExcess((short) 3,
+        DatanodeStorageInfo.toStorageTypes(nonExcess));
+    excessReplicas = replicator.chooseReplicasToDelete(nonExcess, 3,
+        excessTypes, storages[3].getDatanodeDescriptor(),
+        storages[5].getDatanodeDescriptor());
+    assertTrue(excessReplicas.isEmpty());
   }
 
  @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 528021d..0ff7770 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -162,6 +162,16 @@
     return cluster.isOnSameNodeGroup(left, right.getDatanodeDescriptor());
   }
 
+  private DatanodeStorageInfo[] chooseTarget(
+      int numOfReplicas,
+      DatanodeDescriptor writer,
+      Set<Node> excludedNodes,
+      List<DatanodeDescriptor> favoredNodes) {
+    return replicator.chooseTarget(filename, numOfReplicas, writer,
+      excludedNodes, BLOCK_SIZE, favoredNodes,
+      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+  }
+
   /**
    * In this testcase, client is dataNodes[0]. So the 1st replica should be
    * placed on dataNodes[0], the 2nd replica should be placed on 
@@ -723,4 +733,52 @@
       assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
     }
   }
+
+  /**
+   * In this testcase, the favored node is dataNodes[6].
+   * The 1st replica should be placed on the favored node.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetAsFavouredNodes() throws Exception {
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> favoredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favoredNodes.add(dataNodes[6]);
+    favoredNodes.add(dataNodes[0]);
+    favoredNodes.add(dataNodes[1]);
+    targets = chooseTarget(1, dataNodes[7], null, favoredNodes);
+    assertEquals(targets.length, 1);
+    assertTrue(favoredNodes.contains(targets[0].getDatanodeDescriptor()));
+  }
+
+  /**
+   * In this testcase, 2 favored nodes are passed:
+   * dataNodes[0] (good node) and dataNodes[3] (bad node).
+   * The 1st replica should be placed on the good favored node dataNodes[0];
+   * the 2nd on dataNodes[4], which is in the bad favored node's nodegroup.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseFavoredNodesNodeGroup() throws Exception {
+    updateHeartbeatWithUsage(dataNodes[3],
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+        0L, 0L, 0, 0); // no space
+
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> expectedTargets =
+        new ArrayList<DatanodeDescriptor>();
+    expectedTargets.add(dataNodes[0]);
+    expectedTargets.add(dataNodes[4]);
+    List<DatanodeDescriptor> favouredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favouredNodes.add(dataNodes[3]);
+    favouredNodes.add(dataNodes[0]);
+    targets = chooseTarget(2, dataNodes[7], null, favouredNodes);
+    assertTrue("1st Replica is incorrect",
+      expectedTargets.contains(targets[0].getDatanodeDescriptor()));
+    assertTrue("2nd Replica is incorrect",
+      expectedTargets.contains(targets[1].getDatanodeDescriptor()));
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index eb986ff..40c4438 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -199,4 +199,11 @@
    * @throws IOException
    */
   void checkStoredReplica(final Replica replica) throws IOException;
+
+  /**
+   * Create dummy replicas for block data and metadata.
+   * @param block the block whose replica is to be created.
+   * @throws IOException on I/O error.
+   */
+  void injectCorruptReplica(ExtendedBlock block) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index ed32fae..e8e4532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileExistsException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -292,4 +293,28 @@
     ReplicaInfo r = (ReplicaInfo) replica;
     FsDatasetImpl.checkReplicaFiles(r);
   }
+
+  @Override
+  public void injectCorruptReplica(ExtendedBlock block) throws IOException {
+    Preconditions.checkState(!dataset.contains(block),
+        "Block " + block + " already exists on dataset.");
+    try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
+      FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
+      FinalizedReplica finalized = new FinalizedReplica(
+          block.getLocalBlock(),
+          volume,
+          volume.getFinalizedDir(block.getBlockPoolId()));
+      File blockFile = finalized.getBlockFile();
+      if (!blockFile.createNewFile()) {
+        throw new FileExistsException(
+            "Block file " + blockFile + " already exists.");
+      }
+      File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
+      if (!metaFile.createNewFile()) {
+        throw new FileExistsException(
+            "Meta file " + metaFile + " already exists."
+        );
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 62b5f8f..d637abc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.collect.Lists;
-import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -243,8 +243,8 @@
       CallerContext.setCurrent(context);
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.setTimes(p, time, time);
-      System.out.println("LLLLLL" + auditlog.getOutput());
-      assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes\n"));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=setTimes%n")));
       auditlog.clearOutput();
 
       // context with signature
@@ -255,11 +255,11 @@
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.setTimes(p, time, time);
       assertTrue(auditlog.getOutput().endsWith(
-          "callerContext=setTimes:L\n"));
+          String.format("callerContext=setTimes:L%n")));
       auditlog.clearOutput();
 
       // long context is truncated
-      final String longContext = RandomStringUtils.randomAscii(200);
+      final String longContext = StringUtils.repeat("foo", 100);
       context = new CallerContext.Builder(longContext)
           .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING))
           .build();
@@ -267,7 +267,17 @@
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.setTimes(p, time, time);
       assertTrue(auditlog.getOutput().endsWith(
-          "callerContext=" + longContext.substring(0, 128) + ":L\n"));
+          String.format("callerContext=%s:L%n", longContext.substring(0, 128))));
+      auditlog.clearOutput();
+
+      // empty context is ignored
+      context = new CallerContext.Builder("")
+          .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING))
+          .build();
+      CallerContext.setCurrent(context);
+      LOG.info("Set empty caller context");
+      fs.setTimes(p, time, time);
+      assertFalse(auditlog.getOutput().contains("callerContext="));
       auditlog.clearOutput();
 
       // caller context is inherited in child thread
@@ -293,7 +303,8 @@
       } catch (InterruptedException ignored) {
         // Ignore
       }
-      assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes:L\n"));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=setTimes:L%n")));
       auditlog.clearOutput();
 
       // caller context is overridden in child thread
@@ -321,7 +332,7 @@
         // Ignore
       }
       assertTrue(auditlog.getOutput().endsWith(
-          "callerContext=setPermission:L\n"));
+          String.format("callerContext=setPermission:L%n")));
       auditlog.clearOutput();
 
       // reuse the current context's signature
@@ -330,17 +341,19 @@
       CallerContext.setCurrent(context);
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.mkdirs(new Path("/reuse-context-signature"));
-      assertTrue(auditlog.getOutput().endsWith("callerContext=mkdirs:L\n"));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=mkdirs:L%n")));
       auditlog.clearOutput();
 
-      // caller context with too long signature is abandoned
+      // a signature that is too long is ignored
       context = new CallerContext.Builder("setTimes")
           .setSignature(new byte[41])
           .build();
       CallerContext.setCurrent(context);
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.setTimes(p, time, time);
-      assertFalse(auditlog.getOutput().contains("callerContext="));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=setTimes%n")));
       auditlog.clearOutput();
 
       // null signature is ignored
@@ -349,7 +362,8 @@
       CallerContext.setCurrent(context);
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.setTimes(p, time, time);
-      assertTrue(auditlog.getOutput().endsWith("callerContext=setTimes\n"));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=setTimes%n")));
       auditlog.clearOutput();
 
       // empty signature is ignored
@@ -359,7 +373,8 @@
       CallerContext.setCurrent(context);
       LOG.info("Set current caller context as {}", CallerContext.getCurrent());
       fs.mkdirs(new Path("/empty-signature"));
-      assertTrue(auditlog.getOutput().endsWith("callerContext=mkdirs\n"));
+      assertTrue(auditlog.getOutput().endsWith(
+          String.format("callerContext=mkdirs%n")));
       auditlog.clearOutput();
 
       // invalid context is not passed to the rpc
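
Aside on the %n change in the hunks above: the audit log terminates each entry with the platform line separator, so a hard-coded "\n" only matches on Unix-like systems while String.format("%n") matches everywhere. A minimal, self-contained sketch of that platform-independent check; the helper and sample strings below are illustrative and not part of the patch.

    public class LineSeparatorCheckSketch {
      // String.format("%n") expands to System.lineSeparator(), so the same
      // assertion works for both "\n" and "\r\n" terminated audit lines.
      static boolean endsWithCallerContext(String auditOutput, String context) {
        return auditOutput.endsWith(String.format("callerContext=%s%n", context));
      }

      public static void main(String[] args) {
        String sample = "cmd=setTimes\tcallerContext=setTimes" + System.lineSeparator();
        if (!endsWithCallerContext(sample, "setTimes")) {
          throw new AssertionError("expected callerContext suffix not found");
        }
      }
    }
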
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 559aae6..5c865e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -26,12 +26,16 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.junit.Test;
@@ -40,6 +44,7 @@
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.File;
+import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.net.URI;
 import java.util.Collection;
@@ -186,7 +191,7 @@
       }
       assertEquals(2, statusMap.get("active").size());
       assertEquals(0, statusMap.get("failed").size());
-      
+
       // This will cause the first dir to fail.
       File failedNameDir = new File(nameDirUris.iterator().next());
       assertEquals(0, FileUtil.chmod(
@@ -412,4 +417,59 @@
       }
     }
   }
+
+  @Test(timeout = 120000)
+  public void testNNDirectorySize() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    // Have to specify IPC ports so the NNs can talk to each other.
+    MiniDFSNNTopology topology = new MiniDFSNNTopology()
+        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1")
+                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2")
+                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(topology).numDataNodes(0)
+        .build();
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+
+      FSNamesystem nn0 = cluster.getNamesystem(0);
+      FSNamesystem nn1 = cluster.getNamesystem(1);
+      checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+      checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
+      cluster.transitionToActive(0);
+      fs = cluster.getFileSystem(0);
+      DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
+
+      // Roll the edit log.
+      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+          cluster.getNameNode(1));
+      checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+      checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
+
+      // Test the metric after calling saveNamespace.
+      DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
+      nn0.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      nn0.saveNamespace(0, 0);
+      checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private void checkNNDirSize(Collection<URI> nameDirUris, String metric) {
+    Map<String, Long> nnDirMap =
+        (Map<String, Long>) JSON.parse(metric);
+    assertEquals(nameDirUris.size(), nnDirMap.size());
+    for (URI dirUrl : nameDirUris) {
+      File dir = new File(dirUrl);
+      assertEquals(nnDirMap.get(dir.getAbsolutePath()).longValue(),
+          FileUtils.sizeOfDirectory(dir));
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
index 6774aed..101601e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
@@ -21,30 +21,41 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
+
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.junit.Test;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class TestRecoverStripedBlocks {
+  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private final short GROUP_SIZE =
       (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+
   private MiniDFSCluster cluster;
   private final Path dirPath = new Path("/dir");
   private Path filePath = new Path(dirPath, "file");
@@ -166,4 +177,63 @@
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void test2RecoveryTasksForSameBlockGroup() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 2)
+        .build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      BlockManager bm = cluster.getNamesystem().getBlockManager();
+      fs.getClient().setErasureCodingPolicy("/", null);
+      int fileLen = NUM_DATA_BLOCKS * blockSize;
+      Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
+      final byte[] data = new byte[fileLen];
+      DFSTestUtil.writeFile(fs, p, data);
+
+      LocatedStripedBlock lb = (LocatedStripedBlock)fs.getClient()
+          .getLocatedBlocks(p.toString(), 0).get(0);
+      LocatedBlock[] lbs = StripedBlockUtil.parseStripedBlockGroup(lb,
+          cellSize, NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
+
+      assertEquals(0, getNumberOfBlocksToBeErasureCoded(cluster));
+      assertEquals(0, bm.getPendingReplicationBlocksCount());
+
+      // missing 1 block, so 1 task should be scheduled
+      DatanodeInfo dn0 = lbs[0].getLocations()[0];
+      cluster.stopDataNode(dn0.getName());
+      cluster.setDataNodeDead(dn0);
+      BlockManagerTestUtil.getComputedDatanodeWork(bm);
+      assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
+      assertEquals(1, bm.getPendingReplicationBlocksCount());
+
+      // missing another block, but no new task should be scheduled because
+      // previous task isn't finished.
+      DatanodeInfo dn1 = lbs[1].getLocations()[0];
+      cluster.stopDataNode(dn1.getName());
+      cluster.setDataNodeDead(dn1);
+      BlockManagerTestUtil.getComputedDatanodeWork(bm);
+      assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
+      assertEquals(1, bm.getPendingReplicationBlocksCount());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private static int getNumberOfBlocksToBeErasureCoded(MiniDFSCluster cluster)
+      throws Exception {
+    DatanodeManager dm =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    int count = 0;
+    for (DataNode dn : cluster.getDataNodes()) {
+      DatanodeDescriptor dd = dm.getDatanode(dn.getDatanodeId());
+      count += dd.getNumberOfBlocksToBeErasureCoded();
+    }
+    return count;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index fd45816..9f0d95b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -25,13 +25,16 @@
 import java.io.IOException;
 import java.net.URI;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
@@ -109,12 +112,16 @@
             "storage directory does not exist or is not accessible", ioe);
       }
 
+      int expectedCheckpointTxId = (int)NameNodeAdapter.getNamesystem(nn0)
+          .getFSImage().getMostRecentCheckpointTxId();
+
       int rc = BootstrapStandby.run(new String[] { "-nonInteractive" },
           cluster.getConfiguration(index));
       assertEquals(0, rc);
 
       // Should have copied over the namespace from the active
-      FSImageTestUtil.assertNNHasCheckpoints(cluster, index, ImmutableList.of(0));
+      FSImageTestUtil.assertNNHasCheckpoints(cluster, index,
+          ImmutableList.of(expectedCheckpointTxId));
     }
 
     // We should now be able to start the standbys successfully.
@@ -221,7 +228,7 @@
    * {@link DFSConfigKeys#DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY}
    * created by HDFS-8808.
    */
-  @Test
+  @Test(timeout=30000)
   public void testRateThrottling() throws Exception {
     cluster.getConfiguration(0).setLong(
         DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 1);
@@ -229,23 +236,46 @@
     cluster.waitActive();
     nn0 = cluster.getNameNode(0);
     cluster.transitionToActive(0);
-    // Each edit has at least 1 byte. So the lowRate definitely should cause
-    // a timeout, if enforced. If lowRate is not enforced, any reasonable test
-    // machine should at least download an image with 5 edits in 5 seconds.
-    for (int i = 0; i < 5; i++) {
+    // Any reasonable test machine should be able to transfer at least 1 byte
+    // per millisecond (roughly 1 KB/s).
+    final int minXferRatePerMS = 1;
+    int imageXferBufferSize = DFSUtilClient.getIoFileBufferSize(
+        new Configuration());
+    File imageFile = null;
+    int dirIdx = 0;
+    while (imageFile == null || imageFile.length() < imageXferBufferSize) {
+      for (int i = 0; i < 5; i++) {
+        cluster.getFileSystem(0).mkdirs(new Path("/foo" + dirIdx++));
+      }
       nn0.getRpcServer().rollEditLog();
+      NameNodeAdapter.enterSafeMode(nn0, false);
+      NameNodeAdapter.saveNamespace(nn0);
+      NameNodeAdapter.leaveSafeMode(nn0);
+      imageFile = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
+          .getFSImage(nn0).getStorage().getStorageDir(0));
     }
+
+    final int timeOut = (int)(imageFile.length() / minXferRatePerMS) + 1;
     // A very low DFS_IMAGE_TRANSFER_RATE_KEY value won't affect bootstrapping
+    final AtomicBoolean bootStrapped = new AtomicBoolean(false);
+    new Thread(
+        new Runnable() {
+          @Override
+          public void run() {
+            try {
+              testSuccessfulBaseCase();
+              bootStrapped.set(true);
+            } catch (Exception e) {
+              fail(e.getMessage());
+            }
+          }
+        }
+    ).start();
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       public Boolean get() {
-        try {
-          testSuccessfulBaseCase();
-          return true;
-        } catch (Exception e) {
-          return false;
-        }
+        return bootStrapped.get();
       }
-    }, 500, 5000);
+    }, 50, timeOut);
 
     shutdownCluster();
     setupCluster();
@@ -257,17 +287,26 @@
     cluster.transitionToActive(0);
     // A very low DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY value should
     // cause timeout
+    bootStrapped.set(false);
+    new Thread(
+        new Runnable() {
+          @Override
+          public void run() {
+            try {
+              testSuccessfulBaseCase();
+              bootStrapped.set(true);
+            } catch (Exception e) {
+              LOG.info(e.getMessage());
+            }
+          }
+        }
+    ).start();
     try {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         public Boolean get() {
-          try {
-            testSuccessfulBaseCase();
-            return true;
-          } catch (Exception e) {
-            return false;
-          }
+          return bootStrapped.get();
         }
-      }, 500, 5000);
+      }, 50, timeOut);
       fail("Did not timeout");
     } catch (TimeoutException e) {
       LOG.info("Encountered expected timeout.");
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 51e2cf7..23bef37 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -287,6 +287,9 @@
     MAPREDUCE-5801. Uber mode's log message is missing a vcore reason
     (Steven Wong via aw)
 
+    MAPREDUCE-6525. Fix test failure of TestMiniMRClientCluster.testRestart.
+    (Masatake Iwasaki via aajisaka)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -513,10 +516,6 @@
     MAPREDUCE-5708. Duplicate String.format in YarnOutputFiles.getSpillFileForWrite.
     (Konstantin Weitz via devaraj)
 
-    MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists to 
-    avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
-    (zhihai xu via devaraj)
-
     MAPREDUCE-6204. TestJobCounters should use new properties instead of
     JobConf.MAPRED_TASK_JAVA_OPTS. (Sam Liu via ozawa)
 
@@ -620,6 +619,9 @@
    MAPREDUCE-6508. TestNetworkedJob fails consistently due to delegation 
    token changes on RM. (Akira AJISAKA via junping_du)
 
+   MAPREDUCE-6515. Update Application priority in AM side from AM-RM heartbeat
+   (Sunil G via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -668,6 +670,16 @@
     MAPREDUCE-6518. Set SO_KEEPALIVE on shuffle connections (Chang Li via
     jlowe)
 
+    MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists to 
+    avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
+    (zhihai xu via devaraj)
+
+    MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+    (Junping Du via jlowe)
+
+    MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+    strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES
@@ -885,7 +897,7 @@
     MAPREDUCE-6285. ClientServiceDelegate should not retry upon
     AuthenticationException. (Jonathan Eagles via ozawa)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
@@ -897,6 +909,32 @@
 
   BUG FIXES
 
+    MAPREDUCE-6273. HistoryFileManager should check whether summaryFile exists to 
+    avoid FileNotFoundException causing HistoryFileInfo into MOVE_FAILED state.
+    (zhihai xu via devaraj)
+
+    MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
+    (Junping Du via jlowe)
+
+Release 2.6.2 - 2015-10-28
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    MAPREDUCE-6454. Fixed MapReduce to modify HADOOP_CLASSPATH to have distributed
+    cache files so that child processes running hadoop scripts can access these
+    files. (Junping Du via vinodkv)
+
+    MAPREDUCE-6334. Fetcher#copyMapOutput is leaking usedMemory upon
+    IOException during InMemoryMapOutput shuffle handler (Eric Payne via jlowe)
+
     MAPREDUCE-6497. Fix wrong value of JOB_FINISHED event in
     JobHistoryEventHandler. (Shinichi Yamashita via aajisaka)
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
index a40e5d2..7738810 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Priority;
 
 
 /**
@@ -100,4 +101,5 @@
   boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
   
   public void setQueueName(String queueName);
+  public void setJobPriority(Priority priority);
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index fc9a3a5..5ed0762 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -120,6 +120,7 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
@@ -653,6 +654,8 @@
 
   private JobState lastNonFinalState = JobState.NEW;
 
+  private volatile Priority jobPriority = Priority.newInstance(0);
+
   public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
       Configuration conf, EventHandler eventHandler,
       TaskAttemptListener taskAttemptListener,
@@ -878,7 +881,8 @@
           reporterUserName,
           state, appSubmitTime, startTime, finishTime, setupProgress,
           this.mapProgress, this.reduceProgress,
-          cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
+          cleanupProgress, jobFile, amInfos, isUber, diagsb.toString(),
+          jobPriority);
       return report;
     } finally {
       readLock.unlock();
@@ -2166,7 +2170,7 @@
       }
     }
   }
-  
+
   private static class InternalTerminationTransition implements
       SingleArcTransition<JobImpl, JobEvent> {
     JobStateInternal terminationState = null;
@@ -2219,4 +2223,9 @@
   public int getMaxFetchFailuresNotifications() {
     return maxFetchFailuresNotifications;
   }
+
+  @Override
+  public void setJobPriority(Priority priority) {
+    this.jobPriority = priority;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index bf9b1f8..496886e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -797,6 +797,7 @@
     computeIgnoreBlacklisting();
 
     handleUpdatedNodes(response);
+    handleJobPriorityChange(response);
 
     for (ContainerStatus cont : finishedContainers) {
       LOG.info("Received completed container " + cont.getContainerId());
@@ -921,6 +922,14 @@
     }
   }
 
+  private void handleJobPriorityChange(AllocateResponse response) {
+    Priority priorityFromResponse = Priority.newInstance(response
+        .getApplicationPriority().getPriority());
+
+    // Propagate the RM-reported application priority to the job.
+    getJob().setJobPriority(priorityFromResponse);
+  }
+
   @Private
   public Resource getResourceLimit() {
     Resource headRoom = getAvailableResources();
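
For readers following the MAPREDUCE-6515 change above: each AM-RM heartbeat response now carries the application priority, and handleJobPriorityChange() copies it onto the job so the job report stays in sync with the RM. A minimal sketch of that flow; JobWithPriority here is a stand-in for the real Job interface and is not part of the patch.

    import org.apache.hadoop.yarn.api.records.Priority;

    public class PriorityHeartbeatSketch {
      // Stand-in for the Job interface's new setJobPriority(Priority) method.
      interface JobWithPriority {
        void setJobPriority(Priority priority);
      }

      // Mirrors handleJobPriorityChange(): copy the RM-reported priority to the job.
      static void onHeartbeat(Priority fromResponse, JobWithPriority job) {
        job.setJobPriority(Priority.newInstance(fromResponse.getPriority()));
      }

      public static void main(String[] args) {
        Priority rmPriority = Priority.newInstance(4);
        onHeartbeat(rmPriority,
            p -> System.out.println("job priority is now " + p.getPriority()));
      }
    }
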
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 2b07efb..f213b32 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -489,9 +489,6 @@
     TestParams t = new TestParams(false);
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
-    JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
-    jheh.init(conf);
     MiniYARNCluster yarnCluster = null;
     long currentTime = System.currentTimeMillis();
     try {
@@ -499,6 +496,13 @@
             TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
       yarnCluster.init(conf);
       yarnCluster.start();
+      Configuration confJHEH = new YarnConfiguration(conf);
+      confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+      confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+          MiniYARNCluster.getHostname() + ":" +
+          yarnCluster.getApplicationHistoryServer().getPort());
+      JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
+      jheh.init(confJHEH);
       jheh.start();
       TimelineStore ts = yarnCluster.getApplicationHistoryServer()
               .getTimelineStore();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index fd9c094..ccacf1c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -66,6 +66,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.util.Records;
 
 import com.google.common.collect.Iterators;
@@ -634,6 +635,11 @@
       public void setQueueName(String queueName) {
         // do nothing
       }
+
+      @Override
+      public void setJobPriority(Priority priority) {
+        // do nothing
+      }
     };
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index 475cd1f..0b7d1b1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -69,6 +69,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -526,6 +527,11 @@
     public void setQueueName(String queueName) {
       // do nothing
     }
+
+    @Override
+    public void setJobPriority(Priority priority) {
+      // do nothing
+    }
   }
 
   /*
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
index 2af4380..9e18920 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
@@ -72,6 +72,7 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobSetupCompletedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
@@ -92,6 +93,7 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -889,6 +891,39 @@
                       job.getDiagnostics().toString().contains(EXCEPTIONMSG));
   }
 
+  @Test
+  public void testJobPriorityUpdate() throws Exception {
+    Configuration conf = new Configuration();
+    AsyncDispatcher dispatcher = new AsyncDispatcher();
+    Priority submittedPriority = Priority.newInstance(5);
+
+    AppContext mockContext = mock(AppContext.class);
+    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
+    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
+
+    JobId jobId = job.getID();
+    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
+    assertJobState(job, JobStateInternal.INITED);
+    job.handle(new JobStartEvent(jobId));
+    assertJobState(job, JobStateInternal.SETUP);
+    // Set the job priority to 5 and verify it is reflected in the report.
+    job.setJobPriority(submittedPriority);
+    Assert.assertEquals(submittedPriority, job.getReport().getJobPriority());
+
+    job.handle(new JobSetupCompletedEvent(jobId));
+    assertJobState(job, JobStateInternal.RUNNING);
+
+    // Update the job priority to 8 and verify that the change is reflected.
+    Priority updatedPriority = Priority.newInstance(8);
+    job.setJobPriority(updatedPriority);
+    assertJobState(job, JobStateInternal.RUNNING);
+    Priority jobPriority = job.getReport().getJobPriority();
+    Assert.assertNotNull(jobPriority);
+
+    // Verify that the changed priority matches what was set on the job.
+    Assert.assertEquals(updatedPriority, jobPriority);
+  }
+
   private static CommitterEventHandler createCommitterEventHandler(
       Dispatcher dispatcher, OutputCommitter committer) {
     final SystemClock clock = new SystemClock();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index e4421a8..0a10434 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -2948,6 +2948,8 @@
           Collections.<NodeReport>emptyList(),
           Resource.newInstance(512000, 1024), null, 10, null,
           Collections.<NMToken>emptyList());
+      // RM will always ensure that a default priority is sent to AM
+      response.setApplicationPriority(Priority.newInstance(0));
       containersToComplete.clear();
       containersToAllocate.clear();
       return response;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index 5b8d3a7..88f61b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -296,21 +296,41 @@
     }
     return yCntrs;
   }
-  
+
   public static JobStatus fromYarn(JobReport jobreport, String trackingUrl) {
-    JobPriority jobPriority = JobPriority.NORMAL;
+    JobPriority jobPriority = (jobreport.getJobPriority() == null)
+        ? JobPriority.DEFAULT
+        : fromYarnPriority(jobreport.getJobPriority().getPriority());
     JobStatus jobStatus = new org.apache.hadoop.mapred.JobStatus(
-        fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(), jobreport
-            .getMapProgress(), jobreport.getReduceProgress(), jobreport
-            .getCleanupProgress(), fromYarn(jobreport.getJobState()),
-        jobPriority, jobreport.getUser(), jobreport.getJobName(), jobreport
-            .getJobFile(), trackingUrl, jobreport.isUber());
+        fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(),
+        jobreport.getMapProgress(), jobreport.getReduceProgress(),
+        jobreport.getCleanupProgress(), fromYarn(jobreport.getJobState()),
+        jobPriority, jobreport.getUser(), jobreport.getJobName(),
+        jobreport.getJobFile(), trackingUrl, jobreport.isUber());
     jobStatus.setStartTime(jobreport.getStartTime());
     jobStatus.setFinishTime(jobreport.getFinishTime());
     jobStatus.setFailureInfo(jobreport.getDiagnostics());
     return jobStatus;
   }
 
+  private static JobPriority fromYarnPriority(int priority) {
+    switch (priority) {
+    case 5:
+      return JobPriority.VERY_HIGH;
+    case 4:
+      return JobPriority.HIGH;
+    case 3:
+      return JobPriority.NORMAL;
+    case 2:
+      return JobPriority.LOW;
+    case 1:
+      return JobPriority.VERY_LOW;
+    case 0:
+      return JobPriority.DEFAULT;
+    }
+    return JobPriority.UNDEFINED_PRIORITY;
+  }
+
   public static org.apache.hadoop.mapreduce.QueueState fromYarn(
       QueueState state) {
     org.apache.hadoop.mapreduce.QueueState qState =
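
The fromYarnPriority() switch above defines the YARN-integer-to-MapReduce mapping (5..1 map to VERY_HIGH..VERY_LOW, 0 to DEFAULT, anything else to UNDEFINED_PRIORITY). A small standalone sketch of the same mapping, with a local enum standing in for org.apache.hadoop.mapreduce.JobPriority:

    public class PriorityMappingSketch {
      enum JobPriority { VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW, DEFAULT, UNDEFINED_PRIORITY }

      // Same mapping as TypeConverter#fromYarnPriority: YARN integer -> MR enum.
      static JobPriority fromYarnPriority(int priority) {
        switch (priority) {
        case 5:  return JobPriority.VERY_HIGH;
        case 4:  return JobPriority.HIGH;
        case 3:  return JobPriority.NORMAL;
        case 2:  return JobPriority.LOW;
        case 1:  return JobPriority.VERY_LOW;
        case 0:  return JobPriority.DEFAULT;
        default: return JobPriority.UNDEFINED_PRIORITY;
        }
      }

      public static void main(String[] args) {
        System.out.println(fromYarnPriority(0));  // DEFAULT, as the updated TestTypeConverter asserts
        System.out.println(fromYarnPriority(7));  // UNDEFINED_PRIORITY
      }
    }
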
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
index b2f2cc1..38dfcae 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
@@ -20,6 +20,8 @@
 
 import java.util.List;
 
+import org.apache.hadoop.yarn.api.records.Priority;
+
 public interface JobReport {
   public abstract JobId getJobId();
   public abstract JobState getJobState();
@@ -37,6 +39,7 @@
   public abstract String getJobFile();
   public abstract List<AMInfo> getAMInfos();
   public abstract boolean isUber();
+  public abstract Priority getJobPriority();
 
   public abstract void setJobId(JobId jobId);
   public abstract void setJobState(JobState jobState);
@@ -54,4 +57,5 @@
   public abstract void setJobFile(String jobFile);
   public abstract void setAMInfos(List<AMInfo> amInfos);
   public abstract void setIsUber(boolean isUber);
+  public abstract void setJobPriority(Priority priority);
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
index 5c90942..f4cb0a6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
@@ -32,7 +32,10 @@
 import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProtoOrBuilder;
 import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobStateProto;
 import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
 
 
     
@@ -41,11 +44,11 @@
   JobReportProto proto = JobReportProto.getDefaultInstance();
   JobReportProto.Builder builder = null;
   boolean viaProto = false;
-  
+
   private JobId jobId = null;
   private List<AMInfo> amInfos = null;
-  
-  
+  private Priority jobPriority = null;
+
   public JobReportPBImpl() {
     builder = JobReportProto.newBuilder();
   }
@@ -69,6 +72,9 @@
     if (this.amInfos != null) {
       addAMInfosToProto();
     }
+    if (this.jobPriority != null) {
+      builder.setJobPriority(convertToProtoFormat(this.jobPriority));
+    }
   }
 
   private synchronized void mergeLocalToProto() {
@@ -333,6 +339,14 @@
     return MRProtoUtils.convertFromProtoFormat(e);
   }
 
+  private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+    return new PriorityPBImpl(p);
+  }
+
+  private PriorityProto convertToProtoFormat(Priority t) {
+    return ((PriorityPBImpl)t).getProto();
+  }
+
   @Override
   public synchronized boolean isUber() {
     JobReportProtoOrBuilder p = viaProto ? proto : builder;
@@ -344,4 +358,26 @@
     maybeInitBuilder();
     builder.setIsUber(isUber);
   }
-}  
+
+  @Override
+  public synchronized Priority getJobPriority() {
+    JobReportProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.jobPriority != null) {
+      return this.jobPriority;
+    }
+    if (!p.hasJobPriority()) {
+      return null;
+    }
+    this.jobPriority = convertFromProtoFormat(p.getJobPriority());
+    return this.jobPriority;
+  }
+
+  @Override
+  public synchronized void setJobPriority(Priority priority) {
+    maybeInitBuilder();
+    if (priority == null) {
+      builder.clearJobPriority();
+    }
+    this.jobPriority = priority;
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
index 95e0083..893f76a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.util.Records;
 
 public class MRBuilderUtils {
@@ -63,10 +64,21 @@
   }
 
   public static JobReport newJobReport(JobId jobId, String jobName,
+      String userName, JobState state, long submitTime, long startTime,
+      long finishTime, float setupProgress, float mapProgress,
+      float reduceProgress, float cleanupProgress, String jobFile,
+      List<AMInfo> amInfos, boolean isUber, String diagnostics) {
+    return newJobReport(jobId, jobName, userName, state, submitTime, startTime,
+        finishTime, setupProgress, mapProgress, reduceProgress,
+        cleanupProgress, jobFile, amInfos, isUber, diagnostics,
+        Priority.newInstance(0));
+  }
+
+  public static JobReport newJobReport(JobId jobId, String jobName,
       String userName, JobState state, long submitTime, long startTime, long finishTime,
       float setupProgress, float mapProgress, float reduceProgress,
       float cleanupProgress, String jobFile, List<AMInfo> amInfos,
-      boolean isUber, String diagnostics) {
+      boolean isUber, String diagnostics, Priority priority) {
     JobReport report = Records.newRecord(JobReport.class);
     report.setJobId(jobId);
     report.setJobName(jobName);
@@ -83,6 +95,7 @@
     report.setAMInfos(amInfos);
     report.setIsUber(isUber);
     report.setDiagnostics(diagnostics);
+    report.setJobPriority(priority);
     return report;
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
index b74eef6..5a4bac1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
@@ -146,6 +146,7 @@
   repeated AMInfoProto am_infos = 14;
   optional int64 submit_time = 15;
   optional bool is_uber = 16 [default = false];
+  optional hadoop.yarn.PriorityProto jobPriority = 17;
 }
 
 message AMInfoProto {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
index e36efec..60ce170 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapreduce;
 
 import org.apache.hadoop.util.StringUtils;
+
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -35,6 +36,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -201,10 +203,12 @@
     jobReport.setJobState(state);
     jobReport.setStartTime(jobStartTime);
     jobReport.setFinishTime(jobFinishTime);
-    jobReport.setUser("TestTypeConverter-user");    
+    jobReport.setUser("TestTypeConverter-user");
+    jobReport.setJobPriority(Priority.newInstance(0));
     JobStatus jobStatus = TypeConverter.fromYarn(jobReport, "dummy-jobfile");
     Assert.assertEquals(jobStartTime, jobStatus.getStartTime());
     Assert.assertEquals(jobFinishTime, jobStatus.getFinishTime());    
     Assert.assertEquals(state.toString(), jobStatus.getState().toString());
-  }  
+    Assert.assertEquals(JobPriority.DEFAULT, jobStatus.getPriority());
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
index 376d8a4..b76d46d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
@@ -31,6 +31,7 @@
   HIGH,
   NORMAL,
   LOW,
-  VERY_LOW;
-  
+  VERY_LOW,
+  DEFAULT,
+  UNDEFINED_PRIORITY;
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
index b0d232a..7178568 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobPriority.java
@@ -31,5 +31,7 @@
   HIGH,
   NORMAL,
   LOW,
-  VERY_LOW;
+  VERY_LOW,
+  DEFAULT,
+  UNDEFINED_PRIORITY;
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index 0f1f391..3c12bdf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -481,4 +482,10 @@
   public void setQueueName(String queueName) {
     throw new UnsupportedOperationException("Can't set job's queue name in history");
   }
+
+  @Override
+  public void setJobPriority(Priority priority) {
+    throw new UnsupportedOperationException(
+        "Can't set job's priority in history");
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index f0786da3..b221961 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -966,9 +966,16 @@
 
   private String getJobSummary(FileContext fc, Path path) throws IOException {
     Path qPath = fc.makeQualified(path);
-    FSDataInputStream in = fc.open(qPath);
-    String jobSummaryString = in.readUTF();
-    in.close();
+    FSDataInputStream in = null;
+    String jobSummaryString = null;
+    try {
+      in = fc.open(qPath);
+      jobSummaryString = in.readUTF();
+    } finally {
+      if (in != null) {
+        in.close();
+      }
+    }
     return jobSummaryString;
   }
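
The MAPREDUCE-6528 fix above guarantees the summary-file stream is closed even when readUTF() throws. On Java 7+ the same guarantee can be written with try-with-resources; a hedged alternative sketch of the same behavior, not what the patch itself does:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class JobSummaryReadSketch {
      // FSDataInputStream is Closeable, so try-with-resources closes it on all paths.
      static String getJobSummary(FileContext fc, Path path) throws IOException {
        Path qPath = fc.makeQualified(path);
        try (FSDataInputStream in = fc.open(qPath)) {
          return in.readUTF();
        }
      }
    }
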
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
index 0725f46..b3b181c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 
@@ -196,4 +197,10 @@
     throw new UnsupportedOperationException("Can't set job's queue name in history");
   }
 
+  @Override
+  public void setJobPriority(Priority priority) {
+    throw new UnsupportedOperationException(
+        "Can't set job's priority in history");
+  }
+
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
index fd87b94..14961d2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
@@ -58,6 +58,7 @@
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.junit.Before;
 import org.junit.Test;
@@ -419,5 +420,9 @@
     @Override
     public void setQueueName(String queueName) {
     }
+
+    @Override
+    public void setJobPriority(Priority priority) {
+    }
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
index 1dd6fca..cad6f3a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java
@@ -186,10 +186,32 @@
     super.serviceInit(conf);
   }
 
+  @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+
+    // Need to do this because historyServer.init creates a new Configuration.
+    getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
+                    historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+    MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
+        MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
+
+    LOG.info("MiniMRYARN ResourceManager address: " +
+        getConfig().get(YarnConfiguration.RM_ADDRESS));
+    LOG.info("MiniMRYARN ResourceManager web address: " +
+        WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
+    LOG.info("MiniMRYARN HistoryServer address: " +
+        getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
+    LOG.info("MiniMRYARN HistoryServer web address: " +
+        getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
+            MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
+  }
+
   private class JobHistoryServerWrapper extends AbstractService {
     public JobHistoryServerWrapper() {
       super(JobHistoryServerWrapper.class.getName());
     }
+    private volatile boolean jhsStarted = false;
 
     @Override
     public synchronized void serviceStart() throws Exception {
@@ -211,9 +233,11 @@
         new Thread() {
           public void run() {
             historyServer.start();
+            jhsStarted = true;
           };
         }.start();
-        while (historyServer.getServiceState() == STATE.INITED) {
+
+        while (!jhsStarted) {
           LOG.info("Waiting for HistoryServer to start...");
           Thread.sleep(1500);
         }
@@ -225,21 +249,6 @@
       } catch (Throwable t) {
         throw new YarnRuntimeException(t);
       }
-      //need to do this because historyServer.init creates a new Configuration
-      getConfig().set(JHAdminConfig.MR_HISTORY_ADDRESS,
-                      historyServer.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-      MRWebAppUtil.setJHSWebappURLWithoutScheme(getConfig(),
-          MRWebAppUtil.getJHSWebappURLWithoutScheme(historyServer.getConfig()));
-
-      LOG.info("MiniMRYARN ResourceManager address: " +
-               getConfig().get(YarnConfiguration.RM_ADDRESS));
-      LOG.info("MiniMRYARN ResourceManager web address: " +
-               WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
-      LOG.info("MiniMRYARN HistoryServer address: " +
-               getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-      LOG.info("MiniMRYARN HistoryServer web address: "
-          + getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
-              MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
     }
 
     @Override
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d59c44a..efc3a7d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -780,6 +780,11 @@
         <version>1.8.5</version>
       </dependency>
       <dependency>
+        <groupId>org.mock-server</groupId>
+        <artifactId>mockserver-netty</artifactId>
+        <version>3.9.2</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.avro</groupId>
         <artifactId>avro</artifactId>
         <version>${avro.version}</version>
@@ -795,6 +800,11 @@
         <version>1.8.1</version>
       </dependency>
       <dependency>
+        <groupId>com.google.re2j</groupId>
+        <artifactId>re2j</artifactId>
+        <version>${re2j.version}</version>
+      </dependency>
+      <dependency>
         <groupId>com.google.protobuf</groupId>
         <artifactId>protobuf-java</artifactId>
         <version>${protobuf.version}</version>
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 6412714..69ece4a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2370,7 +2370,37 @@
 
   @Override
   public void delete(String key) throws IOException {
-    delete(key, null);
+    try {
+      delete(key, null);
+    } catch (IOException e) {
+      Throwable t = e.getCause();
+      if (t instanceof StorageException) {
+        StorageException se = (StorageException) t;
+        if (se.getErrorCode().equals("LeaseIdMissing")) {
+          SelfRenewingLease lease = null;
+          try {
+            lease = acquireLease(key);
+            delete(key, lease);
+          } catch (AzureException e3) {
+            LOG.warn("Got unexpected exception trying to acquire lease on "
+                + key + "." + e3.getMessage());
+            throw e3;
+          } finally {
+            try {
+              if (lease != null) {
+                lease.free();
+              }
+            } catch (Exception e4) {
+              LOG.error("Unable to free lease on " + key, e4);
+            }
+          }
+        } else {
+          throw e;
+        }
+      } else {
+        throw e;
+      }
+    }
   }
 
   @Override
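
The delete(key) change above catches the LeaseIdMissing error code, acquires a SelfRenewingLease, and retries the delete while holding it. A minimal, self-contained sketch of that decide-by-error-code pattern follows; BlobStore, Lease and errorCodeOf are hypothetical stand-ins for the store's real methods, not part of the patch.

    // Sketch of the "retry under a lease on LeaseIdMissing" pattern.
    import java.io.IOException;

    interface Lease { void free() throws IOException; }

    interface BlobStore {
      void delete(String key, Lease lease) throws IOException;
      Lease acquireLease(String key) throws IOException;
      /** Error code carried by the cause of a failed operation, or null. */
      String errorCodeOf(IOException e);
    }

    final class LeaseAwareDelete {
      static void delete(BlobStore store, String key) throws IOException {
        try {
          store.delete(key, null);                    // common case: nobody holds a lease
        } catch (IOException e) {
          if (!"LeaseIdMissing".equals(store.errorCodeOf(e))) {
            throw e;                                  // unrelated failure, rethrow as-is
          }
          Lease lease = store.acquireLease(key);      // take over the abandoned lease
          try {
            store.delete(key, lease);                 // retry while holding it
          } finally {
            try {
              lease.free();                           // best effort; the blob may be gone
            } catch (IOException ignored) {
            }
          }
        }
      }
    }
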
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 7c5a504..73bc6b3 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.azure;
 
 import java.io.DataInputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -49,6 +50,7 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -62,7 +64,6 @@
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.JsonNode;
@@ -74,9 +75,11 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.StorageErrorCode;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
-import com.microsoft.azure.storage.core.*;
+import com.microsoft.azure.storage.StorageErrorCodeStrings;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * A {@link FileSystem} for reading and writing files stored on <a
@@ -88,7 +91,6 @@
 @InterfaceStability.Stable
 public class NativeAzureFileSystem extends FileSystem {
   private static final int USER_WX_PERMISION = 0300;
-
   /**
    * A description of a folder rename operation, including the source and
    * destination keys, and descriptions of the files in the source folder.
@@ -712,7 +714,7 @@
      * @returns int An integer corresponding to the byte read.
      */
     @Override
-    public synchronized int read() throws IOException {
+    public synchronized int read() throws FileNotFoundException, IOException {
       try {
         int result = 0;
         result = in.read();
@@ -726,13 +728,21 @@
       //
         return result;
       } catch(IOException e) {
-        if (e.getCause() instanceof StorageException) {
-          StorageException storageExcp  = (StorageException) e.getCause();
+
+        Throwable innerException = checkForAzureStorageException(e);
+
+        if (innerException instanceof StorageException) {
+
           LOG.error("Encountered Storage Exception for read on Blob : {}"
               + " Exception details: {} Error Code : {}",
-              key, e.getMessage(), storageExcp.getErrorCode());
+              key, e, ((StorageException) innerException).getErrorCode());
+
+          if (isFileNotFoundException((StorageException) innerException)) {
+            throw new FileNotFoundException(String.format("%s is not found", key));
+          }
         }
-        throw e;
+
+        throw e;
       }
     }
 
@@ -757,7 +767,7 @@
      * there is no more data because the end of stream is reached.
      */
     @Override
-    public synchronized int read(byte[] b, int off, int len) throws IOException {
+    public synchronized int read(byte[] b, int off, int len) throws FileNotFoundException, IOException {
       try {
         int result = 0;
         result = in.read(b, off, len);
@@ -772,29 +782,56 @@
         // Return to the caller with the result.
         return result;
       } catch(IOException e) {
-        if (e.getCause() instanceof StorageException) {
-          StorageException storageExcp  = (StorageException) e.getCause();
+
+        Throwable innerException = checkForAzureStorageException(e);
+
+        if (innerException instanceof StorageException) {
+
           LOG.error("Encountered Storage Exception for read on Blob : {}"
               + " Exception details: {} Error Code : {}",
-              key, e.getMessage(), storageExcp.getErrorCode());
+              key, e, ((StorageException) innerException).getErrorCode());
+
+          if (isFileNotFoundException((StorageException) innerException)) {
+            throw new FileNotFoundException(String.format("%s is not found", key));
+          }
         }
-        throw e;
+
+        throw e;
       }
     }
 
     @Override
-    public void close() throws IOException {
-      in.close();
-      closed = true;
+    public synchronized void close() throws IOException {
+      if (!closed) {
+        closed = true;
+        IOUtils.closeStream(in);
+        in = null;
+      }
     }
 
     @Override
-    public synchronized void seek(long pos) throws IOException {
-     in.close();
-     in = store.retrieve(key);
-     this.pos = in.skip(pos);
-     LOG.debug("Seek to position {}. Bytes skipped {}", pos,
-         this.pos);
+    public synchronized void seek(long pos) throws FileNotFoundException, EOFException, IOException {
+      try {
+        checkNotClosed();
+        if (pos < 0) {
+          throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+        }
+        IOUtils.closeStream(in);
+        in = store.retrieve(key);
+        this.pos = in.skip(pos);
+        LOG.debug("Seek to position {}. Bytes skipped {}", pos,
+          this.pos);
+      } catch(IOException e) {
+
+        Throwable innerException = checkForAzureStorageException(e);
+
+        if (innerException instanceof StorageException
+             && isFileNotFoundException((StorageException) innerException)) {
+          throw new FileNotFoundException(String.format("%s is not found", key));
+        }
+
+        throw e;
+      }
     }
 
     @Override
@@ -806,6 +843,50 @@
     public boolean seekToNewSource(long targetPos) throws IOException {
       return false;
     }
+
+    /*
+     * Helper method to recursively check if the cause of the exception is
+     * an Azure storage exception.
+     */
+    private Throwable checkForAzureStorageException(IOException e) {
+
+      Throwable innerException = e.getCause();
+
+      while (innerException != null
+              && !(innerException instanceof StorageException)) {
+        innerException = innerException.getCause();
+      }
+
+      return innerException;
+    }
+
+    /*
+     * Helper method to check if the Azure StorageException was raised
+     * because the backing blob was not found.
+     */
+    private boolean isFileNotFoundException(StorageException e) {
+
+      String errorCode = e.getErrorCode();
+      if (errorCode != null
+          && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
+              || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+              || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
+              || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+
+        return true;
+      }
+
+      return false;
+    }
+
+    /*
+     * Helper method to check if a stream is closed.
+     */
+    private void checkNotClosed() throws IOException {
+      if (closed) {
+        throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+      }
+    }
   }
 
   private class NativeAzureFsOutputStream extends OutputStream {
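
With the read(), seek() and close() changes above, a blob that is deleted or renamed underneath an open stream now surfaces as a java.io.FileNotFoundException instead of a raw StorageException wrapped in an IOException. A caller-side sketch of what that enables, using the standard Hadoop FileSystem API and a hypothetical path:

    // Handle the backing blob disappearing while a stream is open.
    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class TolerantReader {
      static int readFirstBytes(FileSystem fs, byte[] buffer) throws IOException {
        Path path = new Path("/data/example.dat");    // hypothetical path
        try (FSDataInputStream in = fs.open(path)) {
          in.seek(0);                                 // throws FileNotFoundException if the
          return in.read(buffer);                     // blob was concurrently deleted/renamed
        } catch (FileNotFoundException e) {
          // The blob vanished between open() and the read: treat it as "no data".
          return -1;
        }
      }
    }
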
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 06f32ce..900d730 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -22,6 +22,8 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import com.microsoft.azure.storage.AccessCondition;
 import com.microsoft.azure.storage.StorageException;
 import com.microsoft.azure.storage.blob.CloudBlob;
@@ -61,7 +63,8 @@
 
 
   // Time to wait to retry getting the lease in milliseconds
-  private static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;
+  @VisibleForTesting
+  static final int LEASE_ACQUIRE_RETRY_INTERVAL = 2000;
 
   public SelfRenewingLease(CloudBlobWrapper blobWrapper)
       throws StorageException {
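
Relaxing LEASE_ACQUIRE_RETRY_INTERVAL to package-private with @VisibleForTesting lets tests derive their timing from the constant rather than hard-coding a sleep, as the lease-contention test later in this patch does. A tiny sketch, assuming placement in org.apache.hadoop.fs.azure; the class name is hypothetical.

    // Size test waits from the now package-visible constant.
    package org.apache.hadoop.fs.azure;

    final class LeaseTimingExample {
      static long holdPastOneRetry() {
        // Hold the lease long enough that a competing delete() must retry acquiring it.
        return SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3L;
      }
    }
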
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
new file mode 100644
index 0000000..35a1f50
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Test;
+
+
+public class TestFileSystemOperationExceptionHandling extends
+  NativeAzureFileSystemBaseTest {
+
+  FSDataInputStream inputStream = null;
+  /*
+   * Helper method to create a PageBlob test storage account.
+   */
+  private AzureBlobStorageTestAccount getPageBlobTestStorageAccount()
+      throws Exception {
+
+    Configuration conf = new Configuration();
+
+    // Configure the page blob directories key so every file created is a page blob.
+    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+    // Configure the atomic rename directories key so every folder will have
+    // atomic rename applied.
+    conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+    return AzureBlobStorageTestAccount.create(conf);
+  }
+
+
+  /*
+   * Helper method that creates an InputStream to validate exceptions
+   * for various scenarios.
+   */
+  private void setupInputStreamToTest(AzureBlobStorageTestAccount testAccount)
+      throws Exception {
+
+    fs = testAccount.getFileSystem();
+
+    // Step 1: Create a file and write dummy data.
+    Path testFilePath1 = new Path("test1.dat");
+    Path testFilePath2 = new Path("test2.dat");
+    FSDataOutputStream outputStream = fs.create(testFilePath1);
+    String testString = "This is a test string";
+    outputStream.write(testString.getBytes());
+    outputStream.close();
+
+    // Step 2: Open a read stream on the file.
+    inputStream = fs.open(testFilePath1);
+
+    // Step 3: Rename the file
+    fs.rename(testFilePath1, testFilePath2);
+  }
+
+  /*
+   * Tests a basic single threaded read scenario for Page blobs.
+   */
+  @Test(expected=FileNotFoundException.class)
+  public void testSingleThreadedPageBlobReadScenario() throws Throwable {
+    AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+    setupInputStreamToTest(testAccount);
+    byte[] readBuffer = new byte[512];
+    inputStream.read(readBuffer);
+  }
+
+  /*
+   * Tests a basic single threaded seek scenario for Page blobs.
+   */
+  @Test(expected=FileNotFoundException.class)
+  public void testSingleThreadedPageBlobSeekScenario() throws Throwable {
+    AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+    setupInputStreamToTest(testAccount);
+    inputStream.seek(5);
+  }
+
+  /*
+   * Tests a basic single threaded seek scenario for Block blobs.
+   */
+  @Test(expected=FileNotFoundException.class)
+  public void testSingleThreadBlockBlobSeekScenario() throws Throwable {
+
+    AzureBlobStorageTestAccount testAccount = createTestAccount();
+    setupInputStreamToTest(testAccount);
+    inputStream.seek(5);
+  }
+
+  /*
+   * Tests a basic single threaded read scenario for Block blobs.
+   */
+  @Test(expected=FileNotFoundException.class)
+  public void testSingleThreadedBlockBlobReadScenario() throws Throwable {
+    AzureBlobStorageTestAccount testAccount = createTestAccount();
+    setupInputStreamToTest(testAccount);
+    byte[] readBuffer = new byte[512];
+    inputStream.read(readBuffer);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (inputStream != null) {
+      inputStream.close();
+    }
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create();
+  }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
new file mode 100644
index 0000000..0f91500
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.FileNotFoundException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Test;
+
+public class TestFileSystemOperationsExceptionHandlingMultiThreaded extends
+  NativeAzureFileSystemBaseTest {
+
+  FSDataInputStream inputStream = null;
+  /*
+   * Helper method that creates an input stream to test various scenarios.
+   */
+  private void getInputStreamToTest(FileSystem fs, Path testPath) throws Throwable {
+
+    FSDataOutputStream outputStream = fs.create(testPath);
+    String testString = "This is a test string";
+    outputStream.write(testString.getBytes());
+    outputStream.close();
+
+    inputStream = fs.open(testPath);
+  }
+
+  /*
+   * Test to validate that the correct exception is thrown for the
+   * multi-threaded read scenario for block blobs.
+   */
+  @Test(expected=FileNotFoundException.class)
+  public void testMultiThreadedBlockBlobReadScenario() throws Throwable {
+
+    AzureBlobStorageTestAccount testAccount = createTestAccount();
+    fs = testAccount.getFileSystem();
+    Path testFilePath1 = new Path("test1.dat");
+
+    getInputStreamToTest(fs, testFilePath1);
+    Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
+    renameThread.start();
+
+    renameThread.join();
+
+    byte[] readBuffer = new byte[512];
+    inputStream.read(readBuffer);
+  }
+
+  /*
+   * Test to validate that the correct exception is thrown for the
+   * multi-threaded seek scenario for block blobs.
+   */
+
+  @Test(expected=FileNotFoundException.class)
+  public void testMultiThreadBlockBlobSeekScenario() throws Throwable {
+
+    AzureBlobStorageTestAccount testAccount = createTestAccount();
+    fs = testAccount.getFileSystem();
+    Path testFilePath1 = new Path("test1.dat");
+
+    getInputStreamToTest(fs, testFilePath1);
+    Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
+    renameThread.start();
+
+    renameThread.join();
+
+    inputStream.seek(5);
+  }
+
+  /*
+   * Test to validate that the correct exception is thrown for the
+   * multi-threaded read scenario for page blobs.
+   */
+
+  @Test(expected=FileNotFoundException.class)
+  public void testMultiThreadedPageBlobReadScenario() throws Throwable {
+
+    AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+    fs = testAccount.getFileSystem();
+    Path testFilePath1 = new Path("test1.dat");
+
+    getInputStreamToTest(fs, testFilePath1);
+    Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
+    renameThread.start();
+
+    renameThread.join();
+    byte[] readBuffer = new byte[512];
+    inputStream.read(readBuffer);
+  }
+
+  /*
+   * Test to validate that the correct exception is thrown for the
+   * multi-threaded seek scenario for page blobs.
+   */
+
+  @Test(expected=FileNotFoundException.class)
+  public void testMultiThreadedPageBlobSeekScenario() throws Throwable {
+
+    AzureBlobStorageTestAccount testAccount = getPageBlobTestStorageAccount();
+    fs = testAccount.getFileSystem();
+    Path testFilePath1 = new Path("test1.dat");
+
+    getInputStreamToTest(fs, testFilePath1);
+    Thread renameThread = new Thread(new RenameThread(fs, testFilePath1));
+    renameThread.start();
+
+    renameThread.join();
+    inputStream.seek(5);
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    return AzureBlobStorageTestAccount.create();
+  }
+
+  /*
+   * Helper method to create a PageBlob test storage account.
+   */
+  private AzureBlobStorageTestAccount getPageBlobTestStorageAccount()
+      throws Exception {
+
+    Configuration conf = new Configuration();
+
+    // Configure the page blob directories key so every file created is a page blob.
+    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
+
+    // Configure the atomic rename directories key so every folder will have
+    // atomic rename applied.
+    conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
+    return AzureBlobStorageTestAccount.create(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (inputStream != null) {
+      inputStream.close();
+    }
+  }
+}
+
+/*
+ * Helper thread that just renames the test file.
+ */
+class RenameThread implements Runnable {
+
+  private FileSystem fs;
+  private Path testPath;
+  private Path renamePath = new Path("test2.dat");
+
+  public RenameThread(FileSystem fs, Path testPath) {
+    this.fs = fs;
+    this.testPath = testPath;
+  }
+
+  @Override
+  public void run() {
+    try {
+      fs.rename(testPath, renamePath);
+    } catch (Exception e) {
+      // Swallowing the exception as the
+      // correctness of the test is controlled
+      // by the other thread
+    }
+  }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
index b033460..721cb5f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java
@@ -21,10 +21,16 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.CountDownLatch;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+
 import org.junit.Test;
 
+import com.microsoft.azure.storage.StorageException;
+
 /*
  * Tests the Native Azure file system (WASB) against an actual blob store if
  * provided in the environment.
@@ -38,6 +44,86 @@
   }
 
   /**
+   * Tests the fs.delete() function for deleting a blob while another client
+   * holds a lease on it. A delete issued without a lease should detect the
+   * existing lease, acquire it, and complete rather than fail.
+   * This is the scenario that occurs during HMaster startup when it cleans up
+   * the temp dirs while an earlier, killed HMaster process still held a lease
+   * on the blob from a DDL operation.
+   */
+  @Test
+  public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage()
+      throws Exception {
+    LOG.info("Starting test");
+    final String FILE_KEY = "fileWithLease";
+    // Create the file
+    Path path = new Path(FILE_KEY);
+    fs.create(path);
+    assertTrue(fs.exists(path));
+    NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs;
+    final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path));
+    final AzureNativeFileSystemStore store = nfs.getStore();
+
+    // Acquire the lease on the file in a background thread
+    final CountDownLatch leaseAttemptComplete = new CountDownLatch(1);
+    final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1);
+    Thread t = new Thread() {
+      @Override
+      public void run() {
+        // Acquire the lease and then signal the main test thread.
+        SelfRenewingLease lease = null;
+        try {
+          lease = store.acquireLease(fullKey);
+          LOG.info("Lease acquired: " + lease.getLeaseID());
+        } catch (AzureException e) {
+          LOG.warn("Lease acqusition thread unable to acquire lease", e);
+        } finally {
+          leaseAttemptComplete.countDown();
+        }
+
+        // Wait for the main test thread to signal it will attempt the delete.
+        try {
+          beginningDeleteAttempt.await();
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+
+        // Keep holding the lease past the lease acquisition retry interval, so
+        // the test covers the case of delete retrying to acquire the lease.
+        try {
+          Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3);
+        } catch (InterruptedException ex) {
+          Thread.currentThread().interrupt();
+        }
+
+        try {
+          if (lease != null) {
+            LOG.info("Freeing lease");
+            lease.free();
+          }
+        } catch (StorageException se) {
+          LOG.warn("Unable to free lease.", se);
+        }
+      }
+    };
+
+    // Start the background thread and wait for it to signal the lease is held.
+    t.start();
+    try {
+      leaseAttemptComplete.await();
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+    }
+
+    // Try to delete the same file
+    beginningDeleteAttempt.countDown();
+    store.delete(fullKey);
+
+    // At this point the file should have been deleted.
+    assertFalse(fs.exists(path));
+  }
+
+  /**
    * Check that isPageBlobKey works as expected. This assumes that
    * in the test configuration, the list of supported page blob directories
    * only includes "pageBlobs". That's why this test is made specific
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@
  */
 class DynamicInputChunk<K, V> {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
   private SequenceFileRecordReader<K, V> reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-                                                  throws IOException {
-    configuration = config;
-    Path listingFilePath = new Path(getListingFilePath(configuration));
-    chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-    fs = chunkRootPath.getFileSystem(configuration);
-    chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-    final String listingFileString = configuration.get(
-            DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-    assert !listingFileString.equals("") : "Listing file not found.";
-    return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-    return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(String chunkId, DynamicInputChunkContext chunkContext)
                                                       throws IOException {
-    if (!areInvariantsInitialized())
-      initializeChunkInvariants(configuration);
-
-    chunkFilePath = new Path(chunkRootPath, chunkFilePrefix + chunkId);
+    this.chunkContext = chunkContext;
+    chunkFilePath = new Path(chunkContext.getChunkRootPath(),
+        chunkContext.getChunkFilePrefix() + chunkId);
     openForWrite();
   }
 
-
   private void openForWrite() throws IOException {
     writer = SequenceFile.createWriter(
-            chunkFilePath.getFileSystem(configuration), configuration,
+            chunkContext.getFs(), chunkContext.getConfiguration(),
             chunkFilePath, Text.class, CopyListingFileStatus.class,
             SequenceFile.CompressionType.NONE);
 
   }
 
   /**
-   * Factory method to create chunk-files for writing to.
-   * (For instance, when the DynamicInputFormat splits the input-file into
-   * chunks.)
-   * @param chunkId String to identify the chunk.
-   * @param configuration Configuration, describing the location of the listing-
-   * file, file-system for the map-job, etc.
-   * @return A DynamicInputChunk, corresponding to a chunk-file, with the name
-   * incorporating the chunk-id.
-   * @throws IOException Exception on failure to create the chunk.
-   */
-  public static DynamicInputChunk createChunkForWrite(String chunkId,
-                          Configuration configuration) throws IOException {
-    return new DynamicInputChunk(chunkId, configuration);
-  }
-
-  /**
    * Method to write records into a chunk.
    * @param key Key from the listing file.
    * @param value Corresponding value from the listing file.
@@ -135,19 +87,19 @@
    * @throws IOException Exception on failure to reassign.
    */
   public void assignTo(TaskID taskId) throws IOException {
-    Path newPath = new Path(chunkRootPath, taskId.toString());
-    if (!fs.rename(chunkFilePath, newPath)) {
+    Path newPath = new Path(chunkContext.getChunkRootPath(), taskId.toString());
+    if (!chunkContext.getFs().rename(chunkFilePath, newPath)) {
       LOG.warn(chunkFilePath + " could not be assigned to " + taskId);
     }
   }
 
-  private DynamicInputChunk(Path chunkFilePath,
-                            TaskAttemptContext taskAttemptContext)
-                                   throws IOException, InterruptedException {
-    if (!areInvariantsInitialized())
-      initializeChunkInvariants(taskAttemptContext.getConfiguration());
+  public DynamicInputChunk(Path chunkFilePath,
+      TaskAttemptContext taskAttemptContext,
+      DynamicInputChunkContext chunkContext) throws IOException,
+      InterruptedException {
 
     this.chunkFilePath = chunkFilePath;
+    this.chunkContext = chunkContext;
     openForRead(taskAttemptContext);
   }
 
@@ -155,45 +107,8 @@
           throws IOException, InterruptedException {
     reader = new SequenceFileRecordReader<K, V>();
     reader.initialize(new FileSplit(chunkFilePath, 0,
-            DistCpUtils.getFileSize(chunkFilePath, configuration), null),
-            taskAttemptContext);
-  }
-
-  /**
-   * Factory method that
-   * 1. acquires a chunk for the specified map-task attempt
-   * 2. returns a DynamicInputChunk associated with the acquired chunk-file.
-   * @param taskAttemptContext The attempt-context for the map task that's
-   * trying to acquire a chunk.
-   * @return The acquired dynamic-chunk. The chunk-file is renamed to the
-   * attempt-id (from the attempt-context.)
-   * @throws IOException Exception on failure.
-   * @throws InterruptedException Exception on failure.
-   */
-  public static DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
-                                      throws IOException, InterruptedException {
-    if (!areInvariantsInitialized())
-        initializeChunkInvariants(taskAttemptContext.getConfiguration());
-
-    String taskId
-            = taskAttemptContext.getTaskAttemptID().getTaskID().toString();
-    Path acquiredFilePath = new Path(chunkRootPath, taskId);
-
-    if (fs.exists(acquiredFilePath)) {
-      LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
-      return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
-    }
-
-    for (FileStatus chunkFile : getListOfChunkFiles()) {
-      if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
-        LOG.info(taskId + " acquired " + chunkFile.getPath());
-        return new DynamicInputChunk(acquiredFilePath, taskAttemptContext);
-      }
-      else
-        LOG.warn(taskId + " could not acquire " + chunkFile.getPath());
-    }
-
-    return null;
+            DistCpUtils.getFileSize(chunkFilePath,
+                chunkContext.getConfiguration()), null), taskAttemptContext);
   }
 
   /**
@@ -204,19 +119,13 @@
    */
   public void release() throws IOException {
     close();
-    if (!fs.delete(chunkFilePath, false)) {
+    if (!chunkContext.getFs().delete(chunkFilePath, false)) {
       LOG.error("Unable to release chunk at path: " + chunkFilePath);
-      throw new IOException("Unable to release chunk at path: " + chunkFilePath);
+      throw new IOException("Unable to release chunk at path: " +
+          chunkFilePath);
     }
   }
 
-  static FileStatus [] getListOfChunkFiles() throws IOException {
-    Path chunkFilePattern = new Path(chunkRootPath, chunkFilePrefix + "*");
-    FileStatus chunkFiles[] = fs.globStatus(chunkFilePattern);
-    numChunksLeft = chunkFiles.length;
-    return chunkFiles;
-  }
-
   /**
    * Getter for the chunk-file's path, on HDFS.
    * @return The qualified path to the chunk-file.
@@ -234,14 +143,4 @@
     return reader;
   }
 
-  /**
-   * Getter for the number of chunk-files left in the chunk-file directory.
-   * Useful to determine how many chunks (and hence, records) are left to be
-   * processed.
-   * @return Before the first scan of the directory, the number returned is -1.
-   * Otherwise, the number of chunk-files seen from the last scan is returned.
-   */
-  public static int getNumChunksLeft() {
-    return numChunksLeft;
-  }
 }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 0000000..043ff1c
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class that holds and initializes the per-job DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext<K, V> {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+      throws IOException {
+    this.configuration = config;
+    Path listingFilePath = new Path(getListingFilePath(configuration));
+    chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+    fs = chunkRootPath.getFileSystem(configuration);
+    chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+
+  public Path getChunkRootPath() {
+    return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+    return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+    return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+    final String listingFileString = configuration.get(
+        DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+    assert !listingFileString.equals("") : "Listing file not found.";
+    return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+    return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+      throws IOException, InterruptedException {
+
+    String taskId
+        = taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+    Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+    if (fs.exists(acquiredFilePath)) {
+      LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+      return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+    }
+
+    for (FileStatus chunkFile : getListOfChunkFiles()) {
+      if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+        LOG.info(taskId + " acquired " + chunkFile.getPath());
+        return new DynamicInputChunk(acquiredFilePath, taskAttemptContext,
+            this);
+      }
+    }
+    return null;
+  }
+
+  public DynamicInputChunk createChunkForWrite(String chunkId)
+      throws IOException {
+    return new DynamicInputChunk(chunkId, this);
+  }
+
+  public FileStatus [] getListOfChunkFiles() throws IOException {
+    Path chunkFilePattern = new Path(chunkRootPath, chunkFilePrefix + "*");
+    FileStatus chunkFiles[] = fs.globStatus(chunkFilePattern);
+    numChunksLeft = chunkFiles.length;
+    return chunkFiles;
+  }
+}
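
DynamicInputChunkContext moves what used to be static chunk invariants (configuration, chunk root path, file system, chunk-file prefix, chunk count) into a per-job object that DynamicInputFormat creates once and shares with its chunks and record readers, as the following diffs show. A minimal usage sketch under those assumptions; the listing-file path is hypothetical, and the class sits in the same package because these types are package-private.

    // One context per DynamicInputFormat, shared by every chunk it creates.
    package org.apache.hadoop.tools.mapred.lib;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCpConstants;

    final class ChunkContextUsage {
      static void writeOneChunk() throws Exception {
        Configuration conf = new Configuration();
        conf.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH,
            "/tmp/distcp/listing.seq");               // hypothetical listing file

        // All chunk-file names and locations derive from this single instance;
        // nothing lives in static fields any more.
        DynamicInputChunkContext<Object, Object> context =
            new DynamicInputChunkContext<Object, Object>(conf);

        DynamicInputChunk chunk = context.createChunkForWrite("00000");
        chunk.close();
        // A record reader later picks up chunks through the same context:
        //   DynamicInputChunk acquired = context.acquire(taskAttemptContext);
      }
    }
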
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
index 38269c7..fe8604a 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java
@@ -57,7 +57,8 @@
           = "mapred.num.splits";
   private static final String CONF_LABEL_NUM_ENTRIES_PER_CHUNK
           = "mapred.num.entries.per.chunk";
-  
+  private DynamicInputChunkContext<K, V> chunkContext = null;
+
   /**
    * Implementation of InputFormat::getSplits(). This method splits up the
    * copy-listing file into chunks, and assigns the first batch to different
@@ -72,6 +73,7 @@
       throws IOException, InterruptedException {
     LOG.info("DynamicInputFormat: Getting splits for job:"
              + jobContext.getJobID());
+    chunkContext = getChunkContext(jobContext.getConfiguration());
     return createSplits(jobContext,
                         splitCopyListingIntoChunksWithShuffle(jobContext));
   }
@@ -101,6 +103,13 @@
 
   private static int N_CHUNKS_OPEN_AT_ONCE_DEFAULT = 16;
 
+  public DynamicInputChunkContext<K, V> getChunkContext(
+      Configuration configuration) throws IOException {
+    if (chunkContext == null) {
+      chunkContext = new DynamicInputChunkContext<K, V>(configuration);
+    }
+    return chunkContext;
+  }
   private List<DynamicInputChunk> splitCopyListingIntoChunksWithShuffle
                                     (JobContext context) throws IOException {
 
@@ -146,8 +155,8 @@
           closeAll(openChunks);
           chunksFinal.addAll(openChunks);
 
-          openChunks = createChunks(
-                  configuration, chunkCount, nChunksTotal, nChunksOpenAtOnce);
+          openChunks = createChunks(chunkCount, nChunksTotal,
+              nChunksOpenAtOnce);
 
           chunkCount += openChunks.size();
 
@@ -183,9 +192,9 @@
       chunk.close();
   }
 
-  private static List<DynamicInputChunk> createChunks(Configuration config,
-                      int chunkCount, int nChunksTotal, int nChunksOpenAtOnce)
-                                          throws IOException {
+  private List<DynamicInputChunk> createChunks(int chunkCount,
+      int nChunksTotal, int nChunksOpenAtOnce)
+      throws IOException {
     List<DynamicInputChunk> chunks = new ArrayList<DynamicInputChunk>();
     int chunkIdUpperBound
             = Math.min(nChunksTotal, chunkCount + nChunksOpenAtOnce);
@@ -197,14 +206,13 @@
       chunkIdUpperBound = nChunksTotal;
 
     for (int i=chunkCount; i < chunkIdUpperBound; ++i)
-      chunks.add(createChunk(i, config));
+      chunks.add(createChunk(i));
     return chunks;
   }
 
-  private static DynamicInputChunk createChunk(int chunkId, Configuration config)
+  private DynamicInputChunk createChunk(int chunkId)
                                               throws IOException {
-    return DynamicInputChunk.createChunkForWrite(String.format("%05d", chunkId),
-                                              config);
+    return chunkContext.createChunkForWrite(String.format("%05d", chunkId));
   }
 
 
@@ -351,6 +359,7 @@
           InputSplit inputSplit,
           TaskAttemptContext taskAttemptContext)
           throws IOException, InterruptedException {
-    return new DynamicRecordReader<K, V>();
+    chunkContext = getChunkContext(taskAttemptContext.getConfiguration());
+    return new DynamicRecordReader<K, V>(chunkContext);
   }
 }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
index 00b3c69..87b8f08 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicRecordReader.java
@@ -49,9 +49,14 @@
   private int numRecordsProcessedByThisMap = 0;
   private long timeOfLastChunkDirScan = 0;
   private boolean isChunkDirAlreadyScanned = false;
+  private DynamicInputChunkContext<K, V> chunkContext;
 
   private static long TIME_THRESHOLD_FOR_DIR_SCANS = TimeUnit.MINUTES.toMillis(5);
 
+  DynamicRecordReader(DynamicInputChunkContext<K, V> chunkContext) {
+    this.chunkContext = chunkContext;
+  }
+
   /**
    * Implementation for RecordReader::initialize(). Initializes the internal
    * RecordReader to read from chunks.
@@ -69,7 +74,7 @@
     this.taskAttemptContext = taskAttemptContext;
     configuration = taskAttemptContext.getConfiguration();
     taskId = taskAttemptContext.getTaskAttemptID().getTaskID();
-    chunk = DynamicInputChunk.acquire(this.taskAttemptContext);
+    chunk = chunkContext.acquire(this.taskAttemptContext);
     timeOfLastChunkDirScan = System.currentTimeMillis();
     isChunkDirAlreadyScanned = false;
 
@@ -114,7 +119,7 @@
     timeOfLastChunkDirScan = System.currentTimeMillis();
     isChunkDirAlreadyScanned = false;
     
-    chunk = DynamicInputChunk.acquire(taskAttemptContext);
+    chunk = chunkContext.acquire(taskAttemptContext);
 
     if (chunk == null) return false;
 
@@ -182,12 +187,12 @@
             || (!isChunkDirAlreadyScanned &&
                     numRecordsProcessedByThisMap%numRecordsPerChunk
                               > numRecordsPerChunk/2)) {
-      DynamicInputChunk.getListOfChunkFiles();
+      chunkContext.getListOfChunkFiles();
       isChunkDirAlreadyScanned = true;
       timeOfLastChunkDirScan = now;
     }
 
-    return DynamicInputChunk.getNumChunksLeft();
+    return chunkContext.getNumChunksLeft();
   }
   /**
    * Implementation of RecordReader::close().
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java
index 1a2227c..4bb6c98 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java
@@ -64,6 +64,10 @@
     return reader;
   }
 
+  public void setReader(RecordReader<Text, CopyListingFileStatus> reader) {
+    this.reader = reader;
+  }
+
   public StubInMemoryWriter getWriter() {
     return writer;
   }
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java
index 8cc8317..bb2dd9d 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java
@@ -40,6 +40,7 @@
 import org.junit.Test;
 
 import java.io.DataOutputStream;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -126,13 +127,14 @@
     int taskId = 0;
 
     for (InputSplit split : splits) {
-      RecordReader<Text, CopyListingFileStatus> recordReader =
-           inputFormat.createRecordReader(split, null);
       StubContext stubContext = new StubContext(jobContext.getConfiguration(),
-                                                recordReader, taskId);
+                                                null, taskId);
       final TaskAttemptContext taskAttemptContext
          = stubContext.getContext();
-      
+
+      RecordReader<Text, CopyListingFileStatus> recordReader =
+          inputFormat.createRecordReader(split, taskAttemptContext);
+      stubContext.setReader(recordReader);
       recordReader.initialize(splits.get(0), taskAttemptContext);
       float previousProgressValue = 0f;
       while (recordReader.nextKeyValue()) {
@@ -182,4 +184,27 @@
     conf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, 53);
     Assert.assertEquals(53, DynamicInputFormat.getSplitRatio(3, 200, conf));
   }
+
+  @Test
+  public void testDynamicInputChunkContext() throws IOException {
+    Configuration configuration = new Configuration();
+    configuration.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH,
+        "/tmp/test/file1.seq");
+    DynamicInputFormat firstInputFormat = new DynamicInputFormat();
+    DynamicInputFormat secondInputFormat = new DynamicInputFormat();
+    DynamicInputChunkContext firstContext =
+        firstInputFormat.getChunkContext(configuration);
+    DynamicInputChunkContext secondContext =
+        firstInputFormat.getChunkContext(configuration);
+    DynamicInputChunkContext thirdContext =
+        secondInputFormat.getChunkContext(configuration);
+    DynamicInputChunkContext fourthContext =
+        secondInputFormat.getChunkContext(configuration);
+    Assert.assertTrue("Chunk contexts from the same DynamicInputFormat " +
+        "object should be the same.",firstContext.equals(secondContext));
+    Assert.assertTrue("Chunk contexts from the same DynamicInputFormat " +
+        "object should be the same.",thirdContext.equals(fourthContext));
+    Assert.assertTrue("Contexts from different DynamicInputFormat " +
+        "objects should be different.",!firstContext.equals(thirdContext));
+  }
 }
diff --git a/hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml b/hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml
index 5b09c81..085a5d8 100644
--- a/hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml
+++ b/hadoop-tools/hadoop-sls/src/main/assemblies/sls.xml
@@ -15,7 +15,9 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<assembly>
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
   <id>sls</id>
   <formats>
     <format>dir</format>
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f4e7514..d6ad672 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -549,6 +549,8 @@
     YARN-3216. Max-AM-Resource-Percentage should respect node labels. 
     (Sunil G via wangda)
 
+    YARN-4310. FairScheduler: Log skipping reservation messages at DEBUG level (asuresh)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -687,9 +689,6 @@
 
     YARN-3582. NPE in WebAppProxyServlet. (jian he via xgong)
     
-    YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert Kanter
-    via junping_du)
-
     YARN-3577. Misspelling of threshold in log4j.properties for tests.
     (Brahma Reddy Battula via aajisaka)
 
@@ -1031,6 +1030,18 @@
     YARN-4251. TestAMRMClientOnRMRestart#testAMRMClientOnAMRMTokenRollOverOnRMRestart
     is failing. (Brahma Reddy Battula via ozawa)
 
+    YARN-4130. Duplicate declaration of ApplicationId in RMAppManager#submitApplication method. 
+    (Kai Sasaki via rohithsharmaks)
+
+    YARN-4288. Fixed RMProxy to retry on IOException from local host.
+    (Junping Du via jianhe)
+
+    YARN-4127. RM fail with noAuth error if switched from failover to non-failover.
+    (Varun Saxena via jianhe)
+
+    YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer 
+    binds to default port 8188. (Meng Ding via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1127,6 +1138,24 @@
     YARN-4041. Slow delegation token renewal can severely prolong RM recovery
     (Sunil G via jlowe)
 
+    YARN-2902. Killing a container that is localizing can orphan resources in
+    the DOWNLOADING state (Varun Saxena via jlowe)
+
+    YARN-4183. Enabling generic application history forces every job to get a
+    timeline service delegation token (Mit Desai via jeagles)
+
+    YARN-4313. Race condition in MiniMRYarnCluster when getting history server
+    address. (Jian He via xgong)
+
+    YARN-3580. [JDK8] TestClientRMService.testGetLabelsToNodes fails. (Robert Kanter
+    via junping_du)
+
+    YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
+    as some of the test cases time out. (Varun Saxena via ozawa)
+
+    YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer
+    binds to default port 8188. (Varun Saxena via ozawa)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
@@ -1849,17 +1878,29 @@
     YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
     and node-label column (Jason Lowe via wangda)
 
-Release 2.6.2 - UNRELEASED
+Release 2.6.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
-    YARN-2019. Retrospect on decision of making RM crashed if any exception throw 
-    in ZKRMStateStore. (Jian He via junping_du)
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.6.2 - 2015-10-28
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
 
   IMPROVEMENTS
 
+    YARN-3727. For better error recovery, check if the directory exists before
+    using it for localization. (Zhihai Xu via jlowe)
+
     YARN-4092. Fixed UI redirection to print useful messages when both RMs are
     in standby mode. (Xuan Gong via jianhe)
 
@@ -1870,15 +1911,15 @@
 
   BUG FIXES
 
+    YARN-2019. Retrospect on decision of making RM crashed if any exception throw
+    in ZKRMStateStore. (Jian He via junping_du)
+
     YARN-4087. Followup fixes after YARN-2019 regarding RM behavior when
     state-store error occurs. (Jian He via xgong)
 
     YARN-3554. Default value for maximum nodemanager connect wait time is too
     high (Naganarasimha G R via jlowe)
 
-    YARN-3727. For better error recovery, check if the directory exists before
-    using it for localization. (Zhihai Xu via jlowe)
-
     YARN-4005. Completed container whose app is finished is possibly not
     removed from NMStateStore. (Jun Gong via jianhe)
 
@@ -1897,9 +1938,6 @@
     YARN-3798. ZKRMStateStore shouldn't create new session without occurrance of 
     SESSIONEXPIED. (ozawa and Varun Saxena)
 
-    YARN-2859. ApplicationHistoryServer binds to default port 8188 in MiniYARNCluster.
-    (Vinod Kumar Vavilapalli via xgong)
-
 Release 2.6.1 - 2015-09-23
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index dcb6e72..3197875 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -92,9 +92,14 @@
       yarnCluster.init(conf);
       
       yarnCluster.start();
-      
+
+      conf.set(
+          YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+          MiniYARNCluster.getHostname() + ":"
+              + yarnCluster.getApplicationHistoryServer().getPort());
+
       waitForNMsToRegister();
-      
+
       URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
       if (url == null) {
         throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
index be08f2f6..23e1691 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
@@ -250,8 +250,10 @@
     exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
     exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
     exceptionToPolicyMap.put(SocketException.class, retryPolicy);
-
-    return RetryPolicies.retryByException(
+    // YARN-4288: local IOException is also possible.
+    exceptionToPolicyMap.put(IOException.class, retryPolicy);
+    // Do not retry on remote IOExceptions.
+    return RetryPolicies.retryOtherThanRemoteException(
         RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
   }
 }
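
YARN-4288 widens the client retry policy above so that a plain local IOException is retried while RemoteExceptions coming back from the server still fail immediately. A standalone sketch of the same exception-to-policy mapping with Hadoop's RetryPolicies; the retry count and sleep interval are illustrative only, not the values RMProxy uses.

    // Retry local IOExceptions with a bounded fixed-sleep policy; never retry
    // RemoteExceptions. Count and sleep below are illustrative.
    import java.io.IOException;
    import java.net.SocketException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    final class LocalIoRetryPolicy {
      static RetryPolicy create() {
        RetryPolicy retryPolicy =
            RetryPolicies.retryUpToMaximumCountWithFixedSleep(
                10, 1000, TimeUnit.MILLISECONDS);

        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
            new HashMap<Class<? extends Exception>, RetryPolicy>();
        exceptionToPolicyMap.put(SocketException.class, retryPolicy);
        exceptionToPolicyMap.put(IOException.class, retryPolicy);  // local I/O failures

        // Anything not mapped (and any RemoteException) fails on the first attempt.
        return RetryPolicies.retryOtherThanRemoteException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
      }
    }
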
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 111a85f..21e1c1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -143,7 +143,7 @@
 
   @Private
   @VisibleForTesting
-  int getPort() {
+  public int getPort() {
     return this.webApp.httpServer().getConnectorAddress(0).getPort();
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 5819f23..9ccde5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -25,6 +25,7 @@
 
 import java.io.DataOutputStream;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -486,8 +487,12 @@
     for (Path baseDir : baseDirs) {
       Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
       LOG.info("Deleting path : " + del);
-      if (!lfs.delete(del, true)) {
-        LOG.warn("delete returned false for path: [" + del + "]");
+      try {
+        if (!lfs.delete(del, true)) {
+          LOG.warn("delete returned false for path: [" + del + "]");
+        }
+      } catch (FileNotFoundException e) {
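+        // The path is already gone; nothing to delete, so move on to the
+        // next base dir.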
+        continue;
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
index 9dffff30..96f8b51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DockerContainerExecutor.java
@@ -24,6 +24,7 @@
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -469,8 +470,12 @@
     for (Path baseDir : baseDirs) {
       Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
       LOG.info("Deleting path : " + del);
-      if (!lfs.delete(del, true)) {
-        LOG.warn("delete returned false for path: [" + del + "]");
+      try {
+        if (!lfs.delete(del, true)) {
+          LOG.warn("delete returned false for path: [" + del + "]");
+        }
+      } catch (FileNotFoundException e) {
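+        // The path is already gone; nothing to delete, so move on to the
+        // next base dir.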
+        continue;
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index 51dbcaa..fb8f767 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -187,6 +187,16 @@
 
     rsrc.handle(event);
 
+    // Remove the resource if it is still downloading and its reference count
+    // has dropped to 0 after RELEASE. This may happen when a container was
+    // killed while localizing and no other container refers to the resource.
+    if (event.getType() == ResourceEventType.RELEASE) {
+      if (rsrc.getState() == ResourceState.DOWNLOADING &&
+          rsrc.getRefCount() <= 0) {
+        removeResource(req);
+      }
+    }
+
     if (event.getType() == ResourceEventType.LOCALIZED) {
       if (rsrc.getLocalPath() != null) {
         try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 2cc5683..5db5145 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -1165,9 +1165,22 @@
           dispatcher.getEventHandler().handle(new ContainerResourceFailedEvent(
               cId, null, exception.getMessage()));
         }
+        List<Path> paths = new ArrayList<Path>();
         for (LocalizerResourceRequestEvent event : scheduled.values()) {
+          // Some resources were still downloading. Schedule a deletion task
+          // for the localization dir and the tmp dir used for downloading.
+          Path locRsrcPath = event.getResource().getLocalPath();
+          if (locRsrcPath != null) {
+            Path locRsrcDirPath = locRsrcPath.getParent();
+            paths.add(locRsrcDirPath);
+            paths.add(new Path(locRsrcDirPath + "_tmp"));
+          }
           event.getResource().unlock();
         }
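+        // Hand all collected downloading dirs to the DeletionService in one call.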
+        if (!paths.isEmpty()) {
+          delService.delete(context.getUser(),
+              null, paths.toArray(new Path[paths.size()]));
+        }
         delService.delete(null, nmPrivateCTokensPath, new Path[] {});
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index e81f0c7..102111b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1729,8 +1729,13 @@
     char* full_path = NULL;
     struct stat sb;
     if (stat(*ptr, &sb) != 0) {
-      fprintf(LOGFILE, "Could not stat %s\n", *ptr);
-      return -1;
+      if (errno == ENOENT) {
+        // Ignore missing dir. Continue deleting other directories.
+        continue;
+      } else {
+        fprintf(LOGFILE, "Could not stat %s - %s\n", *ptr, strerror(errno));
+        return -1;
+      }
     }
     if (!S_ISDIR(sb.st_mode)) {
       if (!subDirEmptyStr) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 001a37d..3db75eab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -410,6 +410,17 @@
     exit(1);
   }
 
+  sprintf(buffer, "%s", app_dir);
+  char missing_dir[20];
+  strcpy(missing_dir, "/some/missing/dir");
+  char * dirs_with_missing[] = {missing_dir, buffer, 0};
+  ret = delete_as_user(yarn_username, "", dirs_with_missing);
+  printf("%d", ret);
+  if (access(buffer, R_OK) == 0) {
+    printf("FAIL: directory not deleted\n");
+    exit(1);
+  }
+
   sprintf(buffer, "%s/local-1/usercache/%s", TEST_ROOT, yarn_username);
   if (access(buffer, R_OK) != 0) {
     printf("FAIL: directory missing before test\n");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index e6aeae0..4eb8675 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -141,12 +141,12 @@
       tracker.handle(rel21Event);
 
       dispatcher.await();
-      verifyTrackedResourceCount(tracker, 2);
+      verifyTrackedResourceCount(tracker, 1);
 
       // Verify resource with non zero ref count is not removed.
       Assert.assertEquals(2, lr1.getRefCount());
       Assert.assertFalse(tracker.remove(lr1, mockDelService));
-      verifyTrackedResourceCount(tracker, 2);
+      verifyTrackedResourceCount(tracker, 1);
 
       // Localize resource1
       ResourceLocalizedEvent rle =
@@ -161,7 +161,7 @@
 
       // Verify resources in state LOCALIZED with ref-count=0 are removed.
       Assert.assertTrue(tracker.remove(lr1, mockDelService));
-      verifyTrackedResourceCount(tracker, 1);
+      verifyTrackedResourceCount(tracker, 0);
     } finally {
       if (dispatcher != null) {
         dispatcher.stop();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index e7a9db8..f9e4188 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -43,6 +43,8 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.io.NotSerializableException;
+import java.io.ObjectInputStream;
 import java.lang.reflect.Constructor;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -101,6 +103,7 @@
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
@@ -150,9 +153,12 @@
 import org.mockito.ArgumentCaptor;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
+import org.mockito.internal.matchers.VarargMatcher;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.collect.Sets;
+
 public class TestResourceLocalizationService {
 
   static final Path basedir =
@@ -481,16 +487,14 @@
         Assert.assertEquals("Incorrect reference count", 0, lr.getRefCount());
         pubRsrcs.remove(lr.getRequest());
       }
-      Assert.assertEquals(0, pubRsrcs.size());
-      Assert.assertEquals(2, pubRsrcCount);
+      Assert.assertEquals(2, pubRsrcs.size());
+      Assert.assertEquals(0, pubRsrcCount);
 
       appRsrcCount = 0;
       for (LocalizedResource lr : appTracker) {
         appRsrcCount++;
-        Assert.assertEquals("Incorrect reference count", 0, lr.getRefCount());
-        Assert.assertEquals(appReq, lr.getRequest());
       }
-      Assert.assertEquals(1, appRsrcCount);
+      Assert.assertEquals(0, appRsrcCount);
     } finally {
       dispatcher.stop();
       delService.stop();
@@ -1066,6 +1070,285 @@
     }
   }
 
+  private static class DownloadingPathsMatcher extends ArgumentMatcher<Path[]>
+      implements VarargMatcher {
+    static final long serialVersionUID = 0;
+
+    private transient Set<Path> matchPaths;
+
+    DownloadingPathsMatcher(Set<Path> matchPaths) {
+      this.matchPaths = matchPaths;
+    }
+
+    @Override
+    public boolean matches(Object varargs) {
+      Path[] downloadingPaths = (Path[]) varargs;
+      if (matchPaths.size() != downloadingPaths.length) {
+        return false;
+      }
+      for (Path downloadingPath : downloadingPaths) {
+        if (!matchPaths.contains(downloadingPath)) {
+          return false;
+        }
+      }
+      return true;
+    }
+
+    private void readObject(ObjectInputStream os) throws NotSerializableException {
+      throw new NotSerializableException(this.getClass().getName());
+    }
+  }
+
+  private static class DummyExecutor extends DefaultContainerExecutor {
+    private volatile boolean stopLocalization = false;
+    @Override
+    public void startLocalizer(LocalizerStartContext ctx)
+        throws IOException, InterruptedException {
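+      // Spin until the test calls setStopLocalization(), keeping
+      // localization in progress for the duration of the test.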
+      while (!stopLocalization) {
+        Thread.yield();
+      }
+    }
+    void setStopLocalization() {
+      stopLocalization = true;
+    }
+  }
+
+  @Test(timeout = 20000)
+  @SuppressWarnings("unchecked")
+  public void testDownloadingResourcesOnContainerKill() throws Exception {
+    List<Path> localDirs = new ArrayList<Path>();
+    String[] sDirs = new String[1];
+    localDirs.add(lfs.makeQualified(new Path(basedir, 0 + "")));
+    sDirs[0] = localDirs.get(0).toString();
+
+    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
+    DrainDispatcher dispatcher = new DrainDispatcher();
+    dispatcher.init(conf);
+    dispatcher.start();
+    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
+    dispatcher.register(ApplicationEventType.class, applicationBus);
+    EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
+    dispatcher.register(ContainerEventType.class, containerBus);
+
+    DummyExecutor exec = new DummyExecutor();
+    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
+    dirsHandler.init(conf);
+
+    DeletionService delServiceReal = new DeletionService(exec);
+    DeletionService delService = spy(delServiceReal);
+    delService.init(new Configuration());
+    delService.start();
+
+    ResourceLocalizationService rawService = new ResourceLocalizationService(
+        dispatcher, exec, delService, dirsHandler, nmContext);
+    ResourceLocalizationService spyService = spy(rawService);
+    doReturn(mockServer).when(spyService).createServer();
+    doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
+    FsPermission defaultPermission =
+        FsPermission.getDirDefault().applyUMask(lfs.getUMask());
+    FsPermission nmPermission =
+        ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());
+    final Path userDir =
+        new Path(sDirs[0].substring("file:".length()),
+          ContainerLocalizer.USERCACHE);
+    final Path fileDir =
+        new Path(sDirs[0].substring("file:".length()),
+          ContainerLocalizer.FILECACHE);
+    final Path sysDir =
+        new Path(sDirs[0].substring("file:".length()),
+          ResourceLocalizationService.NM_PRIVATE_DIR);
+    final FileStatus fs =
+        new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0,
+          defaultPermission, "", "", new Path(sDirs[0]));
+    final FileStatus nmFs =
+        new FileStatus(0, true, 1, 0, System.currentTimeMillis(), 0,
+          nmPermission, "", "", sysDir);
+
+    doAnswer(new Answer<FileStatus>() {
+      @Override
+      public FileStatus answer(InvocationOnMock invocation) throws Throwable {
+        Object[] args = invocation.getArguments();
+        if (args.length > 0) {
+          if (args[0].equals(userDir) || args[0].equals(fileDir)) {
+            return fs;
+          }
+        }
+        return nmFs;
+      }
+    }).when(spylfs).getFileStatus(isA(Path.class));
+
+    try {
+      spyService.init(conf);
+      spyService.start();
+
+      final Application app = mock(Application.class);
+      final ApplicationId appId =
+          BuilderUtils.newApplicationId(314159265358979L, 3);
+      String user = "user0";
+      when(app.getUser()).thenReturn(user);
+      when(app.getAppId()).thenReturn(appId);
+      spyService.handle(new ApplicationLocalizationEvent(
+          LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
+      ArgumentMatcher<ApplicationEvent> matchesAppInit =
+        new ArgumentMatcher<ApplicationEvent>() {
+          @Override
+          public boolean matches(Object o) {
+            ApplicationEvent evt = (ApplicationEvent) o;
+            return evt.getType() == ApplicationEventType.APPLICATION_INITED
+              && appId == evt.getApplicationID();
+          }
+        };
+      dispatcher.await();
+      verify(applicationBus).handle(argThat(matchesAppInit));
+
+      // Initialize localizer.
+      Random r = new Random();
+      long seed = r.nextLong();
+      System.out.println("SEED: " + seed);
+      r.setSeed(seed);
+      final Container c1 = getMockContainer(appId, 42, "user0");
+      final Container c2 = getMockContainer(appId, 43, "user0");
+      FSDataOutputStream out =
+        new FSDataOutputStream(new DataOutputBuffer(), null);
+      doReturn(out).when(spylfs).createInternal(isA(Path.class),
+          isA(EnumSet.class), isA(FsPermission.class), anyInt(), anyShort(),
+          anyLong(), isA(Progressable.class), isA(ChecksumOpt.class),
+          anyBoolean());
+      final LocalResource resource1 = getPrivateMockedResource(r);
+      LocalResource resource2 = null;
+      do {
+        resource2 = getPrivateMockedResource(r);
+      } while (resource2 == null || resource2.equals(resource1));
+      LocalResource resource3 = null;
+      do {
+        resource3 = getPrivateMockedResource(r);
+      } while (resource3 == null || resource3.equals(resource1)
+          || resource3.equals(resource2));
+
+      // Send localization requests for container c1 and c2.
+      final LocalResourceRequest req1 = new LocalResourceRequest(resource1);
+      final LocalResourceRequest req2 = new LocalResourceRequest(resource2);
+      final LocalResourceRequest req3 = new LocalResourceRequest(resource3);
+      Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
+        new HashMap<LocalResourceVisibility,
+                    Collection<LocalResourceRequest>>();
+      List<LocalResourceRequest> privateResourceList =
+          new ArrayList<LocalResourceRequest>();
+      privateResourceList.add(req1);
+      privateResourceList.add(req2);
+      privateResourceList.add(req3);
+      rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList);
+      spyService.handle(new ContainerLocalizationRequestEvent(c1, rsrcs));
+
+      final LocalResourceRequest req1_1 = new LocalResourceRequest(resource2);
+      Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs1 =
+        new HashMap<LocalResourceVisibility,
+                    Collection<LocalResourceRequest>>();
+      List<LocalResourceRequest> privateResourceList1 =
+          new ArrayList<LocalResourceRequest>();
+      privateResourceList1.add(req1_1);
+      rsrcs1.put(LocalResourceVisibility.PRIVATE, privateResourceList1);
+      spyService.handle(new ContainerLocalizationRequestEvent(c2, rsrcs1));
+
+      dispatcher.await();
+      final String containerIdStr = c1.getContainerId().toString();
+      // Heartbeats from container localizer
+      LocalResourceStatus rsrc1success = mock(LocalResourceStatus.class);
+      LocalResourceStatus rsrc2pending = mock(LocalResourceStatus.class);
+      LocalizerStatus stat = mock(LocalizerStatus.class);
+      when(stat.getLocalizerId()).thenReturn(containerIdStr);
+      when(rsrc1success.getResource()).thenReturn(resource1);
+      when(rsrc2pending.getResource()).thenReturn(resource2);
+      when(rsrc1success.getLocalSize()).thenReturn(4344L);
+      URL locPath = getPath("/some/path");
+      when(rsrc1success.getLocalPath()).thenReturn(locPath);
+      when(rsrc1success.getStatus()).
+          thenReturn(ResourceStatusType.FETCH_SUCCESS);
+      when(rsrc2pending.getStatus()).
+          thenReturn(ResourceStatusType.FETCH_PENDING);
+
+      when(stat.getResources())
+        .thenReturn(Collections.<LocalResourceStatus>emptyList())
+        .thenReturn(Collections.singletonList(rsrc1success))
+        .thenReturn(Collections.singletonList(rsrc2pending))
+        .thenReturn(Collections.singletonList(rsrc2pending))
+        .thenReturn(Collections.<LocalResourceStatus>emptyList());
+
+      // First heartbeat which schedules first resource.
+      LocalizerHeartbeatResponse response = spyService.heartbeat(stat);
+      assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
+
+      // Second heartbeat which reports first resource as success.
+      // Second resource is scheduled.
+      response = spyService.heartbeat(stat);
+      assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
+      final String locPath1 = response.getResourceSpecs().get(0).
+          getDestinationDirectory().getFile();
+
+      // Third heartbeat which reports second resource as pending.
+      // Third resource is scheduled.
+      response = spyService.heartbeat(stat);
+      assertEquals(LocalizerAction.LIVE, response.getLocalizerAction());
+      final String locPath2 = response.getResourceSpecs().get(0).
+          getDestinationDirectory().getFile();
+
+      // Container c1 is killed, which leads to cleanup.
+      spyService.handle(new ContainerLocalizationCleanupEvent(c1, rsrcs));
+
+      // This heartbeat tells the container localizer to die because the
+      // localizer runner has stopped.
+      response = spyService.heartbeat(stat);
+      assertEquals(LocalizerAction.DIE, response.getLocalizerAction());
+
+      exec.setStopLocalization();
+      dispatcher.await();
+      // verify container notification
+      ArgumentMatcher<ContainerEvent> successContainerLoc =
+        new ArgumentMatcher<ContainerEvent>() {
+          @Override
+          public boolean matches(Object o) {
+            ContainerEvent evt = (ContainerEvent) o;
+            return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED
+              && c1.getContainerId() == evt.getContainerID();
+          }
+        };
+      // Only one resource gets localized for container c1.
+      verify(containerBus).handle(argThat(successContainerLoc));
+
+      Set<Path> paths =
+          Sets.newHashSet(new Path(locPath1), new Path(locPath1 + "_tmp"),
+              new Path(locPath2), new Path(locPath2 + "_tmp"));
+      // Verify that the downloading resources were submitted for deletion.
+      verify(delService).delete(eq(user),
+          (Path) eq(null), argThat(new DownloadingPathsMatcher(paths)));
+
+      LocalResourcesTracker tracker = spyService.getLocalResourcesTracker(
+          LocalResourceVisibility.PRIVATE, "user0", appId);
+      // Container c1 was killed, but this resource was localized before the
+      // kill, so it is not removed even though its ref count is 0.
+      LocalizedResource rsrc1 = tracker.getLocalizedResource(req1);
+      assertNotNull(rsrc1);
+      assertEquals(rsrc1.getState(), ResourceState.LOCALIZED);
+      assertEquals(rsrc1.getRefCount(), 0);
+
+      // Container c1 was killed, but this resource is also referenced by
+      // container c2, so its ref count is 1.
+      LocalizedResource rsrc2 = tracker.getLocalizedResource(req2);
+      assertNotNull(rsrc2);
+      assertEquals(rsrc2.getState(), ResourceState.DOWNLOADING);
+      assertEquals(rsrc2.getRefCount(), 1);
+
+      // Because container c1 was killed and this resource was not referenced
+      // by any other container, it has been removed.
+      LocalizedResource rsrc3 = tracker.getLocalizedResource(req3);
+      assertNull(rsrc3);
+    } finally {
+      spyService.stop();
+      dispatcher.stop();
+      delService.stop();
+    }
+  }
+
   @Test
   @SuppressWarnings("unchecked")
   public void testPublicResourceInitializesLocalDir() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 0b7083c..711a7a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -281,14 +281,14 @@
 
     RMAppImpl application =
         createAndPopulateNewRMApp(submissionContext, submitTime, user, false);
-    ApplicationId appId = submissionContext.getApplicationId();
     Credentials credentials = null;
     try {
       credentials = parseCredentials(submissionContext);
       if (UserGroupInformation.isSecurityEnabled()) {
-        this.rmContext.getDelegationTokenRenewer().addApplicationAsync(appId,
-            credentials, submissionContext.getCancelTokensWhenComplete(),
-            application.getUser());
+        this.rmContext.getDelegationTokenRenewer()
+            .addApplicationAsync(applicationId, credentials,
+                submissionContext.getCancelTokensWhenComplete(),
+                application.getUser());
       } else {
         // Dispatcher is not yet started at this time, so these START events
         // enqueued should be guaranteed to be first processed when dispatcher
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 0f09735..c73e835 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -77,8 +77,8 @@
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     publishSystemMetrics =
-        conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
-            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED) &&
+        conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
+            YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED) &&
         conf.getBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED,
             YarnConfiguration.DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED);
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 0550087..a86b60c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -282,8 +282,9 @@
     // ensure root dirs exist
     createRootDirRecursively(znodeWorkingPath);
     create(zkRootNodePath);
-    if (HAUtil.isHAEnabled(getConfig())){
-      fence();
+    setRootNodeAcls();
+    delete(fencingNodePath);
+    if (HAUtil.isHAEnabled(getConfig())) {
       verifyActiveStatusThread = new VerifyActiveStatusThread();
       verifyActiveStatusThread.start();
     }
@@ -309,16 +310,19 @@
     LOG.debug(builder.toString());
   }
 
-  private synchronized void fence() throws Exception {
-    if (LOG.isTraceEnabled()) {
-      logRootNodeAcls("Before fencing\n");
+  private void setRootNodeAcls() throws Exception {
+    if (LOG.isDebugEnabled()) {
+      logRootNodeAcls("Before setting ACLs'\n");
     }
 
-    curatorFramework.setACL().withACL(zkRootNodeAcl).forPath(zkRootNodePath);
-    delete(fencingNodePath);
+    if (HAUtil.isHAEnabled(getConfig())) {
+      curatorFramework.setACL().withACL(zkRootNodeAcl).forPath(zkRootNodePath);
+    } else {
+      curatorFramework.setACL().withACL(zkAcl).forPath(zkRootNodePath);
+    }
 
-    if (LOG.isTraceEnabled()) {
-      logRootNodeAcls("After fencing\n");
+    if (LOG.isDebugEnabled()) {
+      logRootNodeAcls("After setting ACLs'\n");
     }
   }
 
@@ -933,7 +937,8 @@
     return curatorFramework.getData().forPath(path);
   }
 
-  private List<ACL> getACL(final String path) throws Exception {
+  @VisibleForTesting
+  List<ACL> getACL(final String path) throws Exception {
     return curatorFramework.getACL().forPath(path);
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6da4ebf..abbf77a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -488,13 +488,15 @@
       if (existingReservations >= numAllowedReservations) {
         DecimalFormat df = new DecimalFormat();
         df.setMaximumFractionDigits(2);
-        LOG.info("Reservation Exceeds Allowed number of nodes:" +
-                " app_id=" + getApplicationId() +
-                " existingReservations=" + existingReservations +
-                " totalAvailableNodes=" + totalAvailNodes +
-                " reservableNodesRatio=" + df.format(
-                                        scheduler.getReservableNodesRatio()) +
-                " numAllowedReservations=" + numAllowedReservations);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Reservation Exceeds Allowed number of nodes:" +
+                  " app_id=" + getApplicationId() +
+                  " existingReservations=" + existingReservations +
+                  " totalAvailableNodes=" + totalAvailNodes +
+                  " reservableNodesRatio=" + df.format(
+                                          scheduler.getReservableNodesRatio()) +
+                  " numAllowedReservations=" + numAllowedReservations);
+        }
         return true;
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 98daae7e..e2fbf8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -73,7 +73,7 @@
   @BeforeClass
   public static void setup() throws Exception {
     YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true);
     conf.setBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED, true);
     conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
         MemoryTimelineStore.class, TimelineStore.class);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index df966537..9a12ca8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -54,6 +54,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs.Perms;
+import org.apache.zookeeper.data.ACL;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -65,6 +67,8 @@
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.List;
+
 import javax.crypto.SecretKey;
 
 public class TestZKRMStateStore extends RMStateStoreTestBase {
@@ -248,6 +252,70 @@
     return conf;
   }
 
+  private static boolean verifyZKACL(String id, String scheme, int perm,
+      List<ACL> acls) {
+    for (ACL acl : acls) {
+      if (acl.getId().getScheme().equals(scheme) &&
+          acl.getId().getId().startsWith(id) &&
+          acl.getPerms() == perm) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Tests that the RM can start with HA disabled after previously running
+   * with HA enabled, and can then be started in HA mode again after running
+   * with HA disabled. ZooKeeper should not throw a NoAuth exception and the
+   * RM should start successfully in each case.
+   */
+  @Test
+  public void testZKRootPathAcls() throws Exception {
+    StateChangeRequestInfo req = new StateChangeRequestInfo(
+        HAServiceProtocol.RequestSource.REQUEST_BY_USER);
+    String rootPath =
+        YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH + "/" +
+            ZKRMStateStore.ROOT_ZNODE_NAME;
+
+    // Start RM with HA enabled
+    Configuration conf = createHARMConf("rm1,rm2", "rm1", 1234);
+    conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+    ResourceManager rm = new MockRM(conf);
+    rm.start();
+    rm.getRMContext().getRMAdminService().transitionToActive(req);
+    List<ACL> acls =
+        ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath);
+    assertEquals(acls.size(), 2);
+    // CREATE and DELETE permissions for root node based on RM ID
+    verifyZKACL("digest", "localhost", Perms.CREATE | Perms.DELETE, acls);
+    verifyZKACL(
+        "world", "anyone", Perms.ALL ^ (Perms.CREATE | Perms.DELETE), acls);
+    rm.close();
+
+    // Now start RM with HA disabled. NoAuth Exception should not be thrown.
+    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false);
+    rm = new MockRM(conf);
+    rm.start();
+    rm.getRMContext().getRMAdminService().transitionToActive(req);
+    acls = ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath);
+    assertEquals(acls.size(), 1);
+    verifyZKACL("world", "anyone", Perms.ALL, acls);
+    rm.close();
+
+    // Start RM with HA enabled.
+    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+    rm = new MockRM(conf);
+    rm.start();
+    rm.getRMContext().getRMAdminService().transitionToActive(req);
+    acls = ((ZKRMStateStore)rm.getRMContext().getStateStore()).getACL(rootPath);
+    assertEquals(acls.size(), 2);
+    verifyZKACL("digest", "localhost", Perms.CREATE | Perms.DELETE, acls);
+    verifyZKACL(
+        "world", "anyone", Perms.ALL ^ (Perms.CREATE | Perms.DELETE), acls);
+    rm.close();
+  }
+
   @SuppressWarnings("unchecked")
   @Test
   public void testFencing() throws Exception {