Merged latest changes from trunk


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5442@1603355 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 333eb8e..4827390 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -193,7 +193,7 @@
       
       for (String spnegoPrincipal : spnegoPrincipals) {
         LOG.info("Login using keytab {}, for principal {}",
-            keytab, principal);
+            keytab, spnegoPrincipal);
         final KerberosConfiguration kerberosConfiguration =
             new KerberosConfiguration(keytab, spnegoPrincipal);
         final LoginContext loginContext =
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a70221e..e01bacc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -342,24 +342,6 @@
 
     HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
-  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
-
-    HADOOP-10520. Extended attributes definition and FileSystem APIs for
-    extended attributes. (Yi Liu via wang)
-
-    HADOOP-10546. Javadoc and other small fixes for extended attributes in
-    hadoop-common. (Charles Lamb via wang)
-
-    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
-
-    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
-
-    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
-
-    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
-
-    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
-
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -435,6 +417,25 @@
     TCP RST and miss session expiration event due to bug in client connection
     management. (cnauroth)
 
+    HADOOP-10376. Refactor refresh*Protocols into a single generic
+    refreshConfigProtocol. (Chris Li via Arpit Agarwal)
+
+    HADOOP-6350. Documenting Hadoop metrics. (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-10691. Improve the readability of 'hadoop fs -help'.
+    (Lei Xu via wang)
+
+    HADOOP-10688. Expose thread-level FileSystem StatisticsData (Sandy Ryza)
+
+    HADOOP-10657. Have RetryInvocationHandler log failover attempt at INFO
+    level. (Ming Ma via jing9)
+
+    HADOOP-10666. Remove Copyright /d/d/d/d Apache Software Foundation from
+    the source files license header. (Henry Saputra via wang)
+
+    HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs.
+    (Akira Ajisaka via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES 
@@ -547,7 +548,48 @@
 
     HADOOP-10664. TestNetUtils.testNormalizeHostName fails. (atm)
 
-Release 2.4.1 - UNRELEASED
+    HADOOP-10656. The password keystore file is not picked by LDAP group mapping
+    (brandonli)
+
+    HADOOP-10622. Shell.runCommand can deadlock (Gera Shegalov via jlowe)
+
+    HADOOP-10686. Writables are not always configured. 
+    (Abraham Elmahrek via kasha)
+
+    HADOOP-10678. SecurityUtil has unnecessary synchronization on collection
+    used for only tests. (Benoy Antony via cnauroth)
+
+    HADOOP-10683. Users authenticated with KERBEROS are recorded as being
+    authenticated with SIMPLE. (Benoy Antony via cnauroth)
+
+    HADOOP-10702. KerberosAuthenticationHandler does not log the principal names
+    correctly. (Benoy Antony via cnauroth)
+
+    HADOOP-10699. Fix build native library on mac osx (Binglin Chang via
+    jlowe)
+
+  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
+
+    HADOOP-10520. Extended attributes definition and FileSystem APIs for
+    extended attributes. (Yi Liu via wang)
+
+    HADOOP-10546. Javadoc and other small fixes for extended attributes in
+    hadoop-common. (Charles Lamb via wang)
+
+    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
+
+    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
+
+    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
+
+    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
+
+    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
+
+    HADOOP-10561. Copy command with preserve option should handle Xattrs.
+    (Yi Liu via cnauroth)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index e8f80dd..e0b2171 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -287,6 +287,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ipc\.proto\.RefreshCallQueueProtocolProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.proto\.GenericRefreshProtocolProtos.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 7f0b79a..ce14b21 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -318,6 +318,7 @@
                   <include>RefreshAuthorizationPolicyProtocol.proto</include>
                   <include>RefreshUserMappingsProtocol.proto</include>
                   <include>RefreshCallQueueProtocol.proto</include>
+                  <include>GenericRefreshProtocol.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index a548525..f264389 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -1,5 +1,3 @@
-# Copyright 2011 The Apache Software Foundation
-# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index ee9d70f..39e4f4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -1,8 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-
- Copyright 2011 The Apache Software Foundation
  
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 8ef5eb4..ef9acbf 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -1,5 +1,3 @@
-# Copyright 2011 The Apache Software Foundation
-# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
index 1516ea2..6a6d4b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
+++ b/hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
@@ -1,4 +1,662 @@
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.4.1 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.4.1 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.4.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2081">YARN-2081</a>.
+     Minor bug reported by Hong Zhiguo and fixed by Hong Zhiguo (applications/distributed-shell)<br>
+     <b>TestDistributedShell fails after YARN-1962</b><br>
+     <blockquote>java.lang.AssertionError: expected:&lt;1&gt; but was:&lt;0&gt;
+        at org.junit.Assert.fail(Assert.java:88)
+        at org.junit.Assert.failNotEquals(Assert.java:743)
+        at org.junit.Assert.assertEquals(Assert.java:118)
+        at org.junit.Assert.assertEquals(Assert.java:555)
+        at org.junit.Assert.assertEquals(Assert.java:542)
+        at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:198)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2066">YARN-2066</a>.
+     Minor bug reported by Ted Yu and fixed by Hong Zhiguo <br>
+     <b>Wrong field is referenced in GetApplicationsRequestPBImpl#mergeLocalToBuilder()</b><br>
+     <blockquote>{code}
+    if (this.finish != null) {
+      builder.setFinishBegin(start.getMinimumLong());
+      builder.setFinishEnd(start.getMaximumLong());
+    }
+{code}
+this.finish should be referenced in the if block.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2053">YARN-2053</a>.
+     Major sub-task reported by Sumit Mohanty and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Slider AM fails to restart: NPE in RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts</b><br>
+     <blockquote>Slider AppMaster restart fails with the following:
+{code}
+org.apache.hadoop.yarn.proto.YarnServiceProtos$RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts(YarnServiceProtos.java:2700)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2016">YARN-2016</a>.
+     Major bug reported by Venkat Ranganathan and fixed by Junping Du (resourcemanager)<br>
+     <b>Yarn getApplicationRequest start time range is not honored</b><br>
+     <blockquote>When we query for the previous applications by creating an instance of GetApplicationsRequest and setting the start time range and application tag, we see that the start range provided is not honored and all applications with the tag are returned
+
+Attaching a reproducer.
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1986">YARN-1986</a>.
+     Critical bug reported by Jon Bringhurst and fixed by Hong Zhiguo <br>
+     <b>In Fifo Scheduler, node heartbeat in between creating app and attempt causes NPE</b><br>
+     <blockquote>After upgrade from 2.2.0 to 2.4.0, NPE on first job start.
+
+-After RM was restarted, the job runs without a problem.-
+
+{noformat}
+19:11:13,441 FATAL ResourceManager:600 - Error in handling event type NODE_UPDATE to the scheduler
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.assignContainers(FifoScheduler.java:462)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.nodeUpdate(FifoScheduler.java:714)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:743)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:104)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:591)
+	at java.lang.Thread.run(Thread.java:744)
+19:11:13,443  INFO ResourceManager:604 - Exiting, bbye..
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1976">YARN-1976</a>.
+     Major bug reported by Yesha Vora and fixed by Junping Du <br>
+     <b>Tracking url missing http protocol for FAILED application</b><br>
+     <blockquote>Run yarn application -list -appStates FAILED; it does not print the http protocol name the way it does for FINISHED apps.
+
+{noformat}
+-bash-4.1$ yarn application -list -appStates FINISHED,FAILED,KILLED
+14/04/15 23:55:07 INFO client.RMProxy: Connecting to ResourceManager at host
+Total number of applications (application-types: [] and states: [FINISHED, FAILED, KILLED]):4
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+application_1397598467870_0004	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0004
+application_1397598467870_0003	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0003
+application_1397598467870_0002	           Sleep job	           MAPREDUCE	    hrt_qa	   default	            FAILED	            FAILED	           100%	host:8088/cluster/app/application_1397598467870_0002
+application_1397598467870_0001	          word count	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0001
+{noformat}
+
+It only prints 'host:8088/cluster/app/application_1397598467870_0002' instead of 'http://host:8088/cluster/app/application_1397598467870_0002'. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1975">YARN-1975</a>.
+     Major bug reported by Nathan Roberts and fixed by Mit Desai (resourcemanager)<br>
+     <b>Used resources shows escaped html in CapacityScheduler and FairScheduler page</b><br>
+     <blockquote>Used resources displays as &amp;amp;lt;memory:1111, vCores;&amp;amp;gt; with capacity scheduler
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1962">YARN-1962</a>.
+     Major sub-task reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>Timeline server is enabled by default</b><br>
+     <blockquote>Since the Timeline server is not yet mature or secured, enabling it by default might create some confusion.
+
+We were playing with 2.4.0 and found a lot of connection-refused exceptions for the distributed shell example. By the way, we didn't run the Timeline server because it is not secured yet.
+
+Although it is possible to explicitly turn it off through the yarn-site config, in my opinion this extra change for this new service is not worth it at this point.
+
+This JIRA is to turn it off by default.
+If there is agreement, I can put up a simple patch for this.
+
+{noformat}
+14/04/17 23:24:33 ERROR impl.TimelineClientImpl: Failed to get the response from the timeline server.
+com.sun.jersey.api.client.ClientHandlerException: java.net.ConnectException: Connection refused
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:149)
+	at com.sun.jersey.api.client.Client.handle(Client.java:648)
+	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
+	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
+	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingEntities(TimelineClientImpl.java:131)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:104)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.publishApplicationAttemptEvent(ApplicationMaster.java:1072)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.run(ApplicationMaster.java:515)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.main(ApplicationMaster.java:281)
+Caused by: java.net.ConnectException: Connection refused
+	at java.net.PlainSocketImpl.socketConnect(Native Method)
+	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
+	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:198)
+	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
+	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
+	at java.net.Socket.connect(Socket.java:579)
+	at java.net.Socket.connect(Socket.java:528)
+	at sun.net.NetworkClient.doConnect(NetworkClient.java:180)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:432)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:527)
+	at sun.net.www.http.HttpClient.&lt;init&gt;(HttpClient.java:211)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:308)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:326)
+	at sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:996)
+	at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:932)
+	at sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:850)
+	at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1091)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler$1$1.getOutputStream(URLConnectionClientHandler.java:225)
+	at com.sun.jersey.api.client.CommittingOutputStream.commitWrite(CommittingOutputStream.java:117)
+	at com.sun.jersey.api.client.CommittingOutputStream.write(CommittingOutputStream.java:89)
+	at org.codehaus.jackson.impl.Utf8Generator._flushBuffer(Utf8Generator.java:1754)
+	at org.codehaus.jackson.impl.Utf8Generator.flush(Utf8Generator.java:1088)
+	at org.codehaus.jackson.map.ObjectMapper.writeValue(ObjectMapper.java:1354)
+	at org.codehaus.jackson.jaxrs.JacksonJsonProvider.writeTo(JacksonJsonProvider.java:527)
+	at com.sun.jersey.api.client.RequestWriter.writeRequestEntity(RequestWriter.java:300)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler._invoke(URLConnectionClientHandler.java:204)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:147)
+	... 9 more
+
+{noformat}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1957">YARN-1957</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+     <b>ProportionalCapacityPreemptionPolicy handling of corner cases...</b><br>
+     <blockquote>The current version of ProportionalCapacityPreemptionPolicy should be improved to deal with the following two scenarios:
+1) when rebalancing over-capacity allocations, it potentially preempts without considering the maxCapacity constraints of a queue (i.e., preempting possibly more than strictly necessary)
+2) a zero capacity queue is preempted even if there is no demand (consistent with the old use of zero capacity to disable queues)
+
+The proposed patch fixes both issues and introduces a few new test cases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1947">YARN-1947</a>.
+     Major test reported by Jian He and fixed by Jian He <br>
+     <b>TestRMDelegationTokens#testRMDTMasterKeyStateOnRollingMasterKey is failing intermittently</b><br>
+     <blockquote>java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.security.TestRMDelegationTokens.testRMDTMasterKeyStateOnRollingMasterKey(TestRMDelegationTokens.java:117)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1934">YARN-1934</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Potential NPE in ZKRMStateStore caused by handling Disconnected event from ZK.</b><br>
+     <blockquote>For the ZK Disconnected event, zkClient is set to null. This is very prone to throwing an NPE.
+
+{noformat}
+        case Disconnected:
+          LOG.info("ZKRMStateStore Session disconnected");
+          oldZkClient = zkClient;
+          zkClient = null;
+          break;
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1933">YARN-1933</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestAMRestart and TestNodeHealthService failing sometimes on Windows</b><br>
+     <blockquote>TestNodeHealthService failures:
+testNodeHealthScript(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 1.405 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (The process cannot access the file because it is being used by another process)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScript(TestNodeHealthService.java:154)
+
+testNodeHealthScriptShouldRun(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 0 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (Access is denied)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScriptShouldRun(TestNodeHealthService.java:103)
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1932">YARN-1932</a>.
+     Blocker bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>Javascript injection on the job status page</b><br>
+     <blockquote>Scripts can be injected into the job status page because the diagnostics field is
+not sanitized. Whatever string you set there will show up on the jobs page as-is, i.e. if you put any script commands, they will be executed in the browser of the user who opens the page.
+
+We need to escape the diagnostic string so that the scripts are not run.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1931">YARN-1931</a>.
+     Blocker bug reported by Thomas Graves and fixed by Sandy Ryza (applications)<br>
+     <b>Private API change in YARN-1824 in 2.4 broke compatibility with previous releases</b><br>
+     <blockquote>YARN-1824 broke compatibility with previous 2.x releases by changing the APIs in org.apache.hadoop.yarn.util.Apps.{setEnvFromInputString,addToEnvironment}. The old API should be added back in.
+
+This affects any ApplicationMasters that were using this API. It also prevents previously built MapReduce libraries from working with the new YARN release, as MR uses this API.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1929">YARN-1929</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>DeadLock in RM when automatic failover is enabled.</b><br>
+     <blockquote>Deadlock detected in RM when automatic failover is enabled.
+
+
+{noformat}
+Found one Java-level deadlock:
+=============================
+"Thread-2":
+  waiting to lock monitor 0x00007fb514303cf0 (object 0x00000000ef153fd0, a org.apache.hadoop.ha.ActiveStandbyElector),
+  which is held by "main-EventThread"
+"main-EventThread":
+  waiting to lock monitor 0x00007fb514750a48 (object 0x00000000ef154020, a org.apache.hadoop.yarn.server.resourcemanager.EmbeddedElectorService),
+  which is held by "Thread-2"
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1928">YARN-1928</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestAMRMRPCNodeUpdates fails occasionally</b><br>
+     <blockquote>{code}
+junit.framework.AssertionFailedError: expected:&lt;0&gt; but was:&lt;4&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCNodeUpdates.testAMRMUnusableNodes(TestAMRMRPCNodeUpdates.java:136)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1926">YARN-1926</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>DistributedShell unit tests fail on Windows</b><br>
+     <blockquote>Couple of unit tests for the DistributedShell fail on Windows - specifically testDSShellWithShellScript and testDSRestartWithPreviousRunningContainers </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1924">YARN-1924</a>.
+     Critical bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>STATE_STORE_OP_FAILED happens when ZKRMStateStore tries to update app(attempt) before storing it</b><br>
+     <blockquote>Noticed on a HA cluster Both RM shut down with this error. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1920">YARN-1920</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestFileSystemApplicationHistoryStore.testMissingApplicationAttemptHistoryData fails in windows</b><br>
+     <blockquote>Though this was only failing in Windows, after debugging, I realized that the test fails because we are leaking a file-handle in the history service.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1914">YARN-1914</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>Test TestFSDownload.testDownloadPublicWithStatCache fails on Windows</b><br>
+     <blockquote>The TestFSDownload.testDownloadPublicWithStatCache test in hadoop-yarn-common consistently fails on Windows environments.
+
+The root cause is that the test checks for execute permission for all users on every ancestor of the target directory. On Windows, by default, group "Everyone" has no permissions on any directory in the install drive. It's unreasonable to expect this test to pass, and we should skip it on Windows.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1910">YARN-1910</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestAMRMTokens fails on windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1908">YARN-1908</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
+     <b>Distributed shell with custom script has permission error.</b><br>
+     <blockquote>Create test1.sh having "pwd".
+
+Run this command as user1:
+hadoop jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -shell_script test1.sh
+
+NM is run by the yarn user. An exception is thrown because the yarn user has no permissions on the custom script in the HDFS path. The custom script is created by the distributed shell app.
+{code}
+Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=yarn, access=WRITE, inode="/user/user1/DistributedShell/70":user1:user1:drwxr-xr-x
+	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:265)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1907">YARN-1907</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMApplicationHistoryWriter#testRMWritingMassiveHistory runs slow and intermittently fails</b><br>
+     <blockquote>The test has 10000 containers that it tries to clean up.
+The cleanup has a timeout of 20000ms, within which the test sometimes cannot finish the cleanup and fails with an assertion error.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1905">YARN-1905</a>.
+     Trivial test reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager)<br>
+     <b>TestProcfsBasedProcessTree must only run on Linux.</b><br>
+     <blockquote>The tests in {{TestProcfsBasedProcessTree}} only make sense on Linux, where the process tree calculations are based on reading the /proc file system.  Right now, not all of the individual tests are skipped when the OS is not Linux.  This patch will make it consistent.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1903">YARN-1903</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Killing Container on NEW and LOCALIZING will result in exitCode and diagnostics not set</b><br>
+     <blockquote>The container status after stopping the container is not as expected.
+{code}
+java.lang.AssertionError: 4: 
+	at org.junit.Assert.fail(Assert.java:93)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testGetContainerStatus(TestNMClient.java:382)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testContainerManagement(TestNMClient.java:346)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testNMClient(TestNMClient.java:226)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1898">YARN-1898</a>.
+     Major sub-task reported by Yesha Vora and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Standby RM's conf, stacks, logLevel, metrics, jmx and logs links are redirecting to Active RM</b><br>
+     <blockquote>The standby RM's /conf, /stacks, /logLevel, /metrics, and /jmx links are redirected to the active RM.
+
+They should not be redirected to the active RM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1892">YARN-1892</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Jian He (scheduler)<br>
+     <b>Excessive logging in RM</b><br>
+     <blockquote>Mostly in the CS I believe
+
+{code}
+ INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt: Application application_1395435468498_0011 reserved container container_1395435468498_0011_01_000213 on node host:  #containers=5 available=4096 used=20960, currently has 1 at priority 4; currentReservation 4096
+{code}
+
+{code}
+INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue: hive2 usedResources: &lt;memory:20480, vCores:5&gt; clusterResources: &lt;memory:81920, vCores:16&gt; currentCapacity 0.25 required &lt;memory:4096, vCores:1&gt; potentialNewCapacity: 0.255 (  max-capacity: 0.25)
+{code}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1883">YARN-1883</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMAdminService fails due to inconsistent entries in UserGroups</b><br>
+     <blockquote>testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider fails with the following error:
+{noformat}
+java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider(TestRMAdminService.java:421)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testOrder(TestRMAdminService.java:104)
+{noformat}
+
+Line numbers will be inconsistent as I was testing running it in a particular order, but the line on which the failure occurs is
+{code}
+Assert.assertTrue(groupBefore.contains("test_group_A")
+        &amp;&amp; groupBefore.contains("test_group_B")
+        &amp;&amp; groupBefore.contains("test_group_C") &amp;&amp; groupBefore.size() == 3);
+{code}
+
+testRMInitialsWithFileSystemBasedConfigurationProvider() and
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider()
+call the function {{MockUnixGroupsMapping.updateGroups()}}, which changes the list of user groups.
+
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() tries to verify the groups before changing it and fails if testRMInitialsWithFileSystemBasedConfigurationProvider() already ran and made the changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1861">YARN-1861</a>.
+     Blocker sub-task reported by Arpit Gupta and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Both RM stuck in standby mode when automatic failover is enabled</b><br>
+     <blockquote>In our HA tests we noticed that the tests got stuck because both RM's got into standby state and no one became active.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1837">YARN-1837</a>.
+     Major bug reported by Tsuyoshi OZAWA and fixed by Hong Zhiguo <br>
+     <b>TestMoveApplication.testMoveRejectedByScheduler randomly fails</b><br>
+     <blockquote>TestMoveApplication#testMoveRejectedByScheduler fails because of NullPointerException. It looks caused by unhandled exception handling at server-side.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1750">YARN-1750</a>.
+     Major test reported by Ming Ma and fixed by Wangda Tan (nodemanager)<br>
+     <b>TestNodeStatusUpdater#testNMRegistration is incorrect in test case</b><br>
+     <blockquote>This test case passes. However, the test output log has
+
+java.lang.AssertionError: Number of applications should only be one! expected:&lt;1&gt; but was:&lt;2&gt;
+        at org.junit.Assert.fail(Assert.java:93)
+        at org.junit.Assert.failNotEquals(Assert.java:647)
+        at org.junit.Assert.assertEquals(Assert.java:128)
+        at org.junit.Assert.assertEquals(Assert.java:472)
+        at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyResourceTracker.nodeHeartbeat(TestNodeStatusUpdater.java:267)
+        at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl$1.run(NodeStatusUpdaterImpl.java:469)
+        at java.lang.Thread.run(Thread.java:695)
+
+TestNodeStatusUpdater.java has invalid asserts.
+
+      } else if (heartBeatID == 3) {
+        // Checks on the RM end
+        Assert.assertEquals("Number of applications should only be one!", 1,
+            appToContainers.size());
+        Assert.assertEquals("Number of container for the app should be two!",
+            2, appToContainers.get(appId2).size());
+
+
+We should fix the assert and add more checks to the test.
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1701">YARN-1701</a>.
+     Major sub-task reported by Gera Shegalov and fixed by Tsuyoshi OZAWA <br>
+     <b>Improve default paths of timeline store and generic history store</b><br>
+     <blockquote>When I enable AHS via yarn.ahs.enabled, the app history is still not visible in AHS webUI. This is due to NullApplicationHistoryStore as yarn.resourcemanager.history-writer.class. It would be good to have just one key to enable basic functionality.
+
+yarn.ahs.fs-history-store.uri uses {code}${hadoop.log.dir}{code}, which is a local file system location. However, FileSystemApplicationHistoryStore uses DFS by default.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1696">YARN-1696</a>.
+     Blocker sub-task reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Document RM HA</b><br>
+     <blockquote>Add documentation for RM HA. Marking this a blocker for 2.4 as this is required to call RM HA Stable and ready for public consumption. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1281">YARN-1281</a>.
+     Major test reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>TestZKRMStateStoreZKClientConnections fails intermittently</b><br>
+     <blockquote>The test fails intermittently - haven't been able to reproduce the failure deterministically. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1201">YARN-1201</a>.
+     Minor bug reported by Nemon Lou and fixed by Wangda Tan (resourcemanager)<br>
+     <b>TestAMAuthorization fails with local hostname cannot be resolved</b><br>
+     <blockquote>When hostname is 158-1-131-10, TestAMAuthorization fails.
+{code}
+Running org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0, Time elapsed: 14.034 sec &lt;&lt;&lt; FAILURE! - in org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+testUnauthorizedAccess[0](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.952 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+testUnauthorizedAccess[1](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.116 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+
+Results :
+
+Tests in error:
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0
+
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5843">MAPREDUCE-5843</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestMRKeyValueTextInputFormat failing on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5841">MAPREDUCE-5841</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
+     <b>uber job doesn't terminate on getting mapred job kill</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5835">MAPREDUCE-5835</a>.
+     Critical bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Killing Task might cause the job to go to ERROR state</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5833">MAPREDUCE-5833</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestRMContainerAllocator fails occasionally</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5832">MAPREDUCE-5832</a>.
+     Major bug reported by Jian He and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Few tests in TestJobClient fail on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5828">MAPREDUCE-5828</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestMapReduceJobControl fails on JDK 7 + Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5827">MAPREDUCE-5827</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestSpeculativeExecutionWithMRApp fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5826">MAPREDUCE-5826</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestHistoryServerFileSystemStateStoreService.testTokenStore fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5824">MAPREDUCE-5824</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestPipesNonJavaInputFormat.testFormat fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5821">MAPREDUCE-5821</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (performance , task)<br>
+     <b>IFile merge allocates new byte array for every value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5818">MAPREDUCE-5818</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>hsadmin cmd is missing in mapred.cmd</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5815">MAPREDUCE-5815</a>.
+     Blocker bug reported by Gera Shegalov and fixed by Akira AJISAKA (client , mrv2)<br>
+     <b>Fix NPE in TestMRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5714">MAPREDUCE-5714</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (test)<br>
+     <b>TestMRAppComponentDependencies causes surefire to exit without saying proper goodbye</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3191">MAPREDUCE-3191</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Chen He <br>
+     <b>docs for map output compression incorrectly reference SequenceFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6411">HDFS-6411</a>.
+     Major bug reported by Zhongyi Xie and fixed by Brandon Li (nfs)<br>
+     <b>nfs-hdfs-gateway mount raises I/O error and hangs when an unauthorized user attempts to access it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6402">HDFS-6402</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Suppress findbugs warning for failure to override equals and hashCode in FsAclPermission.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6397">HDFS-6397</a>.
+     Critical bug reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>NN shows inconsistent value in deadnode count </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6362">HDFS-6362</a>.
+     Blocker bug reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>InvalidateBlocks is inconsistent in usage of DatanodeUuid and StorageID</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6361">HDFS-6361</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (nfs)<br>
+     <b>TestIdUserGroup.testUserUpdateSetting failed due to out of range nfsnobody Id</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6340">HDFS-6340</a>.
+     Blocker bug reported by Rahul Singhal and fixed by Rahul Singhal (datanode)<br>
+     <b>DN can't finalize upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6329">HDFS-6329</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>WebHdfs does not work if HA is enabled on NN but logical URI is not configured.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6326">HDFS-6326</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHdfs ACL compatibility is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6325">HDFS-6325</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Keith Pak (namenode)<br>
+     <b>Append should fail if the last block has insufficient number of replicas</b><br>
+     <blockquote>I have committed the fix to the trunk, branch-2, and branch-2.4 respectively. Thanks Keith!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6313">HDFS-6313</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Kihwal Lee (webhdfs)<br>
+     <b>WebHdfs may use the wrong NN when configured for multiple HA NNs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6245">HDFS-6245</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Agarwal <br>
+     <b>datanode fails to start with a bad disk even when failed volumes is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6236">HDFS-6236</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>ImageServlet should use Time#monotonicNow to measure latency.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6235">HDFS-6235</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode , test)<br>
+     <b>TestFileJournalManager can fail on Windows due to file locking if tests run out of order.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6234">HDFS-6234</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode , test)<br>
+     <b>TestDatanodeConfig#testMemlockLimit fails on Windows due to invalid file path.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6232">HDFS-6232</a>.
+     Major bug reported by Stephen Chu and fixed by Akira AJISAKA (tools)<br>
+     <b>OfflineEditsViewer throws a NPE on edits containing ACL modifications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6231">HDFS-6231</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (hdfs-client)<br>
+     <b>DFSClient hangs infinitely if using hedged reads and all eligible datanodes die.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6229">HDFS-6229</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (ha)<br>
+     <b>Race condition in failover can cause RetryCache fail to work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6215">HDFS-6215</a>.
+     Minor bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Wrong error message for upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6209">HDFS-6209</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix flaky test TestValidateConfigurationSettings.testThatDifferentRPCandHttpPortsAreOK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6208">HDFS-6208</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode caching can leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6206">HDFS-6206</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze <br>
+     <b>DFSUtil.substituteForWildcardAddress may throw NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6204">HDFS-6204</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestRBWBlockInvalidation may fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6198">HDFS-6198</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode rolling upgrade does not correctly identify current block pool directory and replace with trash on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6197">HDFS-6197</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Rolling upgrade rollback on Windows can fail attempting to rename edit log segment files to a destination that already exists.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6189">HDFS-6189</a>.
+     Major test reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Multiple HDFS tests fail on Windows attempting to use a test root path containing a colon.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4052">HDFS-4052</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>BlockManager#invalidateWork should print logs outside the lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2882">HDFS-2882</a>.
+     Major bug reported by Todd Lipcon and fixed by Vinayakumar B (datanode)<br>
+     <b>DN continues to start up, even if block pool fails to initialize</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10612">HADOOP-10612</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS failed to refresh the user group id mapping table</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10562">HADOOP-10562</a>.
+     Critical bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Namenode exits on exception without printing stack trace in AbstractDelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10527">HADOOP-10527</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Fix incorrect return code and allow more retries on EINTR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10522">HADOOP-10522</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>JniBasedUnixGroupMapping mishandles errors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10490">HADOOP-10490</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestMapFile and TestBloomMapFile leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10473">HADOOP-10473</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestCallQueueManager is still flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10466">HADOOP-10466</a>.
+     Minor improvement reported by Nicolas Liochon and fixed by Nicolas Liochon (security)<br>
+     <b>Lower the log level in UserGroupInformation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10456">HADOOP-10456</a>.
+     Major bug reported by Nishkam Ravi and fixed by Nishkam Ravi (conf)<br>
+     <b>Bug in Configuration.java exposed by Spark (ConcurrentModificationException)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10455">HADOOP-10455</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (ipc)<br>
+     <b>When there is an exception, ipc.Server should first check whether it is a terse exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8826">HADOOP-8826</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Mit Desai <br>
+     <b>Docs still refer to 0.20.205 as stable line</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.4.0 Release Notes</title>
 <STYLE type="text/css">
 	H1 {font-family: sans-serif}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 2d4fb80..c5b7903 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -142,6 +142,9 @@
   public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE =
       "security.refresh.callqueue.protocol.acl";
+  public static final String
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH =
+      "security.refresh.generic.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 998db2f..deaceb3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2804,7 +2804,7 @@
      * be perceived as atomic with respect to other threads, which is all we
      * need.
      */
-    private static class StatisticsData {
+    public static class StatisticsData {
       volatile long bytesRead;
       volatile long bytesWritten;
       volatile int readOps;
@@ -2849,6 +2849,26 @@
             + readOps + " read ops, " + largeReadOps + " large read ops, "
             + writeOps + " write ops";
       }
+      
+      public long getBytesRead() {
+        return bytesRead;
+      }
+      
+      public long getBytesWritten() {
+        return bytesWritten;
+      }
+      
+      public int getReadOps() {
+        return readOps;
+      }
+      
+      public int getLargeReadOps() {
+        return largeReadOps;
+      }
+      
+      public int getWriteOps() {
+        return writeOps;
+      }
     }
 
     private interface StatisticsAggregator<T> {
@@ -2907,7 +2927,7 @@
     /**
      * Get or create the thread-local data associated with the current thread.
      */
-    private StatisticsData getThreadData() {
+    public StatisticsData getThreadStatistics() {
       StatisticsData data = threadData.get();
       if (data == null) {
         data = new StatisticsData(
@@ -2928,7 +2948,7 @@
      * @param newBytes the additional bytes read
      */
     public void incrementBytesRead(long newBytes) {
-      getThreadData().bytesRead += newBytes;
+      getThreadStatistics().bytesRead += newBytes;
     }
     
     /**
@@ -2936,7 +2956,7 @@
      * @param newBytes the additional bytes written
      */
     public void incrementBytesWritten(long newBytes) {
-      getThreadData().bytesWritten += newBytes;
+      getThreadStatistics().bytesWritten += newBytes;
     }
     
     /**
@@ -2944,7 +2964,7 @@
      * @param count number of read operations
      */
     public void incrementReadOps(int count) {
-      getThreadData().readOps += count;
+      getThreadStatistics().readOps += count;
     }
 
     /**
@@ -2952,7 +2972,7 @@
      * @param count number of large read operations
      */
     public void incrementLargeReadOps(int count) {
-      getThreadData().largeReadOps += count;
+      getThreadStatistics().largeReadOps += count;
     }
 
     /**
@@ -2960,7 +2980,7 @@
      * @param count number of write operations
      */
     public void incrementWriteOps(int count) {
-      getThreadData().writeOps += count;
+      getThreadStatistics().writeOps += count;
     }
 
     /**
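
The FileSystem.Statistics change above makes StatisticsData and its per-thread accessor public. Below is a minimal sketch of reading the per-thread counters through the new getters; the local filesystem, temp-file path, and class name are placeholders for illustration, not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ThreadStatsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        // Do a little I/O on this thread (placeholder path).
        Path p = new Path(System.getProperty("java.io.tmpdir"), "stats-demo.txt");
        FSDataOutputStream out = fs.create(p, true);
        out.writeBytes("hello");
        out.close();

        // Read the counters accumulated by the current thread only.
        for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
          FileSystem.Statistics.StatisticsData data = stats.getThreadStatistics();
          System.out.println(stats.getScheme() + ": "
              + data.getBytesWritten() + " bytes written, "
              + data.getWriteOps() + " write ops on this thread");
        }
      }
    }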
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 4b72de3..db73f6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -23,6 +23,7 @@
 import java.util.Arrays;
 import java.util.LinkedList;
 
+import org.apache.commons.lang.WordUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +32,7 @@
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -40,6 +42,8 @@
   
   static final Log LOG = LogFactory.getLog(FsShell.class);
 
+  private static final int MAX_LINE_WIDTH = 80;
+
   private FileSystem fs;
   private Trash trash;
   protected CommandFactory commandFactory;
@@ -117,7 +121,7 @@
     public static final String NAME = "usage";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays the usage for given command or all commands if none\n" +
+      "Displays the usage for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -137,7 +141,7 @@
     public static final String NAME = "help";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays help for given command or all commands if none\n" +
+      "Displays help for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -197,7 +201,7 @@
       for (String name : commandFactory.getNames()) {
         Command instance = commandFactory.getInstance(name);
         if (!instance.isDeprecated()) {
-          System.out.println("\t[" + instance.getUsage() + "]");
+          out.println("\t[" + instance.getUsage() + "]");
           instances.add(instance);
         }
       }
@@ -217,20 +221,48 @@
     out.println(usagePrefix + " " + instance.getUsage());
   }
 
-  // TODO: will eventually auto-wrap the text, but this matches the expected
-  // output for the hdfs tests...
   private void printInstanceHelp(PrintStream out, Command instance) {
-    boolean firstLine = true;
+    out.println(instance.getUsage() + " :");
+    TableListing listing = null;
+    final String prefix = "  ";
     for (String line : instance.getDescription().split("\n")) {
-      String prefix;
-      if (firstLine) {
-        prefix = instance.getUsage() + ":\t";
-        firstLine = false;
-      } else {
-        prefix = "\t\t";
+      if (line.matches("^[ \t]*[-<].*$")) {
+        String[] segments = line.split(":");
+        if (segments.length == 2) {
+          if (listing == null) {
+            listing = createOptionTableListing();
+          }
+          listing.addRow(segments[0].trim(), segments[1].trim());
+          continue;
+        }
       }
-      System.out.println(prefix + line);
-    }    
+
+      // Not an option line: flush any pending table, then word-wrap the text.
+      if (listing != null) {
+        for (String listingLine : listing.toString().split("\n")) {
+          out.println(prefix + listingLine);
+        }
+        listing = null;
+      }
+
+      for (String descLine : WordUtils.wrap(
+          line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
+        out.println(prefix + descLine);
+      }
+    }
+
+    if (listing != null) {
+      for (String listingLine : listing.toString().split("\n")) {
+        out.println(prefix + listingLine);
+      }
+    }
+  }
+
+  // Creates a two-column table: the first column holds the command line
+  // option, the second column holds the option description.
+  private TableListing createOptionTableListing() {
+    return new TableListing.Builder().addField("").addField("", true)
+        .wrapWidth(MAX_LINE_WIDTH).build();
   }
 
   /**
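
The new printInstanceHelp treats any description line whose first non-blank character is '-' or '<' and which contains a single ':' as an option/description pair for the table; everything else is word-wrapped at 80 columns. The standalone sketch below re-implements just that classification logic for illustration (it does not use TableListing).

    public class HelpLineSketch {
      public static void main(String[] args) {
        String[] lines = {
          "Delete all files that match the specified file pattern.",
          "-f: If the file does not exist, do not display a diagnostic message.",
          "<path>: File or directory to delete."
        };
        for (String line : lines) {
          if (line.matches("^[ \t]*[-<].*$") && line.split(":").length == 2) {
            String[] segments = line.split(":");
            System.out.println("table row: [" + segments[0].trim() + "] ["
                + segments[1].trim() + "]");
          } else {
            System.out.println("wrapped text: " + line);
          }
        }
      }
    }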
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
index 5ac10ce..0a82929 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
@@ -63,18 +63,18 @@
     public static final String NAME = "chmod";
     public static final String USAGE = "[-R] <MODE[,MODE]... | OCTALMODE> PATH...";
     public static final String DESCRIPTION =
-      "Changes permissions of a file.\n" +
-      "\tThis works similar to shell's chmod with a few exceptions.\n\n" +
-      "-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "MODE\tMode is same as mode used for chmod shell command.\n" +
-      "\tOnly letters recognized are 'rwxXt'. E.g. +t,a+r,g-w,+rwx,o=r\n\n" +
-      "OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may\n" +
-      "be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
-      "shell command, it is not possible to specify only part of the mode\n" +
-      "\tE.g. 754 is same as u=rwx,g=rx,o=r\n\n" +
-      "\tIf none of 'augo' is specified, 'a' is assumed and unlike\n" +
-      "\tshell command, no umask is applied.";
+      "Changes permissions of a file. " +
+      "This works similar to the shell's chmod command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option" +
+      " currently supported.\n" +
+      "<MODE>: Mode is the same as mode used for the shell's command. " +
+      "The only letters recognized are 'rwxXt', e.g. +t,a+r,g-w,+rwx,o=r.\n" +
+      "<OCTALMODE>: Mode specifed in 3 or 4 digits. If 4 digits, the first " +
+      "may be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
+      "the shell command, it is not possible to specify only part of the " +
+      "mode, e.g. 754 is same as u=rwx,g=rx,o=r.\n\n" +
+      "If none of 'augo' is specified, 'a' is assumed and unlike the " +
+      "shell command, no umask is applied.";
 
     protected ChmodParser pp;
 
@@ -121,18 +121,18 @@
     public static final String NAME = "chown";
     public static final String USAGE = "[-R] [OWNER][:[GROUP]] PATH...";
     public static final String DESCRIPTION =
-      "Changes owner and group of a file.\n" +
-      "\tThis is similar to shell's chown with a few exceptions.\n\n" +
-      "\t-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "\tIf only owner or group is specified then only owner or\n" +
-      "\tgroup is modified.\n\n" +
-      "\tThe owner and group names may only consist of digits, alphabet,\n"+
-      "\tand any of " + allowedChars + ". The names are case sensitive.\n\n" +
-      "\tWARNING: Avoid using '.' to separate user name and group though\n" +
-      "\tLinux allows it. If user names have dots in them and you are\n" +
-      "\tusing local file system, you might see surprising results since\n" +
-      "\tshell command 'chown' is used for local files.";
+      "Changes owner and group of a file. " +
+      "This is similar to the shell's chown command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option " +
+      "currently supported.\n\n" +
+      "If only the owner or group is specified, then only the owner or " +
+      "group is modified. " +
+      "The owner and group names may only consist of digits, alphabet, "+
+      "and any of " + allowedChars + ". The names are case sensitive.\n\n" +
+      "WARNING: Avoid using '.' to separate user name and group though " +
+      "Linux allows it. If user names have dots in them and you are " +
+      "using local file system, you might see surprising results since " +
+      "the shell command 'chown' is used for local files.";
 
     ///allows only "allowedChars" above in names for owner and group
     static private final Pattern chownPattern = Pattern.compile(
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
index e50be00..0ce7fbf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
@@ -298,4 +298,18 @@
     AclEntry aclEntry = builder.build();
     return aclEntry;
   }
+
+  /**
+   * Convert a List of AclEntries into a string - the reverse of parseAclSpec.
+   * @param aclSpec List of AclEntries to convert
+   * @return String representation of aclSpec
+   */
+  public static String aclSpecToString(List<AclEntry> aclSpec) {
+    StringBuilder buf = new StringBuilder();
+    for ( AclEntry e : aclSpec ) {
+      buf.append(e.toString());
+      buf.append(",");
+    }
+    return buf.substring(0, buf.length()-1);  // remove last ,
+  }
 }
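
The new aclSpecToString is the inverse of the existing AclEntry.parseAclSpec parser. A minimal round-trip sketch, assuming the two-argument parseAclSpec(String, boolean) signature that the shell commands use; for access-scope entries this should print the original spec back.

    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;

    public class AclSpecRoundTrip {
      public static void main(String[] args) {
        String spec = "user::rwx,user:bruce:rwx,group::r-x,other::---";
        // Parse the spec, then turn the entries back into a comma-separated spec.
        List<AclEntry> entries = AclEntry.parseAclSpec(spec, true);
        System.out.println(AclEntry.aclSpecToString(entries));
      }
    }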
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
index 5aa285c..206576c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
@@ -59,8 +59,8 @@
     public static String DESCRIPTION = "Displays the Access Control Lists"
         + " (ACLs) of files and directories. If a directory has a default ACL,"
         + " then getfacl also displays the default ACL.\n"
-        + "-R: List the ACLs of all files and directories recursively.\n"
-        + "<path>: File or directory to list.\n";
+        + "  -R: List the ACLs of all files and directories recursively.\n"
+        + "  <path>: File or directory to list.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -153,19 +153,19 @@
     public static String DESCRIPTION = "Sets Access Control Lists (ACLs)"
         + " of files and directories.\n" 
         + "Options:\n"
-        + "-b :Remove all but the base ACL entries. The entries for user,"
+        + "  -b :Remove all but the base ACL entries. The entries for user,"
         + " group and others are retained for compatibility with permission "
         + "bits.\n" 
-        + "-k :Remove the default ACL.\n"
-        + "-R :Apply operations to all files and directories recursively.\n"
-        + "-m :Modify ACL. New entries are added to the ACL, and existing"
+        + "  -k :Remove the default ACL.\n"
+        + "  -R :Apply operations to all files and directories recursively.\n"
+        + "  -m :Modify ACL. New entries are added to the ACL, and existing"
         + " entries are retained.\n"
-        + "-x :Remove specified ACL entries. Other ACL entries are retained.\n"
-        + "--set :Fully replace the ACL, discarding all existing entries."
+        + "  -x :Remove specified ACL entries. Other ACL entries are retained.\n"
+        + "  --set :Fully replace the ACL, discarding all existing entries."
         + " The <acl_spec> must include entries for user, group, and others"
         + " for compatibility with permission bits.\n"
-        + "<acl_spec>: Comma separated list of ACL entries.\n"
-        + "<path>: File or directory to modify.\n";
+        + "  <acl_spec>: Comma separated list of ACL entries.\n"
+        + "  <path>: File or directory to modify.\n";
 
     CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R",
         "m", "x", "-set");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 1684ec5..66b3629 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -22,7 +22,13 @@
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -34,6 +40,9 @@
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.PathOperationException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -45,7 +54,6 @@
 abstract class CommandWithDestination extends FsCommand {  
   protected PathData dst;
   private boolean overwrite = false;
-  private boolean preserve = false;
   private boolean verifyChecksum = true;
   private boolean writeChecksum = true;
   
@@ -74,7 +82,54 @@
    * implementation allows.
    */
   protected void setPreserve(boolean preserve) {
-    this.preserve = preserve;
+    if (preserve) {
+      preserve(FileAttribute.TIMESTAMPS);
+      preserve(FileAttribute.OWNERSHIP);
+      preserve(FileAttribute.PERMISSION);
+    } else {
+      preserveStatus.clear();
+    }
+  }
+  
+  protected static enum FileAttribute {
+    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
+
+    public static FileAttribute getAttribute(char symbol) {
+      for (FileAttribute attribute : values()) {
+        if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
+          return attribute;
+        }
+      }
+      throw new NoSuchElementException("No attribute for " + symbol);
+    }
+  }
+  
+  private EnumSet<FileAttribute> preserveStatus = 
+      EnumSet.noneOf(FileAttribute.class);
+  
+  /**
+   * Checks if the input attribute should be preserved or not
+   *
+   * @param attribute - Attribute to check
+   * @return boolean true if attribute should be preserved, false otherwise
+   */
+  private boolean shouldPreserve(FileAttribute attribute) {
+    return preserveStatus.contains(attribute);
+  }
+  
+  /**
+   * Add file attributes that need to be preserved. This method may be
+   * called multiple times to add attributes.
+   *
+   * @param fileAttribute - Attribute to add, one at a time
+   */
+  protected void preserve(FileAttribute fileAttribute) {
+    for (FileAttribute attribute : preserveStatus) {
+      if (attribute.equals(fileAttribute)) {
+        return;
+      }
+    }
+    preserveStatus.add(fileAttribute);
   }
 
   /**
@@ -243,19 +298,44 @@
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      if(preserve) {
+      if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
         target.fs.setTimes(
           target.path,
           src.stat.getModificationTime(),
           src.stat.getAccessTime());
+      }
+      if (shouldPreserve(FileAttribute.OWNERSHIP)) {
         target.fs.setOwner(
           target.path,
           src.stat.getOwner(),
           src.stat.getGroup());
+      }
+      if (shouldPreserve(FileAttribute.PERMISSION) ||
+          shouldPreserve(FileAttribute.ACL)) {
         target.fs.setPermission(
           target.path,
           src.stat.getPermission());
       }
+      if (shouldPreserve(FileAttribute.ACL)) {
+        FsPermission perm = src.stat.getPermission();
+        if (perm.getAclBit()) {
+          List<AclEntry> srcEntries =
+              src.fs.getAclStatus(src.path).getEntries();
+          List<AclEntry> srcFullEntries =
+              AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+          target.fs.setAcl(target.path, srcFullEntries);
+        }
+      }
+      if (shouldPreserve(FileAttribute.XATTR)) {
+        Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
+        if (srcXAttrs != null) {
+          Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
+          while (iter.hasNext()) {
+            Entry<String, byte[]> entry = iter.next();
+            target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          }
+        }
+      }
     } finally {
       IOUtils.closeStream(in);
     }
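
FileAttribute.getAttribute maps each character of the new cp "-p[topax]" argument (handled further down in CopyCommands.java) to an attribute by comparing the upper-cased character with the first letter of the enum constant. Since the enum is protected inside CommandWithDestination, the sketch below simply re-creates the same mapping for illustration.

    import java.util.EnumSet;
    import java.util.NoSuchElementException;

    public class PreserveFlagSketch {
      // Mirror of the FileAttribute mapping above, re-created for illustration.
      enum FileAttribute {
        TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;

        static FileAttribute getAttribute(char symbol) {
          for (FileAttribute attribute : values()) {
            if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
              return attribute;
            }
          }
          throw new NoSuchElementException("No attribute for " + symbol);
        }
      }

      public static void main(String[] args) {
        // "-ptopax": everything after "-p" names the attributes to preserve.
        EnumSet<FileAttribute> preserve = EnumSet.noneOf(FileAttribute.class);
        for (char c : "topax".toCharArray()) {
          preserve.add(FileAttribute.getAttribute(c));
        }
        System.out.println(preserve);
        // Prints [TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR]
      }
    }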
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 0e2283c..4dd2f4a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -23,6 +23,7 @@
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -54,10 +55,10 @@
     public static final String NAME = "getmerge";    
     public static final String USAGE = "[-nl] <src> <localdst>";
     public static final String DESCRIPTION =
-      "Get all the files in the directories that\n" +
-      "match the source file pattern and merge and sort them to only\n" +
+      "Get all the files in the directories that " +
+      "match the source file pattern and merge and sort them to only " +
       "one file on local fs. <src> is kept.\n" +
-      "  -nl   Add a newline character at the end of each file.";
+      "-nl: Add a newline character at the end of each file.";
 
     protected PathData dst = null;
     protected String delimiter = null;
@@ -132,24 +133,49 @@
 
   static class Cp extends CommandWithDestination {
     public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p] <src> ... <dst>";
+    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src> to a\n" +
-      "destination.  When copying multiple files, the destination\n" +
-      "must be a directory. Passing -p preserves access and\n" +
-      "modification times, ownership and the mode. Passing -f\n" +
-      "overwrites the destination if it already exists.\n";
-    
+      "Copy files that match the file pattern <src> to a " +
+      "destination.  When copying multiple files, the destination " +
+      "must be a directory. Passing -p preserves status " +
+      "[topax] (timestamps, ownership, permission, ACLs, XAttr). " +
+      "If -p is specified with no <arg>, then preserves " +
+      "timestamps, ownership, permission. If -pa is specified, " +
+      "then preserves permission also because ACL is a super-set of " +
+      "permission. Passing -f overwrites the destination if it " +
+      "already exists.\n";
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f", "p");
+      popPreserveOption(args);
+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
       cf.parse(args);
       setOverwrite(cf.getOpt("f"));
-      setPreserve(cf.getOpt("p"));
       // should have a -r option
       setRecursive(true);
       getRemoteDestination(args);
     }
+    
+    private void popPreserveOption(List<String> args) {
+      for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+        String cur = iter.next();
+        if (cur.equals("--")) {
+          // stop parsing arguments when you see --
+          break;
+        } else if (cur.startsWith("-p")) {
+          iter.remove();
+          if (cur.length() == 2) {
+            setPreserve(true);
+          } else {
+            String attributes = cur.substring(2);
+            for (int index = 0; index < attributes.length(); index++) {
+              preserve(FileAttribute.getAttribute(attributes.charAt(index)));
+            }
+          }
+          return;
+        }
+      }
+    }
   }
   
   /** 
@@ -160,10 +186,10 @@
     public static final String USAGE =
       "[-p] [-ignoreCrc] [-crc] <src> ... <localdst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src>\n" +
-      "to the local name.  <src> is kept.  When copying multiple,\n" +
-      "files, the destination must be a directory. Passing\n" +
-      "-p preserves access and modification times,\n" +
+      "Copy files that match the file pattern <src> " +
+      "to the local name.  <src> is kept.  When copying multiple " +
+      "files, the destination must be a directory. Passing " +
+      "-p preserves access and modification times, " +
       "ownership and the mode.\n";
 
     @Override
@@ -187,11 +213,11 @@
     public static final String NAME = "put";
     public static final String USAGE = "[-f] [-p] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files from the local file system\n" +
-      "into fs. Copying fails if the file already\n" +
-      "exists, unless the -f flag is given. Passing\n" +
-      "-p preserves access and modification times,\n" +
-      "ownership and the mode. Passing -f overwrites\n" +
+      "Copy files from the local file system " +
+      "into fs. Copying fails if the file already " +
+      "exists, unless the -f flag is given. Passing " +
+      "-p preserves access and modification times, " +
+      "ownership and the mode. Passing -f overwrites " +
       "the destination if it already exists.\n";
 
     @Override
@@ -254,9 +280,9 @@
     public static final String NAME = "appendToFile";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION =
-        "Appends the contents of all the given local files to the\n" +
-            "given dst file. The dst file will be created if it does\n" +
-            "not exist. If <localSrc> is -, then the input is read\n" +
+        "Appends the contents of all the given local files to the " +
+            "given dst file. The dst file will be created if it does " +
+            "not exist. If <localSrc> is -, then the input is read " +
             "from stdin.";
 
     private static final int DEFAULT_IO_LENGTH = 1024 * 1024;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
index ed190d3..fcb0690 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
@@ -51,13 +51,13 @@
     public static final String NAME = "rm";
     public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ...";
     public static final String DESCRIPTION =
-      "Delete all files that match the specified file pattern.\n" +
+      "Delete all files that match the specified file pattern. " +
       "Equivalent to the Unix command \"rm <src>\"\n" +
-      "-skipTrash option bypasses trash, if enabled, and immediately\n" +
+      "-skipTrash: option bypasses trash, if enabled, and immediately " +
       "deletes <src>\n" +
-      "  -f     If the file does not exist, do not display a diagnostic\n" +
-      "         message or modify the exit status to reflect an error.\n" +
-      "  -[rR]  Recursively deletes directories";
+      "-f: If the file does not exist, do not display a diagnostic " +
+      "message or modify the exit status to reflect an error.\n" +
+      "-[rR]:  Recursively deletes directories";
 
     private boolean skipTrash = false;
     private boolean deleteDirs = false;
@@ -147,7 +147,7 @@
     public static final String USAGE =
       "[--ignore-fail-on-non-empty] <dir> ...";
     public static final String DESCRIPTION =
-      "Removes the directory entry specified by each directory argument,\n" +
+      "Removes the directory entry specified by each directory argument, " +
       "provided it is empty.\n"; 
     
     private boolean ignoreNonEmpty = false;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index 79bb824..a72af7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -75,7 +75,7 @@
     public static final String NAME = "cat";
     public static final String USAGE = "[-ignoreCrc] <src> ...";
     public static final String DESCRIPTION =
-      "Fetch all files that match the file pattern <src> \n" +
+      "Fetch all files that match the file pattern <src> " +
       "and display their content on stdout.\n";
 
     private boolean verifyChecksum = true;
@@ -170,11 +170,11 @@
     public static final String NAME = "checksum";
     public static final String USAGE = "<src> ...";
     public static final String DESCRIPTION =
-      "Dump checksum information for files that match the file\n" +
-      "pattern <src> to stdout. Note that this requires a round-trip\n" +
-      "to a datanode storing each block of the file, and thus is not\n" +
-      "efficient to run on a large number of files. The checksum of a\n" +
-      "file depends on its content, block size and the checksum\n" +
+      "Dump checksum information for files that match the file " +
+      "pattern <src> to stdout. Note that this requires a round-trip " +
+      "to a datanode storing each block of the file, and thus is not " +
+      "efficient to run on a large number of files. The checksum of a " +
+      "file depends on its content, block size and the checksum " +
       "algorithm and parameters used for creating the file.";
 
     @Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
index a994945..f48ba16 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
@@ -57,12 +57,12 @@
     public static final String NAME = "df";
     public static final String USAGE = "[-h] [<path> ...]";
     public static final String DESCRIPTION =
-      "Shows the capacity, free and used space of the filesystem.\n"+
-      "If the filesystem has multiple partitions, and no path to a\n" +
-      "particular partition is specified, then the status of the root\n" +
+      "Shows the capacity, free and used space of the filesystem. "+
+      "If the filesystem has multiple partitions, and no path to a " +
+      "particular partition is specified, then the status of the root " +
       "partitions will be shown.\n" +
-      "  -h   Formats the sizes of files in a human-readable fashion\n" +
-      "       rather than a number of bytes.\n\n";
+      "-h: Formats the sizes of files in a human-readable fashion " +
+      "rather than a number of bytes.";
     
     @Override
     protected void processOptions(LinkedList<String> args)
@@ -108,14 +108,14 @@
     public static final String NAME = "du";
     public static final String USAGE = "[-s] [-h] <path> ...";
     public static final String DESCRIPTION =
-    "Show the amount of space, in bytes, used by the files that\n" +
+    "Show the amount of space, in bytes, used by the files that " +
     "match the specified file pattern. The following flags are optional:\n" +
-    "  -s   Rather than showing the size of each individual file that\n" +
-    "       matches the pattern, shows the total (summary) size.\n" +
-    "  -h   Formats the sizes of files in a human-readable fashion\n" +
-    "       rather than a number of bytes.\n\n" +
-    "Note that, even without the -s option, this only shows size summaries\n" +
-    "one level deep into a directory.\n" +
+    "-s: Rather than showing the size of each individual file that" +
+    " matches the pattern, shows the total (summary) size.\n" +
+    "-h: Formats the sizes of files in a human-readable fashion" +
+    " rather than a number of bytes.\n\n" +
+    "Note that, even without the -s option, this only shows size summaries " +
+    "one level deep into a directory.\n\n" +
     "The output is in the form \n" + 
     "\tsize\tname(full path)\n"; 
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index b2a1fbd..edc3b0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -49,16 +49,16 @@
   public static final String NAME = "ls";
   public static final String USAGE = "[-d] [-h] [-R] [<path> ...]";
   public static final String DESCRIPTION =
-		    "List the contents that match the specified file pattern. If\n" + 
-		    "path is not specified, the contents of /user/<currentUser>\n" +
-		    "will be listed. Directory entries are of the form \n" +
-		    "\tpermissions - userid groupid size_of_directory(in bytes) modification_date(yyyy-MM-dd HH:mm) directoryName \n" +
-		    "and file entries are of the form \n" + 
-		    "\tpermissions number_of_replicas userid groupid size_of_file(in bytes) modification_date(yyyy-MM-dd HH:mm) fileName \n" +
-		    "  -d  Directories are listed as plain files.\n" +
-		    "  -h  Formats the sizes of files in a human-readable fashion\n" +
-		    "      rather than a number of bytes.\n" +
-		    "  -R  Recursively list the contents of directories.";
+		    "List the contents that match the specified file pattern. If " +
+		    "path is not specified, the contents of /user/<currentUser> " +
+		    "will be listed. Directory entries are of the form:\n" +
+		    "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
+		    "and file entries are of the form:\n" +
+		    "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n" +
+		    "-d:  Directories are listed as plain files.\n" +
+		    "-h:  Formats the sizes of files in a human-readable fashion " +
+		    "rather than a number of bytes.\n" +
+		    "-R:  Recursively list the contents of directories.";
 		  
   
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 0500115..74bad62 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -44,7 +44,7 @@
   public static final String USAGE = "[-p] <path> ...";
   public static final String DESCRIPTION =
     "Create a directory in specified location.\n" +
-    "  -p  Do not fail if the directory already exists";
+    "-p: Do not fail if the directory already exists";
 
   private boolean createParents;
   
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index ffb3483..4e347ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -45,7 +45,7 @@
     public static final String NAME = "moveFromLocal";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION = 
-      "Same as -put, except that the source is\n" +
+      "Same as -put, except that the source is " +
       "deleted after it's copied.";
 
     @Override
@@ -87,8 +87,8 @@
     public static final String NAME = "mv";
     public static final String USAGE = "<src> ... <dst>";
     public static final String DESCRIPTION = 
-      "Move files that match the specified file pattern <src>\n" +
-      "to a destination <dst>.  When moving multiple files, the\n" +
+      "Move files that match the specified file pattern <src> " +
+      "to a destination <dst>.  When moving multiple files, the " +
       "destination must be a directory.";
 
     @Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
index 1f65fed..fab0349 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
@@ -41,12 +41,12 @@
   public static final String NAME = "setrep";
   public static final String USAGE = "[-R] [-w] <rep> <path> ...";
   public static final String DESCRIPTION =
-    "Set the replication level of a file. If <path> is a directory\n" +
-    "then the command recursively changes the replication factor of\n" +
+    "Set the replication level of a file. If <path> is a directory " +
+    "then the command recursively changes the replication factor of " +
     "all files under the directory tree rooted at <path>.\n" +
-    "The -w flag requests that the command wait for the replication\n" +
+    "-w: It requests that the command waits for the replication " +
     "to complete. This can potentially take a very long time.\n" +
-    "The -R flag is accepted for backwards compatibility. It has no effect.";
+    "-R: It is accepted for backwards compatibility. It has no effect.";
   
   protected short newRep = 0;
   protected List<PathData> waitList = new LinkedList<PathData>();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index d603439..652c928 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -51,8 +51,8 @@
   public static final String NAME = "stat";
   public static final String USAGE = "[format] <path> ...";
   public static final String DESCRIPTION =
-    "Print statistics about the file/directory at <path>\n" +
-    "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g),\n" +
+    "Print statistics about the file/directory at <path> " +
+    "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
     "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
 
   protected static final SimpleDateFormat timeFmt;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
index e6139db..1d49bf1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
@@ -43,7 +43,7 @@
   public static final String USAGE = "[-f] <file>";
   public static final String DESCRIPTION =
     "Show the last 1KB of the file.\n" +
-    "\t\tThe -f option shows appended data as the file grows.\n";
+    "-f: Shows appended data as the file grows.\n";
 
   private long startingOffset = -1024;
   private boolean follow = false;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
index 31f16ea..4cfdb08 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
@@ -43,8 +43,7 @@
     "  -e  return 0 if <path> exists.\n" +
     "  -f  return 0 if <path> is a file.\n" +
     "  -s  return 0 if file <path> is greater than zero bytes in size.\n" +
-    "  -z  return 0 if file <path> is zero bytes in size.\n" +
-    "else, return 1.";
+    "  -z  return 0 if file <path> is zero bytes in size, else return 1.";
 
   private char flag;
   
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
index a37df99..7925a0f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
@@ -47,8 +47,8 @@
     public static final String NAME = "touchz";
     public static final String USAGE = "<path> ...";
     public static final String DESCRIPTION =
-      "Creates a file of zero length\n" +
-      "at <path> with current time as the timestamp of that <path>.\n" +
+      "Creates a file of zero length " +
+      "at <path> with current time as the timestamp of that <path>. " +
       "An error is returned if the file exists with non-zero length\n";
 
     @Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
index c6dafbc..44e970b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
@@ -59,10 +59,10 @@
       "-R: Recursively list the attributes for all files and directories.\n" +
       "-n name: Dump the named extended attribute value.\n" +
       "-d: Dump all extended attribute values associated with pathname.\n" +
-      "-e <encoding>: Encode values after retrieving them.\n" +
-      "Valid encodings are \"text\", \"hex\", and \"base64\".\n" +
-      "Values encoded as text strings are enclosed in double quotes (\"),\n" +
-      " and values encoded as hexadecimal and base64 are prefixed with\n" +
+      "-e <encoding>: Encode values after retrieving them." +
+      "Valid encodings are \"text\", \"hex\", and \"base64\". " +
+      "Values encoded as text strings are enclosed in double quotes (\")," +
+      " and values encoded as hexadecimal and base64 are prefixed with " +
       "0x and 0s, respectively.\n" +
       "<path>: The file or directory.\n";
     private final static Function<String, XAttrCodec> enValueOfFunc =
@@ -137,11 +137,11 @@
     public static final String DESCRIPTION =
       "Sets an extended attribute name and value for a file or directory.\n" +
       "-n name: The extended attribute name.\n" +
-      "-v value: The extended attribute value. There are three different\n" +
-      "encoding methods for the value. If the argument is enclosed in double\n" +
-      "quotes, then the value is the string inside the quotes. If the\n" +
-      "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal\n" +
-      "number. If the argument begins with 0s or 0S, then it is taken as a\n" +
+      "-v value: The extended attribute value. There are three different " +
+      "encoding methods for the value. If the argument is enclosed in double " +
+      "quotes, then the value is the string inside the quotes. If the " +
+      "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal " +
+      "number. If the argument begins with 0s or 0S, then it is taken as a " +
       "base64 encoding.\n" +
       "-x name: Remove the extended attribute.\n" +
       "<path>: The file or directory.\n";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 8555ad7..dfa03e8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Map;
@@ -68,7 +69,7 @@
   protected final static Map<String, UsageInfo> USAGE =
     ImmutableMap.<String, UsageInfo>builder()
     .put("-transitionToActive",
-        new UsageInfo(" <serviceId> [--"+FORCEACTIVE+"]", "Transitions the service into Active state"))
+        new UsageInfo("<serviceId> [--"+FORCEACTIVE+"]", "Transitions the service into Active state"))
     .put("-transitionToStandby",
         new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
     .put("-failover",
@@ -104,7 +105,8 @@
   protected abstract HAServiceTarget resolveTarget(String string);
   
   protected Collection<String> getTargetIds(String targetNodeToActivate) {
-    return Arrays.asList(new String[]{targetNodeToActivate});
+    return new ArrayList<String>(
+        Arrays.asList(new String[]{targetNodeToActivate}));
   }
 
   protected String getUsageString() {
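
The getTargetIds change copies the result of Arrays.asList into an ArrayList because Arrays.asList returns a fixed-size view: callers that try to remove elements from it get an UnsupportedOperationException. A small sketch of the difference (the service IDs are placeholders).

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class MutableListSketch {
      public static void main(String[] args) {
        List<String> fixed = Arrays.asList("nn1", "nn2");
        List<String> mutable = new ArrayList<String>(Arrays.asList("nn1", "nn2"));

        Iterator<String> it = mutable.iterator();
        it.next();
        it.remove();                    // fine: backed by a real ArrayList
        System.out.println(mutable);    // [nn2]

        try {
          fixed.remove("nn1");          // Arrays.asList is fixed-size
        } catch (UnsupportedOperationException e) {
          System.out.println("fixed-size list cannot be modified");
        }
      }
    }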
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 1a571d6..84c9dcc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -256,7 +256,7 @@
       } else {
         keyClass= 
           (Class<? extends WritableComparable>) keyClassOption.getValue();
-        this.comparator = WritableComparator.get(keyClass);
+        this.comparator = WritableComparator.get(keyClass, conf);
       }
       this.lastKey = comparator.newKey();
       FileSystem fs = dirName.getFileSystem(conf);
@@ -428,12 +428,13 @@
       this.data = createDataFileReader(dataFile, conf, options);
       this.firstPosition = data.getPosition();
 
-      if (comparator == null)
-        this.comparator = 
-          WritableComparator.get(data.getKeyClass().
-                                   asSubclass(WritableComparable.class));
-      else
+      if (comparator == null) {
+        Class<? extends WritableComparable> cls;
+        cls = data.getKeyClass().asSubclass(WritableComparable.class);
+        this.comparator = WritableComparator.get(cls, conf);
+      } else {
         this.comparator = comparator;
+      }
 
       // open the index
       SequenceFile.Reader.Option[] indexOptions =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 2d42a93..0c5f315 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -2676,7 +2676,7 @@
     /** Sort and merge files containing the named classes. */
     public Sorter(FileSystem fs, Class<? extends WritableComparable> keyClass,
                   Class valClass, Configuration conf)  {
-      this(fs, WritableComparator.get(keyClass), keyClass, valClass, conf);
+      this(fs, WritableComparator.get(keyClass, conf), keyClass, valClass, conf);
     }
 
     /** Sort and merge using an arbitrary {@link RawComparator}. */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
index 068ca9d..118cce7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
@@ -52,7 +52,7 @@
                   Class<? extends WritableComparable> keyClass,
                   SequenceFile.CompressionType compress)
       throws IOException {
-      this(conf, fs, dirName, WritableComparator.get(keyClass), compress);
+      this(conf, fs, dirName, WritableComparator.get(keyClass, conf), compress);
     }
 
     /** Create a set naming the element comparator and compression type. */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
index d2cbe3b..b2738ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
@@ -24,6 +24,8 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /** A Comparator for {@link WritableComparable}s.
@@ -37,13 +39,21 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class WritableComparator implements RawComparator {
+public class WritableComparator implements RawComparator, Configurable {
 
   private static final ConcurrentHashMap<Class, WritableComparator> comparators 
           = new ConcurrentHashMap<Class, WritableComparator>(); // registry
 
-  /** Get a comparator for a {@link WritableComparable} implementation. */
+  private Configuration conf;
+
+  /** For backwards compatibility. **/
   public static WritableComparator get(Class<? extends WritableComparable> c) {
+    return get(c, null);
+  }
+
+  /** Get a comparator for a {@link WritableComparable} implementation. */
+  public static WritableComparator get(
+      Class<? extends WritableComparable> c, Configuration conf) {
     WritableComparator comparator = comparators.get(c);
     if (comparator == null) {
       // force the static initializers to run
@@ -52,12 +62,24 @@
       comparator = comparators.get(c);
       // if not, use the generic one
       if (comparator == null) {
-        comparator = new WritableComparator(c, true);
+        comparator = new WritableComparator(c, conf, true);
       }
     }
+    // Newly passed Configuration objects should be used.
+    ReflectionUtils.setConf(comparator, conf);
     return comparator;
   }
 
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
   /**
    * Force initialization of the static members.
    * As of Java 5, referencing a class doesn't force it to initialize. Since
@@ -91,12 +113,19 @@
 
   /** Construct for a {@link WritableComparable} implementation. */
   protected WritableComparator(Class<? extends WritableComparable> keyClass) {
-    this(keyClass, false);
+    this(keyClass, null, false);
   }
 
   protected WritableComparator(Class<? extends WritableComparable> keyClass,
       boolean createInstances) {
+    this(keyClass, null, createInstances);
+  }
+
+  protected WritableComparator(Class<? extends WritableComparable> keyClass,
+                               Configuration conf,
+                               boolean createInstances) {
     this.keyClass = keyClass;
+    this.conf = (conf != null) ? conf : new Configuration();
     if (createInstances) {
       key1 = newKey();
       key2 = newKey();
@@ -112,7 +141,7 @@
 
   /** Construct a new {@link WritableComparable} instance. */
   public WritableComparable newKey() {
-    return ReflectionUtils.newInstance(keyClass, null);
+    return ReflectionUtils.newInstance(keyClass, conf);
   }
 
   /** Optimization hook.  Override this to make SequenceFile.Sorter's scream.
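
With WritableComparator now Configurable, the two-argument get(keyClass, conf) passes the Configuration through to ReflectionUtils so Configuration-aware keys are initialized properly. A minimal usage sketch; Text is used here only as a convenient built-in key type.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableComparator;

    public class ComparatorConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The Configuration is handed to the comparator and any keys it creates.
        WritableComparator cmp = WritableComparator.get(Text.class, conf);
        int result = cmp.compare(new Text("alpha"), new Text("beta"));
        System.out.println(result < 0);  // true: "alpha" sorts before "beta"
      }
    }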
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index f3ba6cf..543567e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -136,9 +136,7 @@
               msg += " after " + invocationFailoverCount + " fail over attempts"; 
             }
             msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(msg, e);
-            }
+            LOG.info(msg, e);
           } else {
             if(LOG.isDebugEnabled()) {
               LOG.debug("Exception while invoking " + method.getName()
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java
new file mode 100644
index 0000000..bfa055b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * Protocol which is used to refresh arbitrary things at runtime.
+ */
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface GenericRefreshProtocol {
+  /**
+   * Version 1: Initial version.
+   */
+  public static final long versionID = 1L;
+
+  /**
+   * Refresh the resource based on the identifier passed in.
+   * @throws IOException
+   */
+  @Idempotent
+  Collection<RefreshResponse> refresh(String identifier, String[] args)
+      throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java
new file mode 100644
index 0000000..3fe9eb7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to register custom methods to refresh at runtime.
+ */
+@InterfaceStability.Unstable
+public interface RefreshHandler {
+  /**
+   * Implement this method to accept refresh requests from the administrator.
+   * @param identifier the identifier that was registered earlier
+   * @param args a list of string arguments from the administrator
+   * @return a RefreshResponse; throwing an unchecked exception is shorthand
+   *         for returning a RefreshResponse(-1, message)
+   */
+  RefreshResponse handleRefresh(String identifier, String[] args);
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
new file mode 100644
index 0000000..ee84a04
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to register custom methods to refresh at runtime.
+ * Each identifier maps to one or more RefreshHandlers.
+ */
+@InterfaceStability.Unstable
+public class RefreshRegistry {
+  public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
+
+  // Used to hold singleton instance
+  private static class RegistryHolder {
+    @SuppressWarnings("All")
+    public static RefreshRegistry registry = new RefreshRegistry();
+  }
+
+  // Singleton access
+  public static RefreshRegistry defaultRegistry() {
+    return RegistryHolder.registry;
+  }
+
+  private final Multimap<String, RefreshHandler> handlerTable;
+
+  public RefreshRegistry() {
+    handlerTable = HashMultimap.create();
+  }
+
+  /**
+   * Registers an object as a handler for a given identity.
+   * Note: registration prevents the handler from being garbage collected;
+   * the object should unregister itself when it is no longer needed.
+   * @param identifier a unique identifier for this resource,
+   *                   such as org.apache.hadoop.blacklist
+   * @param handler the object to register
+   */
+  public synchronized void register(String identifier, RefreshHandler handler) {
+    if (identifier == null) {
+      throw new NullPointerException("Identifier cannot be null");
+    }
+    handlerTable.put(identifier, handler);
+  }
+
+  /**
+   * Removes a registered handler for a given identity.
+   * @param identifier the resource to unregister from
+   * @param handler the handler to remove
+   * @return true if the handler was removed
+   */
+  public synchronized boolean unregister(String identifier, RefreshHandler handler) {
+    return handlerTable.remove(identifier, handler);
+  }
+
+  public synchronized void unregisterAll(String identifier) {
+    handlerTable.removeAll(identifier);
+  }
+
+  /**
+   * Looks up the responsible handlers and returns their results.
+   * This should be called by the RPC server when it gets a refresh request.
+   * @param identifier the resource to refresh
+   * @param args the arguments to pass on, not including the program name
+   * @throws IllegalArgumentException on invalid identifier
+   * @return the responses from all handlers registered for the identifier
+   */
+  public synchronized Collection<RefreshResponse> dispatch(String identifier, String[] args) {
+    Collection<RefreshHandler> handlers = handlerTable.get(identifier);
+
+    if (handlers.size() == 0) {
+      String msg = "Identifier '" + identifier +
+        "' does not exist in RefreshRegistry. Valid options are: " +
+        Joiner.on(", ").join(handlerTable.keySet());
+
+      throw new IllegalArgumentException(msg);
+    }
+
+    ArrayList<RefreshResponse> responses =
+      new ArrayList<RefreshResponse>(handlers.size());
+
+    // Dispatch to each handler and store response
+    for(RefreshHandler handler : handlers) {
+      RefreshResponse response;
+
+      // Run the handler
+      try {
+        response = handler.handleRefresh(identifier, args);
+        if (response == null) {
+          throw new NullPointerException("Handler returned null.");
+        }
+
+        LOG.info(handlerName(handler) + " responds to '" + identifier +
+          "', says: '" + response.getMessage() + "', returns " +
+          response.getReturnCode());
+      } catch (Exception e) {
+        response = new RefreshResponse(-1, e.getLocalizedMessage());
+      }
+
+      response.setSenderName(handlerName(handler));
+      responses.add(response);
+    }
+
+    return responses;
+  }
+
+  private String handlerName(RefreshHandler h) {
+    return h.getClass().getName() + '@' + Integer.toHexString(h.hashCode());
+  }
+}
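
For orientation, a minimal, hypothetical usage sketch of the registry follows; the identifier string and handler class are placeholders, not part of this change. An exception thrown from handleRefresh is converted by dispatch() into a RefreshResponse with return code -1.

import java.util.Collection;

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

// Hypothetical handler; identifier and class name are placeholders.
public class BlacklistRefresher implements RefreshHandler {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    // Reload whatever state the identifier refers to, then report success.
    return RefreshResponse.successResponse();
  }

  public static void main(String[] args) {
    BlacklistRefresher handler = new BlacklistRefresher();

    // Register under an identifier the administrator will later refer to.
    RefreshRegistry.defaultRegistry().register("my.example.blacklist", handler);

    // The RPC server dispatches an incoming refresh request roughly like this.
    Collection<RefreshResponse> responses = RefreshRegistry.defaultRegistry()
        .dispatch("my.example.blacklist", new String[] {"--reload"});
    for (RefreshResponse r : responses) {
      System.out.println(r);
    }

    // Unregister on shutdown so the handler can be garbage collected.
    RefreshRegistry.defaultRegistry().unregister("my.example.blacklist", handler);
  }
}
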
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java
new file mode 100644
index 0000000..493305a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Returned by a handler so that a result can be shown to the user.
+ * Useful because you may want to display status to a user even though
+ * no error has occurred.
+ */
+@InterfaceStability.Unstable
+public class RefreshResponse {
+  private int returnCode = -1;
+  private String message;
+  private String senderName;
+
+  /**
+   * Convenience method to create a response for successful refreshes.
+   * @return a response with return code 0 and the message "Success"
+   */
+  public static RefreshResponse successResponse() {
+    return new RefreshResponse(0, "Success");
+  }
+
+  // Most RefreshHandlers will use this
+  public RefreshResponse(int returnCode, String message) {
+    this.returnCode = returnCode;
+    this.message = message;
+  }
+
+  /**
+   * Optionally set the sender of this RefreshResponse.
+   * This helps clarify things when multiple handlers respond.
+   * @param name The name of the sender
+   */
+  public void setSenderName(String name) {
+    senderName = name;
+  }
+  public String getSenderName() { return senderName; }
+
+  public int getReturnCode() { return returnCode; }
+  public void setReturnCode(int rc) { returnCode = rc; }
+
+  public void setMessage(String m) { message = m; }
+  public String getMessage() { return message; }
+
+  @Override
+  public String toString() {
+    String ret = "";
+
+    if (senderName != null) {
+      ret += senderName + ": ";
+    }
+
+    if (message != null) {
+      ret += message;
+    }
+
+    ret += " (exit " + returnCode + ")";
+    return ret;
+  }
+}
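
A small made-up example of building a non-success response, mainly to illustrate the toString() format; the sender name and message below are invented.

import org.apache.hadoop.ipc.RefreshResponse;

// Made-up values, only to show how a response renders.
public class RefreshResponseExample {
  public static void main(String[] args) {
    RefreshResponse resp = new RefreshResponse(2, "blacklist file missing");
    resp.setSenderName("BlacklistRefresher");
    // Prints: BlacklistRefresher: blacklist file missing (exit 2)
    System.out.println(resp);
  }
}
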
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index f1afe19..0f11c97 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1221,7 +1221,7 @@
         ugi.addTokenIdentifier(tokenId);
         return ugi;
       } else {
-        return UserGroupInformation.createRemoteUser(authorizedId);
+        return UserGroupInformation.createRemoteUser(authorizedId, authMethod);
       }
     }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..078b2db
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class GenericRefreshProtocolClientSideTranslatorPB implements
+    ProtocolMetaInterface, GenericRefreshProtocol, Closeable {
+
+  /** RpcController is not used and hence is set to null. */
+  private final static RpcController NULL_CONTROLLER = null;
+  private final GenericRefreshProtocolPB rpcProxy;
+
+  public GenericRefreshProtocolClientSideTranslatorPB(
+      GenericRefreshProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  @Override
+  public void close() throws IOException {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public Collection<RefreshResponse> refresh(String identifier, String[] args) throws IOException {
+    List<String> argList = Arrays.asList(args);
+
+    try {
+      GenericRefreshRequestProto request = GenericRefreshRequestProto.newBuilder()
+        .setIdentifier(identifier)
+        .addAllArgs(argList)
+        .build();
+
+      GenericRefreshResponseCollectionProto resp = rpcProxy.refresh(NULL_CONTROLLER, request);
+      return unpack(resp);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+
+  private Collection<RefreshResponse> unpack(GenericRefreshResponseCollectionProto collection) {
+    List<GenericRefreshResponseProto> responseProtos = collection.getResponsesList();
+    List<RefreshResponse> responses = new ArrayList<RefreshResponse>();
+
+    for (GenericRefreshResponseProto rp : responseProtos) {
+      RefreshResponse response = unpack(rp);
+      responses.add(response);
+    }
+
+    return responses;
+  }
+
+  private RefreshResponse unpack(GenericRefreshResponseProto proto) {
+    // The default values
+    String message = null;
+    String sender = null;
+    int returnCode = -1;
+
+    // ... that can be overridden by data from the protobuf
+    if (proto.hasUserMessage()) {
+      message = proto.getUserMessage();
+    }
+    if (proto.hasExitStatus()) {
+      returnCode = proto.getExitStatus();
+    }
+    if (proto.hasSenderName()) {
+      sender = proto.getSenderName();
+    }
+
+    // ... and put into a RefreshResponse
+    RefreshResponse response = new RefreshResponse(returnCode, message);
+    response.setSenderName(sender);
+
+    return response;
+  }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+      GenericRefreshProtocolPB.class,
+      RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+      RPC.getProtocolVersion(GenericRefreshProtocolPB.class),
+      methodName);
+  }
+}
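
On the client side, a rough sketch (with placeholder host, port and identifier) of how this translator might be obtained over protobuf RPC and used; the admin tooling that eventually adopts the protocol may construct the proxy differently.

import java.net.InetSocketAddress;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
import org.apache.hadoop.net.NetUtils;

// Hypothetical client wiring; host, port and identifier are placeholders.
public class RefreshClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, GenericRefreshProtocolPB.class,
        ProtobufRpcEngine.class);

    InetSocketAddress addr =
        NetUtils.createSocketAddr("namenode.example.com:8020");
    GenericRefreshProtocolPB proxy = RPC.getProxy(
        GenericRefreshProtocolPB.class,
        RPC.getProtocolVersion(GenericRefreshProtocolPB.class), addr, conf);

    GenericRefreshProtocolClientSideTranslatorPB client =
        new GenericRefreshProtocolClientSideTranslatorPB(proxy);
    try {
      // Ask the server-side registry to refresh the named resource.
      Collection<RefreshResponse> responses =
          client.refresh("my.example.blacklist", new String[] {"--reload"});
      for (RefreshResponse r : responses) {
        System.out.println(r);
      }
    } finally {
      client.close(); // also stops the underlying RPC proxy
    }
  }
}
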
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java
new file mode 100644
index 0000000..930ae2b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
+
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.ipc.GenericRefreshProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Evolving
+public interface GenericRefreshProtocolPB extends
+  GenericRefreshProtocolService.BlockingInterface {
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..ae57cbd
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class GenericRefreshProtocolServerSideTranslatorPB implements
+    GenericRefreshProtocolPB {
+
+  private final GenericRefreshProtocol impl;
+
+  public GenericRefreshProtocolServerSideTranslatorPB(
+      GenericRefreshProtocol impl) {
+    this.impl = impl;
+  }
+
+  @Override
+  public GenericRefreshResponseCollectionProto refresh(
+      RpcController controller, GenericRefreshRequestProto request)
+      throws ServiceException {
+    try {
+      List<String> argList = request.getArgsList();
+      String[] args = argList.toArray(new String[argList.size()]);
+
+      if (!request.hasIdentifier()) {
+        throw new ServiceException("Request must contain identifier");
+      }
+
+      Collection<RefreshResponse> results = impl.refresh(request.getIdentifier(), args);
+
+      return pack(results);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  // Convert a collection of RefreshResponse objects to a
+  // GenericRefreshResponseCollectionProto
+  private GenericRefreshResponseCollectionProto pack(
+    Collection<RefreshResponse> responses) {
+    GenericRefreshResponseCollectionProto.Builder b =
+      GenericRefreshResponseCollectionProto.newBuilder();
+
+    for (RefreshResponse response : responses) {
+      GenericRefreshResponseProto.Builder respBuilder =
+        GenericRefreshResponseProto.newBuilder();
+      respBuilder.setExitStatus(response.getReturnCode());
+      respBuilder.setUserMessage(response.getMessage());
+      respBuilder.setSenderName(response.getSenderName());
+
+      // Add to collection
+      b.addResponses(respBuilder);
+    }
+
+    return b.build();
+  }
+}
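
On the server side, a daemon that wants to expose this protocol would wire the translator into its RPC server roughly as sketched below; the helper is hypothetical, and the addProtocol call is assumed to match the existing RPC.Server API.

import org.apache.hadoop.ipc.GenericRefreshProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB;

import com.google.protobuf.BlockingService;

// Hypothetical wiring helper for an already-constructed RPC server.
public class RefreshServerWiringSketch {
  static void addGenericRefresh(RPC.Server server, GenericRefreshProtocol impl) {
    GenericRefreshProtocolServerSideTranslatorPB xlator =
        new GenericRefreshProtocolServerSideTranslatorPB(impl);
    BlockingService service =
        GenericRefreshProtocolService.newReflectiveBlockingService(xlator);
    // Assumed to match RPC.Server#addProtocol(RpcKind, Class, Object).
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        GenericRefreshProtocolPB.class, service);
  }
}
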
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 754c387..a7ffc93 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * An implementation of {@link GroupMappingServiceProvider} which
@@ -312,8 +313,8 @@
     keystorePass =
         conf.get(LDAP_KEYSTORE_PASSWORD_KEY, LDAP_KEYSTORE_PASSWORD_DEFAULT);
     if (keystorePass.isEmpty()) {
-      keystorePass = extractPassword(
-        conf.get(LDAP_KEYSTORE_PASSWORD_KEY, LDAP_KEYSTORE_PASSWORD_DEFAULT));
+      keystorePass = extractPassword(conf.get(LDAP_KEYSTORE_PASSWORD_FILE_KEY,
+          LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT));
     }
     
     bindUser = conf.get(BIND_USER_KEY, BIND_USER_DEFAULT);
@@ -346,18 +347,20 @@
       return "";
     }
     
+    Reader reader = null;
     try {
       StringBuilder password = new StringBuilder();
-      Reader reader = new FileReader(pwFile);
+      reader = new FileReader(pwFile);
       int c = reader.read();
       while (c > -1) {
         password.append((char)c);
         c = reader.read();
       }
-      reader.close();
       return password.toString().trim();
     } catch (IOException ioe) {
       throw new RuntimeException("Could not read password file: " + pwFile, ioe);
+    } finally {
+      IOUtils.cleanup(LOG, reader);
     }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 623c3eb..b71fbda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -289,12 +289,10 @@
    */
   public static KerberosInfo 
   getKerberosInfo(Class<?> protocol, Configuration conf) {
-    synchronized (testProviders) {
-      for(SecurityInfo provider: testProviders) {
-        KerberosInfo result = provider.getKerberosInfo(protocol, conf);
-        if (result != null) {
-          return result;
-        }
+    for(SecurityInfo provider: testProviders) {
+      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
+      if (result != null) {
+        return result;
       }
     }
     
@@ -317,13 +315,11 @@
    * @return the TokenInfo or null if it has no KerberosInfo defined
    */
   public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
-    synchronized (testProviders) {
-      for(SecurityInfo provider: testProviders) {
-        TokenInfo result = provider.getTokenInfo(protocol, conf);
-        if (result != null) {
-          return result;
-        }      
-      }
+    for(SecurityInfo provider: testProviders) {
+      TokenInfo result = provider.getTokenInfo(protocol, conf);
+      if (result != null) {
+        return result;
+      }      
     }
     
     synchronized (securityInfoProviders) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index af552a7..1b024eb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1157,13 +1157,25 @@
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public static UserGroupInformation createRemoteUser(String user) {
+    return createRemoteUser(user, AuthMethod.SIMPLE);
+  }
+  
+  /**
+   * Create a user from a login name. It is intended to be used for remote
+   * users in RPC, since it won't have any credentials.
+   * @param user the full user principal name, must not be empty or null
+   * @param authMethod the authentication method the remote user used
+   * @return the UserGroupInformation for the remote user.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static UserGroupInformation createRemoteUser(String user, AuthMethod authMethod) {
     if (user == null || user.isEmpty()) {
       throw new IllegalArgumentException("Null user");
     }
     Subject subject = new Subject();
     subject.getPrincipals().add(new User(user));
     UserGroupInformation result = new UserGroupInformation(subject);
-    result.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
+    result.setAuthenticationMethod(authMethod);
     return result;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
index 46cc3c0..bc2e2d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.tools;
+package org.apache.hadoop.tools;
 
 import java.util.ArrayList;
 import java.util.LinkedList;
@@ -26,14 +26,14 @@
 
 /**
  * This class implements a "table listing" with column headers.
- * 
+ *
  * Example:
- * 
+ *
  * NAME   OWNER   GROUP   MODE       WEIGHT
  * pool1  andrew  andrew  rwxr-xr-x     100
  * pool2  andrew  andrew  rwxr-xr-x     100
  * pool3  andrew  andrew  rwxr-xr-x     100
- * 
+ *
  */
 @InterfaceAudience.Private
 public class TableListing {
@@ -141,14 +141,14 @@
 
     /**
      * Add a new field to the Table under construction.
-     * 
+     *
      * @param title Field title.
      * @param justification Right or left justification. Defaults to left.
      * @param wrap Width at which to auto-wrap the content of the cell.
      *        Defaults to Integer.MAX_VALUE.
      * @return This Builder object
      */
-    public Builder addField(String title, Justification justification, 
+    public Builder addField(String title, Justification justification,
         boolean wrap) {
       columns.add(new Column(title, justification, wrap));
       return this;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 0117fe5..e6f24a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -526,12 +526,8 @@
       }
       // wait for the process to finish and check the exit code
       exitCode  = process.waitFor();
-      try {
-        // make sure that the error thread exits
-        errThread.join();
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while reading the error stream", ie);
-      }
+      // make sure that the error thread exits
+      joinThread(errThread);
       completed.set(true);
       //the timeout thread handling
       //taken care in finally block
@@ -560,13 +556,9 @@
       } catch (IOException ioe) {
         LOG.warn("Error while closing the input stream", ioe);
       }
-      try {
-        if (!completed.get()) {
-          errThread.interrupt();
-          errThread.join();
-        }
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while joining errThread");
+      if (!completed.get()) {
+        errThread.interrupt();
+        joinThread(errThread);
       }
       try {
         InputStream stderr = process.getErrorStream();
@@ -581,6 +573,19 @@
     }
   }
 
+  private static void joinThread(Thread t) {
+    while (t.isAlive()) {
+      try {
+        t.join();
+      } catch (InterruptedException ie) {
+        if (LOG.isWarnEnabled()) {
+          LOG.warn("Interrupted while joining on: " + t, ie);
+        }
+        t.interrupt(); // propagate interrupt
+      }
+    }
+  }
+
   /** return an array containing the command name & its parameters */ 
   protected abstract String[] getExecString();
   
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index 1177d72..de73a8a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -73,7 +73,10 @@
   // was successful or not (as long as it was called we need to call
   // endnetgrent)
   setnetgrentCalledFlag = 1;
-#ifndef __FreeBSD__
+#if defined(__FreeBSD__) || defined(__MACH__)
+  setnetgrent(cgroup);
+  {
+#else
   if(setnetgrent(cgroup) == 1) {
 #endif
     current = NULL;
@@ -90,9 +93,7 @@
         userListSize++;
       }
     }
-#ifndef __FreeBSD__
   }
-#endif
 
   //--------------------------------------------------
   // build return data (java array)
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
new file mode 100644
index 0000000..fe46549
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ipc.proto";
+option java_outer_classname = "GenericRefreshProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.common;
+
+/**
+ *  Refresh request.
+ */
+message GenericRefreshRequestProto {
+    optional string identifier = 1;
+    repeated string args = 2;
+}
+
+/**
+ * A single response from a refresh handler.
+ */
+message GenericRefreshResponseProto {
+    optional int32 exitStatus = 1; // unix exit status to return
+    optional string userMessage = 2; // to be displayed to the user
+    optional string senderName = 3; // which handler sent this message
+}
+
+/**
+ * Collection of responses from zero or more handlers.
+ */
+message GenericRefreshResponseCollectionProto {
+    repeated GenericRefreshResponseProto responses = 1;
+}
+
+/**
+ * Protocol which is used to refresh a user-specified feature.
+ */
+service GenericRefreshProtocolService {
+  rpc refresh(GenericRefreshRequestProto)
+      returns(GenericRefreshResponseCollectionProto);
+}
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
index af33f59..4a82884 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
@@ -159,7 +159,7 @@
 
 cp
 
-   Usage: <<<hdfs dfs -cp [-f] URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topax]] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
@@ -167,6 +167,12 @@
     Options:
 
       * The -f option will overwrite the destination if it already exists.
+      
+      * The -p option will preserve file attributes [topax] (timestamps,
+        ownership, permission, ACL, XAttr). If -p is specified with no <arg>,
+        it preserves timestamps, ownership and permission. If -pa is
+        specified, it also preserves permission, because ACL is a super-set
+        of permission.
 
    Example:
 
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm
new file mode 100644
index 0000000..55e532d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm
@@ -0,0 +1,732 @@
+~~ Licensed to the Apache Software Foundation (ASF) under one or more
+~~ contributor license agreements.  See the NOTICE file distributed with
+~~ this work for additional information regarding copyright ownership.
+~~ The ASF licenses this file to You under the Apache License, Version 2.0
+~~ (the "License"); you may not use this file except in compliance with
+~~ the License.  You may obtain a copy of the License at
+~~
+~~     http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Metrics Guide
+  ---
+  ---
+  ${maven.build.timestamp}
+
+%{toc}
+
+Overview
+
+  Metrics are statistical information exposed by Hadoop daemons,
+  used for monitoring, performance tuning and debugging.
+  There are many metrics available by default
+  and they are very useful for troubleshooting.
+  This page shows the details of the available metrics.
+
+  Each section below describes one context into which metrics are grouped.
+
+  The documentation of Metrics 2.0 framework is
+  {{{../../api/org/apache/hadoop/metrics2/package-summary.html}here}}.
+
+jvm context
+
+* JvmMetrics
+
+  Each metrics record contains tags such as ProcessName, SessionID
+  and Hostname as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapUsedM>>> | Current non-heap memory used in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapCommittedM>>> | Current non-heap memory committed in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapMaxM>>> | Max non-heap memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapUsedM>>> | Current heap memory used in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapCommittedM>>> | Current heap memory committed in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapMaxM>>> | Max heap memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemMaxM>>> | Max memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsNew>>> | Current number of NEW threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsRunnable>>> | Current number of RUNNABLE threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsBlocked>>> | Current number of BLOCKED threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsWaiting>>> | Current number of WAITING threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsTimedWaiting>>> | Current number of TIMED_WAITING threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsTerminated>>> | Current number of TERMINATED threads
+*-------------------------------------+--------------------------------------+
+|<<<GcInfo>>>  | Total GC count and GC time in msec, grouped by the kind of GC. \
+               | e.g. GcCountPS Scavenge=6, GCTimeMillisPS Scavenge=40,
+               | GCCountPS MarkSweep=0, GCTimeMillisPS MarkSweep=0
+*-------------------------------------+--------------------------------------+
+|<<<GcCount>>> | Total GC count
+*-------------------------------------+--------------------------------------+
+|<<<GcTimeMillis>>> | Total GC time in msec
+*-------------------------------------+--------------------------------------+
+|<<<LogFatal>>> | Total number of FATAL logs
+*-------------------------------------+--------------------------------------+
+|<<<LogError>>> | Total number of ERROR logs
+*-------------------------------------+--------------------------------------+
+|<<<LogWarn>>> | Total number of WARN logs
+*-------------------------------------+--------------------------------------+
+|<<<LogInfo>>> | Total number of INFO logs
+*-------------------------------------+--------------------------------------+
+
+rpc context
+
+* rpc
+
+  Each metrics record contains tags such as Hostname
+  and port (the number to which the server is bound)
+  as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<ReceivedBytes>>> | Total number of received bytes
+*-------------------------------------+--------------------------------------+
+|<<<SentBytes>>> | Total number of sent bytes
+*-------------------------------------+--------------------------------------+
+|<<<RpcQueueTimeNumOps>>> | Total number of RPC calls
+*-------------------------------------+--------------------------------------+
+|<<<RpcQueueTimeAvgTime>>> | Average queue time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<RpcProcessingTimeNumOps>>> | Total number of RPC calls (same as
+                               | RpcQueueTimeNumOps)
+*-------------------------------------+--------------------------------------+
+|<<<RpcProcessingTimeAvgTime>>> | Average processing time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthenticationFailures>>> | Total number of authentication failures
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthenticationSuccesses>>> | Total number of authentication successes
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthorizationFailures>>> | Total number of authorization failures
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthorizationSuccesses>>> | Total number of authorization successes
+*-------------------------------------+--------------------------------------+
+|<<<NumOpenConnections>>> | Current number of open connections
+*-------------------------------------+--------------------------------------+
+|<<<CallQueueLength>>> | Current length of the call queue
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<sNumOps>>> | Shows total number of RPC calls
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<sNumOps>>> | Shows total number of RPC calls
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+
+* RetryCache/NameNodeRetryCache
+
+  RetryCache metrics are useful for monitoring NameNode fail-over.
+  Each metrics record contains a Hostname tag.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<CacheHit>>> | Total number of RetryCache hits
+*-------------------------------------+--------------------------------------+
+|<<<CacheCleared>>> | Total number of RetryCache clears
+*-------------------------------------+--------------------------------------+
+|<<<CacheUpdated>>> | Total number of RetryCache updates
+*-------------------------------------+--------------------------------------+
+
+rpcdetailed context
+
+  Metrics of the rpcdetailed context are exposed by the RPC layer in a
+  unified manner. Two metrics are exposed for each RPC based on its name.
+  The metric named "(RPC method name)NumOps" indicates the total number of
+  method calls, and the metric named "(RPC method name)AvgTime" shows the
+  average turnaround time for method calls in milliseconds.
+
+* rpcdetailed
+
+  Each metrics record contains tags such as Hostname
+  and port (the number to which the server is bound)
+  as additional information along with metrics.
+
+  Metrics for RPC methods that have not been called are not included
+  in the metrics record.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<methodname><<<NumOps>>> | Total number of times the method is called
+*-------------------------------------+--------------------------------------+
+|<methodname><<<AvgTime>>> | Average turnaround time of the method in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+
+dfs context
+
+* namenode
+
+  Each metrics record contains tags such as ProcessName, SessionId,
+  and Hostname as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<CreateFileOps>>> | Total number of files created
+*-------------------------------------+--------------------------------------+
+|<<<FilesCreated>>> | Total number of files and directories created by create
+                    | or mkdir operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesAppended>>> | Total number of files appended
+*-------------------------------------+--------------------------------------+
+|<<<GetBlockLocations>>> | Total number of getBlockLocations operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesRenamed>>> | Total number of rename <<operations>> (NOT number of
+                    | files/dirs renamed)
+*-------------------------------------+--------------------------------------+
+|<<<GetListingOps>>> | Total number of directory listing operations
+*-------------------------------------+--------------------------------------+
+|<<<DeleteFileOps>>> | Total number of delete operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesDeleted>>> | Total number of files and directories deleted by delete
+                    | or rename operations
+*-------------------------------------+--------------------------------------+
+|<<<FileInfoOps>>> | Total number of getFileInfo and getLinkFileInfo
+                   | operations
+*-------------------------------------+--------------------------------------+
+|<<<AddBlockOps>>> | Total number of successful addBlock operations
+*-------------------------------------+--------------------------------------+
+|<<<GetAdditionalDatanodeOps>>> | Total number of getAdditionalDatanode
+                                | operations
+*-------------------------------------+--------------------------------------+
+|<<<CreateSymlinkOps>>> | Total number of createSymlink operations
+*-------------------------------------+--------------------------------------+
+|<<<GetLinkTargetOps>>> | Total number of getLinkTarget operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesInGetListingOps>>> | Total number of files and directories listed by
+                            | directory listing operations
+*-------------------------------------+--------------------------------------+
+|<<<AllowSnapshotOps>>> | Total number of allowSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<DisallowSnapshotOps>>> | Total number of disallowSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<CreateSnapshotOps>>> | Total number of createSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<DeleteSnapshotOps>>> | Total number of deleteSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<RenameSnapshotOps>>> | Total number of renameSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<ListSnapshottableDirOps>>> | Total number of snapshottableDirectoryStatus
+                               | operations
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotDiffReportOps>>> | Total number of getSnapshotDiffReport
+                             | operations
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsNumOps>>> | Total number of Journal transactions
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsAvgTime>>> | Average time of Journal transactions in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<SyncsNumOps>>> | Total number of Journal syncs
+*-------------------------------------+--------------------------------------+
+|<<<SyncsAvgTime>>> | Average time of Journal syncs in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsBatchedInSync>>> | Total number of Journal transactions batched
+                                 | in sync
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportNumOps>>> | Total number of block reports processed from
+                         | DataNodes
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportAvgTime>>> | Average time of processing block reports in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportNumOps>>> | Total number of cache reports processed from
+                         | DataNodes
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportAvgTime>>> | Average time of processing cache reports in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<SafeModeTime>>> | The interval, in milliseconds, between FSNamesystem
+                    | startup and the last time safe mode was left. \
+                    | (sometimes not equal to the time in SafeMode,
+                    | see {{{https://issues.apache.org/jira/browse/HDFS-5156}HDFS-5156}})
+*-------------------------------------+--------------------------------------+
+|<<<FsImageLoadTime>>> | Time loading FS Image at startup in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<GetEditNumOps>>> | Total number of edits downloads from SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<GetEditAvgTime>>> | Average edits download time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<GetImageNumOps>>> |Total number of fsimage downloads from SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<GetImageAvgTime>>> | Average fsimage download time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PutImageNumOps>>> | Total number of fsimage uploads to SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<PutImageAvgTime>>> | Average fsimage upload time in milliseconds
+*-------------------------------------+--------------------------------------+
+
+* FSNamesystem
+
+  Each metrics record contains tags such as HAState and Hostname
+  as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<MissingBlocks>>> | Current number of missing blocks
+*-------------------------------------+--------------------------------------+
+|<<<ExpiredHeartbeats>>> | Total number of expired heartbeats
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsSinceLastCheckpoint>>> | Total number of transactions since
+                                       | last checkpoint
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsSinceLastLogRoll>>> | Total number of transactions since last
+                                    | edit log roll
+*-------------------------------------+--------------------------------------+
+|<<<LastWrittenTransactionId>>> | Last transaction ID written to the edit log
+*-------------------------------------+--------------------------------------+
+|<<<LastCheckpointTime>>> | Time in milliseconds since epoch of last checkpoint
+*-------------------------------------+--------------------------------------+
+|<<<CapacityTotal>>> | Current raw capacity of DataNodes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityTotalGB>>> | Current raw capacity of DataNodes in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsed>>> | Current used capacity across all DataNodes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsedGB>>> | Current used capacity across all DataNodes in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityRemaining>>> | Current remaining capacity in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityRemainingGB>>> | Current remaining capacity in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsedNonDFS>>> | Current space used by DataNodes for non DFS
+                          | purposes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<TotalLoad>>> | Current number of connections
+*-------------------------------------+--------------------------------------+
+|<<<SnapshottableDirectories>>> | Current number of snapshottable directories
+*-------------------------------------+--------------------------------------+
+|<<<Snapshots>>> | Current number of snapshots
+*-------------------------------------+--------------------------------------+
+|<<<BlocksTotal>>> | Current number of allocated blocks in the system
+*-------------------------------------+--------------------------------------+
+|<<<FilesTotal>>> | Current number of files and directories
+*-------------------------------------+--------------------------------------+
+|<<<PendingReplicationBlocks>>> | Current number of blocks pending to be
+                                | replicated
+*-------------------------------------+--------------------------------------+
+|<<<UnderReplicatedBlocks>>> | Current number of under-replicated blocks
+*-------------------------------------+--------------------------------------+
+|<<<CorruptBlocks>>> | Current number of blocks with corrupt replicas.
+*-------------------------------------+--------------------------------------+
+|<<<ScheduledReplicationBlocks>>> | Current number of blocks scheduled for
+                                  | replication
+*-------------------------------------+--------------------------------------+
+|<<<PendingDeletionBlocks>>> | Current number of blocks pending deletion
+*-------------------------------------+--------------------------------------+
+|<<<ExcessBlocks>>> | Current number of excess blocks
+*-------------------------------------+--------------------------------------+
+|<<<PostponedMisreplicatedBlocks>>> | (HA-only) Current number of blocks
+                                    | postponed for replication
+*-------------------------------------+--------------------------------------+
+|<<<PendingDataNodeMessageCount>>> | (HA-only) Current number of pending
+                                   | block-related messages for later
+                                   | processing in the standby NameNode
+*-------------------------------------+--------------------------------------+
+|<<<MillisSinceLastLoadedEdits>>> | (HA-only) Time in milliseconds since the
+                                  | standby NameNode last loaded the edit log.
+                                  | In the active NameNode, set to 0
+*-------------------------------------+--------------------------------------+
+|<<<BlockCapacity>>> | Current block capacity
+*-------------------------------------+--------------------------------------+
+|<<<StaleDataNodes>>> | Current number of DataNodes marked stale due to delayed
+                      | heartbeat
+*-------------------------------------+--------------------------------------+
+|<<<TotalFiles>>> | Current number of files and directories (same as FilesTotal)
+*-------------------------------------+--------------------------------------+
+
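+  As an example of how these values can be consumed, the FSNamesystem
+  metrics are also exposed through the NameNode's JMX JSON servlet. The
+  sketch below is an illustration only; the host name and HTTP port
+  (50070 here) are assumptions that depend on the cluster configuration.
+
+----
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.URL;
+
+// Minimal sketch: dump the FSNamesystem metrics served by the NameNode's
+// JMX JSON servlet. "namenode-host" and port 50070 are assumptions; use
+// the values from your own cluster.
+public class FsNamesystemMetricsDump {
+  public static void main(String[] args) throws Exception {
+    URL url = new URL("http://namenode-host:50070/jmx"
+        + "?qry=Hadoop:service=NameNode,name=FSNamesystem");
+    BufferedReader in = new BufferedReader(
+        new InputStreamReader(url.openStream(), "UTF-8"));
+    try {
+      String line;
+      while ((line = in.readLine()) != null) {
+        // The response is JSON containing MissingBlocks, CapacityTotal, ...
+        System.out.println(line);
+      }
+    } finally {
+      in.close();
+    }
+  }
+}
+----
+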
+* JournalNode
+
+  The server-side metrics for a journal from the JournalNode's perspective.
+  Each metrics record contains Hostname tag as additional information
+  along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60sNumOps>>> | Number of sync operations (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300sNumOps>>> | Number of sync operations (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (5 minutes granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600sNumOps>>> | Number of sync operations (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<BatchesWritten>>> | Total number of batches written since startup
+*-------------------------------------+--------------------------------------+
+|<<<TxnsWritten>>> | Total number of transactions written since startup
+*-------------------------------------+--------------------------------------+
+|<<<BytesWritten>>> | Total number of bytes written since startup
+*-------------------------------------+--------------------------------------+
+|<<<BatchesWrittenWhileLagging>>> | Total number of batches written where this
+| | node was lagging
+*-------------------------------------+--------------------------------------+
+|<<<LastWriterEpoch>>> | Current writer's epoch number
+*-------------------------------------+--------------------------------------+
+|<<<CurrentLagTxns>>> | The number of transactions that this JournalNode is
+| | lagging
+*-------------------------------------+--------------------------------------+
+|<<<LastWrittenTxId>>> | The highest transaction id stored on this JournalNode
+*-------------------------------------+--------------------------------------+
+|<<<LastPromisedEpoch>>> | The last epoch number which this node has promised
+| | not to accept any lower epoch, or 0 if no promises have been made
+*-------------------------------------+--------------------------------------+
+
+* datanode
+
+  Each metrics record contains tags such as SessionId and Hostname
+  as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<BytesWritten>>> | Total number of bytes written to DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BytesRead>>> | Total number of bytes read from DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksWritten>>> | Total number of blocks written to DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksRead>>> | Total number of blocks read from DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksReplicated>>> | Total number of blocks replicated
+*-------------------------------------+--------------------------------------+
+|<<<BlocksRemoved>>> | Total number of blocks removed
+*-------------------------------------+--------------------------------------+
+|<<<BlocksVerified>>> | Total number of blocks verified
+*-------------------------------------+--------------------------------------+
+|<<<BlockVerificationFailures>>> | Total number of block verification failures
+*-------------------------------------+--------------------------------------+
+|<<<BlocksCached>>> | Total number of blocks cached
+*-------------------------------------+--------------------------------------+
+|<<<BlocksUncached>>> | Total number of blocks uncached
+*-------------------------------------+--------------------------------------+
+|<<<ReadsFromLocalClient>>> | Total number of read operations from local client
+*-------------------------------------+--------------------------------------+
+|<<<ReadsFromRemoteClient>>> | Total number of read operations from remote
+                             | client
+*-------------------------------------+--------------------------------------+
+|<<<WritesFromLocalClient>>> | Total number of write operations from local
+                             | client
+*-------------------------------------+--------------------------------------+
+|<<<WritesFromRemoteClient>>> | Total number of write operations from remote
+                              | client
+*-------------------------------------+--------------------------------------+
+|<<<BlocksGetLocalPathInfo>>> | Total number of operations to get local path
+                              | names of blocks
+*-------------------------------------+--------------------------------------+
+|<<<FsyncCount>>> | Total number of fsync operations
+*-------------------------------------+--------------------------------------+
+|<<<VolumeFailures>>> | Total number of volume failures
+*-------------------------------------+--------------------------------------+
+|<<<ReadBlockOpNumOps>>> | Total number of read operations
+*-------------------------------------+--------------------------------------+
+|<<<ReadBlockOpAvgTime>>> | Average time of read operations in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<WriteBlockOpNumOps>>> | Total number of write operations
+*-------------------------------------+--------------------------------------+
+|<<<WriteBlockOpAvgTime>>> | Average time of write operations in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<BlockChecksumOpNumOps>>> | Total number of blockChecksum operations
+*-------------------------------------+--------------------------------------+
+|<<<BlockChecksumOpAvgTime>>> | Average time of blockChecksum operations in
+                              | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CopyBlockOpNumOps>>> | Total number of block copy operations
+*-------------------------------------+--------------------------------------+
+|<<<CopyBlockOpAvgTime>>> | Average time of block copy operations in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<ReplaceBlockOpNumOps>>> | Total number of block replace operations
+*-------------------------------------+--------------------------------------+
+|<<<ReplaceBlockOpAvgTime>>> | Average time of block replace operations in
+                             | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<HeartbeatsNumOps>>> | Total number of heartbeats
+*-------------------------------------+--------------------------------------+
+|<<<HeartbeatsAvgTime>>> | Average heartbeat time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportsNumOps>>> | Total number of block report operations
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportsAvgTime>>> | Average time of block report operations in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportsNumOps>>> | Total number of cache report operations
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportsAvgTime>>> | Average time of cache report operations in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PacketAckRoundTripTimeNanosNumOps>>> | Total number of ack round trips
+*-------------------------------------+--------------------------------------+
+|<<<PacketAckRoundTripTimeNanosAvgTime>>> | Average time from ack send to
+| | receive minus the downstream ack time in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<FlushNanosNumOps>>> | Total number of flushes
+*-------------------------------------+--------------------------------------+
+|<<<FlushNanosAvgTime>>> | Average flush time in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<FsyncNanosNumOps>>> | Total number of fsync operations
+*-------------------------------------+--------------------------------------+
+|<<<FsyncNanosAvgTime>>> | Average fsync time in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketBlockedOnNetworkNanosNumOps>>> | Total number of packets sent
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketBlockedOnNetworkNanosAvgTime>>> | Average time in nanoseconds
+| | spent waiting on the network while sending packets
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketTransferNanosNumOps>>> | Total number of packets sent
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketTransferNanosAvgTime>>> | Average packet transfer time in
+                                          | nanoseconds
+*-------------------------------------+--------------------------------------+
+
+ugi context
+
+* UgiMetrics
+
+  UgiMetrics is related to user and group information.
+  Each metrics record contains Hostname tag as additional information
+  along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<LoginSuccessNumOps>>> | Total number of successful Kerberos logins
+*-------------------------------------+--------------------------------------+
+|<<<LoginSuccessAvgTime>>> | Average time for successful Kerberos logins in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<LoginFailureNumOps>>> | Total number of failed Kerberos logins
+*-------------------------------------+--------------------------------------+
+|<<<LoginFailureAvgTime>>> | Average time for failed Kerberos logins in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<getGroupsNumOps>>> | Total number of group resolutions
+*-------------------------------------+--------------------------------------+
+|<<<getGroupsAvgTime>>> | Average time for group resolution in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<sNumOps>>> |
+| | Total number of group resolutions (<num> seconds granularity). <num> is
+| | specified by <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
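+
+  The <num> second windows above are only emitted when
+  <<<hadoop.user.group.metrics.percentiles.intervals>>> is set. The sketch
+  below is an illustration rather than the canonical way to set it (the
+  property is normally placed in <<<core-site.xml>>>); it assumes it runs
+  before the first UserGroupInformation operation.
+
+----
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public class EnableUgiPercentiles {
+  public static void main(String[] args) {
+    // Assumption: enable 60-second and 300-second percentile windows for
+    // group resolution latency before any UGI call is made.
+    Configuration conf = new Configuration();
+    conf.set("hadoop.user.group.metrics.percentiles.intervals", "60,300");
+    UserGroupInformation.setConfiguration(conf);
+  }
+}
+----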
+
+metricssystem context
+
+* MetricsSystem
+
+  MetricsSystem shows statistics for metrics snapshot and publish operations.
+  Each metrics record contains Hostname tag as additional information
+  along with metrics.
+  In the sink metrics below, <instance> is the name of the sink instance
+  as configured in <<<hadoop-metrics2.properties>>>.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<NumActiveSources>>> | Current number of active metrics sources
+*-------------------------------------+--------------------------------------+
+|<<<NumAllSources>>> | Total number of metrics sources
+*-------------------------------------+--------------------------------------+
+|<<<NumActiveSinks>>> | Current number of active sinks
+*-------------------------------------+--------------------------------------+
+|<<<NumAllSinks>>> | Total number of sinks \
+                   | (BUT usually less than <<<NumActiveSinks>>>,
+                   | see {{{https://issues.apache.org/jira/browse/HADOOP-9946}HADOOP-9946}})
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotNumOps>>> | Total number of operations to snapshot statistics from
+                      | a metrics source
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotAvgTime>>> | Average time in milliseconds to snapshot statistics
+                       | from a metrics source
+*-------------------------------------+--------------------------------------+
+|<<<PublishNumOps>>> | Total number of operations to publish statistics to a
+                     | sink
+*-------------------------------------+--------------------------------------+
+|<<<PublishAvgTime>>> | Average time in milliseconds to publish statistics to
+                      | a sink
+*-------------------------------------+--------------------------------------+
+|<<<DroppedPubAll>>> | Total number of dropped publishes
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<NumOps>>> | Total number of sink operations for the
+                                   | <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<AvgTime>>> | Average time in milliseconds of sink
+                                    | operations for the <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<Dropped>>> | Total number of dropped sink operations
+                                    | for the <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<Qsize>>> | Current queue length of sink operations \
+                                  | (BUT always set to 0 because nothing
+                                  | increments this metric, see
+                                  | {{{https://issues.apache.org/jira/browse/HADOOP-9941}HADOOP-9941}})
+*-------------------------------------+--------------------------------------+
+
+default context
+
+* StartupProgress
+
+  StartupProgress metrics show the statistics of NameNode startup.
+  Four metrics are exposed for each startup phase, prefixed by the phase name.
+  The startup <phase>s are <<<LoadingFsImage>>>, <<<LoadingEdits>>>,
+  <<<SavingCheckpoint>>>, and <<<SafeMode>>>. For example, the
+  <<<LoadingFsImage>>> phase exposes <<<LoadingFsImageCount>>>,
+  <<<LoadingFsImageElapsedTime>>>, <<<LoadingFsImageTotal>>>, and
+  <<<LoadingFsImagePercentComplete>>>.
+  Each metrics record contains Hostname tag as additional information
+  along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<ElapsedTime>>> | Total elapsed time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PercentComplete>>> | Fraction of the NameNode startup that has completed \
+                       | (the maximum value is 1.0, not 100)
+*-------------------------------------+--------------------------------------+
+|<phase><<<Count>>> | Total number of steps completed in the phase
+*-------------------------------------+--------------------------------------+
+|<phase><<<ElapsedTime>>> | Total elapsed time in the phase in milliseconds
+*-------------------------------------+--------------------------------------+
+|<phase><<<Total>>> | Total number of steps in the phase
+*-------------------------------------+--------------------------------------+
+|<phase><<<PercentComplete>>> | Fraction of the phase that has completed \
+                              | (the maximum value is 1.0, not 100)
+*-------------------------------------+--------------------------------------+
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java
index 971e237..41dfb7a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java
@@ -23,6 +23,7 @@
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -30,6 +31,11 @@
 
 /** Unit tests for Writable. */
 public class TestWritable extends TestCase {
+  private static final String TEST_CONFIG_PARAM = "frob.test";
+  private static final String TEST_CONFIG_VALUE = "test";
+  private static final String TEST_WRITABLE_CONFIG_PARAM = "test.writable";
+  private static final String TEST_WRITABLE_CONFIG_VALUE = TEST_CONFIG_VALUE;
+
   public TestWritable(String name) { super(name); }
 
   /** Example class used in test cases below. */
@@ -64,6 +70,25 @@
     }
   }
 
+  public static class SimpleWritableComparable extends SimpleWritable
+      implements WritableComparable<SimpleWritableComparable>, Configurable {
+    private Configuration conf;
+
+    public SimpleWritableComparable() {}
+
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public Configuration getConf() {
+      return this.conf;
+    }
+
+    public int compareTo(SimpleWritableComparable o) {
+      return this.state - o.state;
+    }
+  }
+
   /** Test 1: Check that SimpleWritable. */
   public void testSimpleWritable() throws Exception {
     testWritable(new SimpleWritable());
@@ -121,9 +146,34 @@
     @Override public int compareTo(Frob o) { return 0; }
   }
 
-  /** Test that comparator is defined. */
+  /** Test that comparator is defined and configured. */
   public static void testGetComparator() throws Exception {
-    assert(WritableComparator.get(Frob.class) instanceof FrobComparator);
+    Configuration conf = new Configuration();
+
+    // Without conf.
+    WritableComparator frobComparator = WritableComparator.get(Frob.class);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
+
+    // With conf.
+    conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
+    frobComparator = WritableComparator.get(Frob.class, conf);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
+
+    // Without conf. should reuse configuration.
+    frobComparator = WritableComparator.get(Frob.class);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
+
+    // New conf. should use new configuration.
+    frobComparator = WritableComparator.get(Frob.class, new Configuration());
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
   }
 
   /**
@@ -153,4 +203,17 @@
         .compare(writable1, writable3) == 0);
   }
 
+  /**
+   * Test that Writable's are configured by Comparator.
+   */
+  public void testConfigurableWritableComparator() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(TEST_WRITABLE_CONFIG_PARAM, TEST_WRITABLE_CONFIG_VALUE);
+
+    WritableComparator wc = WritableComparator.get(SimpleWritableComparable.class, conf);
+    SimpleWritableComparable key = ((SimpleWritableComparable)wc.newKey());
+    assertNotNull(wc.getConf());
+    assertNotNull(key.getConf());
+    assertEquals(key.getConf().get(TEST_WRITABLE_CONFIG_PARAM), TEST_WRITABLE_CONFIG_VALUE);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index ed27762..d676782 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
@@ -31,6 +32,7 @@
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
+
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -151,6 +153,18 @@
     assertEquals(AuthenticationMethod.PROXY, ugi.getAuthenticationMethod());
     assertEquals(AuthenticationMethod.SIMPLE, ugi.getRealAuthenticationMethod());
   }
+  
+  @Test (timeout = 30000)
+  public void testCreateRemoteUser() {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
+    assertEquals(AuthenticationMethod.SIMPLE, ugi.getAuthenticationMethod());
+    assertTrue (ugi.toString().contains("(auth:SIMPLE)"));
+    ugi = UserGroupInformation.createRemoteUser("user1", 
+        AuthMethod.KERBEROS);
+    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
+    assertTrue (ugi.toString().contains("(auth:KERBEROS)"));
+  }
+  
   /** Test login method */
   @Test (timeout = 30000)
   public void testLogin() throws Exception {
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index d162ac0..a91f041 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -54,47 +54,55 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-ls \[-d\] \[-h\] \[-R\] \[&lt;path&gt; \.\.\.\]:( |\t)*List the contents that match the specified file pattern. If( )*</expected-output>
+          <expected-output>^-ls \[-d\] \[-h\] \[-R\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*path is not specified, the contents of /user/&lt;currentUser&gt;( )*</expected-output>
+          <expected-output>^\s*List the contents that match the specified file pattern. If path is not</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*path is not specified, the contents of /user/&lt;currentUser&gt;( )*</expected-output>
+          <expected-output>^\s*specified, the contents of /user/&lt;currentUser&gt; will be listed. Directory entries( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*will be listed. Directory entries are of the form( )*</expected-output>
+          <expected-output>^\s*are of the form:( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*permissions - userid groupid size_of_directory\(in bytes\) modification_date\(yyyy-MM-dd HH:mm\) directoryName( )*</expected-output>
-        </comparator>
-         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and file entries are of the form( )*</expected-output>
-        </comparator>
-          <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*permissions number_of_replicas userid groupid size_of_file\(in bytes\) modification_date\(yyyy-MM-dd HH:mm\) fileName( )*</expected-output>
+          <expected-output>^\s*permissions - userId groupId sizeOfDirectory\(in bytes\)( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-d\s+Directories are listed as plain files\.</expected-output>
+          <expected-output>^\s*modificationDate\(yyyy-MM-dd HH:mm\) directoryName( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-h\s+Formats the sizes of files in a human-readable fashion( )*</expected-output>
+          <expected-output>^\s*and file entries are of the form:( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*rather than a number of bytes\.( )*</expected-output>
+          <expected-output>^\s*permissions numberOfReplicas userId groupId sizeOfFile\(in bytes\)( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R\s+Recursively list the contents of directories\.</expected-output>
+          <expected-output>^\s*modificationDate\(yyyy-MM-dd HH:mm\) fileName( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-d\s+Directories are listed as plain files\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-h\s+Formats the sizes of files in a human-readable fashion rather than a number( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*of bytes\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-R\s+Recursively list the contents of directories\.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -109,7 +117,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-lsr:\s+\(DEPRECATED\) Same as 'ls -R'</expected-output>
+          <expected-output>^-lsr :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s+\(DEPRECATED\) Same as 'ls -R'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -125,23 +137,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to the local name.( )*&lt;src&gt; is kept.( )*When copying multiple,( )*</expected-output>
+          <expected-output>\s*Copy files that match the file pattern &lt;src&gt; to the local name.  &lt;src&gt; is kept.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*files, the destination must be a directory.( )*Passing( )*</expected-output>
+          <expected-output>\s*When copying multiple files, the destination must be a directory. Passing -p\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*preserves access and modification times, ownership and the mode.*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -156,35 +164,39 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-du \[-s\] \[-h\] &lt;path&gt; \.\.\.:\s+Show the amount of space, in bytes, used by the files that\s*</expected-output>
+          <expected-output>^-du \[-s\] \[-h\] &lt;path&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*match the specified file pattern. The following flags are optional:</expected-output>
+          <expected-output>^\s*Show the amount of space, in bytes, used by the files that match the specified\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-s\s*Rather than showing the size of each individual file that</expected-output>
+          <expected-output>^\s*file pattern. The following flags are optional:\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*matches the pattern, shows the total \(summary\) size.</expected-output>
+          <expected-output>^\s*-s\s*Rather than showing the size of each individual file that matches the\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-h\s*Formats the sizes of files in a human-readable fashion</expected-output>
+          <expected-output>^\s*pattern, shows the total \(summary\) size.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>\s*rather than a number of bytes.</expected-output>
+          <expected-output>^\s*-h\s*Formats the sizes of files in a human-readable fashion rather than a number\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*Note that, even without the -s option, this only shows size summaries</expected-output>
+          <expected-output>\s*of bytes.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*one level deep into a directory.</expected-output>
+          <expected-output>^\s*Note that, even without the -s option, this only shows size summaries one level\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*deep into a directory.</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -207,7 +219,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-dus:\s+\(DEPRECATED\) Same as 'du -s'</expected-output>
+          <expected-output>^-dus :</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(DEPRECATED\) Same as 'du -s'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -222,7 +238,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-count \[-q\] &lt;path&gt; \.\.\.:( |\t)*Count the number of directories, files and bytes under the paths( )*</expected-output>
+          <expected-output>^-count \[-q\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Count the number of directories, files and bytes under the paths( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -253,15 +273,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-mv &lt;src&gt; \.\.\. &lt;dst&gt;:( |\t)*Move files that match the specified file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-mv &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to a destination &lt;dst&gt;.  When moving multiple files, the( )*</expected-output>
+          <expected-output>\s*Move files that match the specified file pattern &lt;src&gt; to a destination &lt;dst&gt;.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*destination must be a directory.( )*</expected-output>
+          <expected-output>^( |\t)*When moving multiple files, the destination must be a directory.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -276,23 +296,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cp \[-f\] \[-p\] &lt;src&gt; \.\.\. &lt;dst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt; to a( )*</expected-output>
+          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*destination.  When copying multiple files, the destination( )*</expected-output>
+          <expected-output>^\s*Copy files that match the file pattern &lt;src&gt; to a destination.  When copying( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*must be a directory.( )*Passing -p preserves access and( )*</expected-output>
+          <expected-output>^( |\t)*multiple files, the destination must be a directory.( )*Passing -p preserves status( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*modification times, ownership and the mode. Passing -f( )*</expected-output>
+          <expected-output>^( |\t)*\[topax\] \(timestamps, ownership, permission, ACLs, XAttr\). If -p is specified( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*overwrites the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*with no &lt;arg&gt;, then preserves timestamps, ownership, permission. If -pa is( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*specified, then preserves permission also because ACL is a super-set of( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -307,31 +335,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\.:( |\t)*Delete all files that match the specified file pattern.( )*</expected-output>
+          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Equivalent to the Unix command "rm &lt;src&gt;"( )*</expected-output>
+          <expected-output>^\s*Delete all files that match the specified file pattern. Equivalent to the Unix( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-skipTrash option bypasses trash, if enabled, and immediately( )*</expected-output>
+          <expected-output>^\s*command "rm &lt;src&gt;"( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*deletes &lt;src&gt;( )*</expected-output>
+          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-f\s+If the file does not exist, do not display a diagnostic</expected-output>
+          <expected-output>^\s+-f\s+If the file does not exist, do not display a diagnostic message or\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+message or modify the exit status to reflect an error\.</expected-output>
+          <expected-output>^\s+modify the exit status to reflect an error\.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories</expected-output>
+          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -346,11 +374,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rmdir \[--ignore-fail-on-non-empty\] &lt;dir&gt; \.\.\.:\s+Removes the directory entry specified by each directory argument,</expected-output>
+          <expected-output>^-rmdir \[--ignore-fail-on-non-empty\] &lt;dir&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>\s+provided it is empty.</expected-output>
+          <expected-output>\s+Removes the directory entry specified by each directory argument, provided it is\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>\s+empty\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -365,7 +397,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rmr:\s+\(DEPRECATED\) Same as 'rm -r'</expected-output>
+          <expected-output>^-rmr :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(DEPRECATED\) Same as 'rm -r'\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -380,27 +416,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-put \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Copy files from the local file system</expected-output>
+          <expected-output>^-put \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*into fs.( )*Copying fails if the file already( )*</expected-output>
+          <expected-output>^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*exists, unless the -f flag is given.( )*Passing( )*</expected-output>
+          <expected-output>^\s*exists, unless the -f flag is given.( )*Passing -p preserves access and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
+          <expected-output>^\s*modification times, ownership and the mode. Passing -f overwrites the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode. Passing -f overwrites( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -415,7 +447,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-copyFromLocal \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Identical to the -put command\.</expected-output>
+          <expected-output>^-copyFromLocal \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Identical to the -put command\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -430,11 +466,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-moveFromLocal &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Same as -put, except that the source is</expected-output>
+          <expected-output>^-moveFromLocal &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*deleted after it's copied.</expected-output>
+          <expected-output>^( |\t)*Same as -put, except that the source is deleted after it's copied.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -450,23 +486,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to the local name.( )*&lt;src&gt; is kept.( )*When copying multiple,( )*</expected-output>
+          <expected-output>^( |\t)*Copy files that match the file pattern &lt;src&gt; to the local name.( )*&lt;src&gt; is kept.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*files, the destination must be a directory.( )*Passing( )*</expected-output>
+          <expected-output>^( |\t)*When copying multiple files, the destination must be a directory. Passing -p( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*preserves access and modification times, ownership and the mode.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -481,19 +513,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getmerge \[-nl\] &lt;src&gt; &lt;localdst&gt;:( |\t)*Get all the files in the directories that( )*</expected-output>
+          <expected-output>^-getmerge \[-nl\] &lt;src&gt; &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*match the source file pattern and merge and sort them to only( )*</expected-output>
+          <expected-output>^( |\t)*Get all the files in the directories that match the source file pattern and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
+          <expected-output>^( |\t)*merge and sort them to only one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-nl   Add a newline character at the end of each file.( )*</expected-output>
+          <expected-output>^( |\t)*-nl\s+Add a newline character at the end of each file.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -509,11 +541,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cat \[-ignoreCrc\] &lt;src&gt; \.\.\.:( |\t)*Fetch all files that match the file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-cat \[-ignoreCrc\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and display their content on stdout.</expected-output>
+          <expected-output>^\s*Fetch all files that match the file pattern &lt;src&gt; and display their content on\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*stdout.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -529,7 +565,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-checksum &lt;src&gt; \.\.\.:( |\t)*Dump checksum information for files.*</expected-output>
+          <expected-output>^-checksum &lt;src&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Dump checksum information for files that match the file pattern &lt;src&gt; to stdout\.\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Note that this requires a round-trip to a datanode storing each block of the\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*file, and thus is not efficient to run on a large number of files\. The checksum\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*of a file depends on its content, block size and the checksum algorithm and\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*parameters used for creating the file\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -544,7 +600,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-copyToLocal \[-p\] \[-ignoreCrc\] \[-crc\] &lt;src&gt; \.\.\. &lt;localdst&gt;:\s+Identical to the -get command.</expected-output>
+          <expected-output>^-copyToLocal \[-p\] \[-ignoreCrc\] \[-crc\] &lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Identical to the -get command.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -559,7 +619,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-moveToLocal &lt;src&gt; &lt;localdst&gt;:\s+Not implemented yet</expected-output>
+          <expected-output>^-moveToLocal &lt;src&gt; &lt;localdst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Not implemented yet</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -574,7 +638,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-mkdir \[-p\] &lt;path&gt; \.\.\.:( |\t)*Create a directory in specified location.( )*</expected-output>
+          <expected-output>^-mkdir \[-p\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Create a directory in specified location.( )*</expected-output>
         </comparator>
         <comparator>
           <type>TokenComparator</type>
@@ -593,27 +661,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\.:( |\t)*Set the replication level of a file. If &lt;path&gt; is a directory( )*</expected-output>
+          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*then the command recursively changes the replication factor of( )*</expected-output>
+          <expected-output>^\s*Set the replication level of a file. If &lt;path&gt; is a directory then the command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*all files under the directory tree rooted at &lt;path&gt;\.( )*</expected-output>
+          <expected-output>^\s*recursively changes the replication factor of all files under the directory tree( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*rooted at &lt;path&gt;\.( )*</expected-output>
         </comparator>
         <comparator>
             <type>RegexpComparator</type>
-            <expected-output>^( |\t)*The -w flag requests that the command wait for the replication( )*</expected-output>
+            <expected-output>^\s*-w\s+It requests that the command waits for the replication to complete\. This( )*</expected-output>
         </comparator>
         <comparator>
             <type>RegexpComparator</type>
-            <expected-output>^( |\t)*to complete. This can potentially take a very long time\.( )*</expected-output>
+            <expected-output>^( |\t)*can potentially take a very long time\.( )*</expected-output>
         </comparator>
           <comparator>
               <type>RegexpComparator</type>
-              <expected-output>^( |\t)*The -R flag is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
+              <expected-output>^( |\t)*-R\s+It is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
           </comparator>
       </comparators>
     </test>
@@ -628,15 +700,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-touchz &lt;path&gt; \.\.\.:( |\t)*Creates a file of zero length( )*</expected-output>
+          <expected-output>^-touchz &lt;path&gt; \.\.\. :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*at &lt;path&gt; with current time as the timestamp of that &lt;path&gt;.( )*</expected-output>
+          <expected-output>^( |\t)*Creates a file of zero length at &lt;path&gt; with current time as the timestamp of( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*An error is returned if the file exists with non-zero length( )*</expected-output>
+          <expected-output>^( |\t)* that &lt;path&gt;\. An error is returned if the file exists with non-zero length( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -651,11 +723,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-test -\[defsz\] &lt;path&gt;:\sAnswer various questions about &lt;path&gt;, with result via exit status.</expected-output>
+          <expected-output>^-test -\[defsz\] &lt;path&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*else, return 1.( )*</expected-output>
+          <expected-output>^\s*Answer various questions about &lt;path&gt;, with result via exit status.</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-[defsz]\s+return 0 if .*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -670,15 +746,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-stat \[format\] &lt;path&gt; \.\.\.:( |\t)*Print statistics about the file/directory at &lt;path&gt;( )*</expected-output>
+          <expected-output>^-stat \[format\] &lt;path&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*in the specified format. Format accepts filesize in blocks \(%b\), group name of owner\(%g\),( )*</expected-output>
+          <expected-output>^( |\t)*Print statistics about the file/directory at &lt;path&gt; in the specified format.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*filename \(%n\), block size \(%o\), replication \(%r\), user name of owner\(%u\), modification date \(%y, %Y\)( )*</expected-output>
+          <expected-output>^( |\t)*Format accepts filesize in blocks \(%b\), group name of owner\(%g\), filename \(%n\),( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*block size \(%o\), replication \(%r\), user name of owner\(%u\), modification date( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*\(%y, %Y\)( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -693,11 +777,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-tail \[-f\] &lt;file&gt;:( |\t)+Show the last 1KB of the file.( )*</expected-output>
+          <expected-output>^-tail \[-f\] &lt;file&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The -f option shows appended data as the file grows.( )*</expected-output>
+          <expected-output>^\s*Show the last 1KB of the file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*-f\s+Shows appended data as the file grows.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -712,47 +800,55 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chmod \[-R\] &lt;MODE\[,MODE\]... \| OCTALMODE&gt; PATH...:( |\t)*Changes permissions of a file.( )*</expected-output>
+          <expected-output>^-chmod \[-R\] &lt;MODE\[,MODE\]... \| OCTALMODE&gt; PATH... :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*This works similar to shell's chmod with a few exceptions.( )*</expected-output>
+          <expected-output>^( |\t)*Changes permissions of a file. This works similar to the shell's chmod command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+          <expected-output>^( |\t)*with a few exceptions.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+          <expected-output>^( |\t)*-R\s*modifies the files recursively. This is the only option currently( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*MODE( |\t)*Mode is same as mode used for chmod shell command.( )*</expected-output>
+          <expected-output>^( |\t)*supported.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Only letters recognized are 'rwxXt'. E.g. \+t,a\+r,g-w,\+rwx,o=r( )*</expected-output>
+          <expected-output>^( |\t)*&lt;MODE&gt;\s*Mode is the same as mode used for the shell's command. The only( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may( )*</expected-output>
+          <expected-output>^( |\t)*letters recognized are 'rwxXt', e\.g\. \+t,a\+r,g-w,\+rwx,o=r\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*be 1 or 0 to turn the sticky bit on or off, respectively.( )*Unlike( |\t)*shell command, it is not possible to specify only part of the mode( )*</expected-output>
+          <expected-output>^( |\t)*&lt;OCTALMODE&gt;\s+Mode specifed in 3 or 4 digits. If 4 digits, the first may be 1 or( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*E.g. 754 is same as u=rwx,g=rx,o=r( )*</expected-output>
+          <expected-output>^( |\t)*0 to turn the sticky bit on or off, respectively.( )*Unlike( |\t)*the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*If none of 'augo' is specified, 'a' is assumed and unlike( )*</expected-output>
+          <expected-output>^( |\t)*shell command, it is not possible to specify only part of the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*shell command, no umask is applied.( )*</expected-output>
+          <expected-output>^( |\t)*mode, e\.g\. 754 is same as u=rwx,g=rx,o=r\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*If none of 'augo' is specified, 'a' is assumed and unlike the shell command, no( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*umask is applied.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -767,51 +863,47 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chown \[-R\] \[OWNER\]\[:\[GROUP\]\] PATH...:( |\t)*Changes owner and group of a file.( )*</expected-output>
+          <expected-output>^-chown \[-R\] \[OWNER\]\[:\[GROUP\]\] PATH... :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*This is similar to shell's chown with a few exceptions.( )*</expected-output>
+          <expected-output>^\s*Changes owner and group of a file\. This is similar to the shell's chown command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+          <expected-output>^( |\t)*with a few exceptions.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option currently( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*If only owner or group is specified then only owner or( )*</expected-output>
+          <expected-output>^( |\t)*supported.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*group is modified.( )*</expected-output>
+          <expected-output>^( |\t)*If only the owner or group is specified, then only the owner or group is( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The owner and group names may only consist of digits, alphabet,( )*</expected-output>
+          <expected-output>^( |\t)*modified. The owner and group names may only consist of digits, alphabet, and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and any of .+?. The names are case sensitive.( )*</expected-output>
+          <expected-output>^( |\t)*any of .+?. The names are case sensitive.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*WARNING: Avoid using '.' to separate user name and group though( )*</expected-output>
+          <expected-output>^( |\t)*WARNING: Avoid using '.' to separate user name and group though Linux allows it.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Linux allows it. If user names have dots in them and you are( )*</expected-output>
+          <expected-output>^( |\t)*If user names have dots in them and you are using local file system, you might( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*using local file system, you might see surprising results since( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*shell command 'chown' is used for local files.( )*</expected-output>
+          <expected-output>^( |\t)*see surprising results since the shell command 'chown' is used for local files.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -826,7 +918,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chgrp \[-R\] GROUP PATH...:( |\t)*This is equivalent to -chown ... :GROUP ...( )*</expected-output>
+          <expected-output>^-chgrp \[-R\] GROUP PATH... :( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This is equivalent to -chown ... :GROUP ...( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -841,11 +937,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-help \[cmd ...\]:( |\t)*Displays help for given command or all commands if none( )*</expected-output>
+          <expected-output>^-help \[cmd ...\] :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*is specified.( )*</expected-output>
+          <expected-output>^( |\t)*Displays help for given command or all commands if none is specified.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
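
Taken together, the comparators above track a reformatting of the FsShell help output: each usage line now ends with " :", the description is wrapped on the following lines, and options such as -R and -w get their own indented entries. As an illustration, the -setrep patterns earlier in this file correspond to help text roughly like the following (spacing and wrap points are approximate; only the literal wording is taken from the expected-output patterns):

-setrep [-R] [-w] <rep> <path> ... :
  Set the replication level of a file. If <path> is a directory then the command
  recursively changes the replication factor of all files under the directory tree
  rooted at <path>.

  -w  It requests that the command waits for the replication to complete. This
      can potentially take a very long time.
  -R  It is accepted for backwards compatibility. It has no effect.
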
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index d1460fe..2ac8fff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
@@ -86,6 +88,7 @@
   public static final String REPLICATION_PARAM = "replication";
   public static final String BLOCKSIZE_PARAM = "blocksize";
   public static final String PERMISSION_PARAM = "permission";
+  public static final String ACLSPEC_PARAM = "aclspec";
   public static final String DESTINATION_PARAM = "destination";
   public static final String RECURSIVE_PARAM = "recursive";
   public static final String SOURCES_PARAM = "sources";
@@ -95,6 +98,7 @@
   public static final String ACCESS_TIME_PARAM = "accesstime";
 
   public static final Short DEFAULT_PERMISSION = 0755;
+  public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
@@ -152,6 +156,11 @@
   public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
   public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
 
+  public static final String ACL_STATUS_JSON = "AclStatus";
+  public static final String ACL_STICKY_BIT_JSON = "stickyBit";
+  public static final String ACL_ENTRIES_JSON = "entries";
+  public static final String ACL_BIT_JSON = "aclBit";
+
   public static final String ERROR_JSON = "RemoteException";
   public static final String ERROR_EXCEPTION_JSON = "exception";
   public static final String ERROR_CLASSNAME_JSON = "javaClassName";
@@ -169,10 +178,12 @@
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
-    INSTRUMENTATION(HTTP_GET),
+    INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
     APPEND(HTTP_POST), CONCAT(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
+    MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
+    REMOVEDEFAULTACL(HTTP_PUT), REMOVEACL(HTTP_PUT), SETACL(HTTP_PUT),
     DELETE(HTTP_DELETE);
 
     private String httpMethod;
@@ -798,6 +809,105 @@
     return (Boolean) json.get(SET_REPLICATION_JSON);
   }
 
+  /**
+   * Modify the ACL entries for a file.
+   *
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing modifications
+   * @throws IOException
+   */
+  @Override
+  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+          throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.MODIFYACLENTRIES.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(
+            Operation.MODIFYACLENTRIES.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Remove the specified ACL entries from a file
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing entries to remove
+   * @throws IOException
+   */
+  @Override
+  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+          throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEACLENTRIES.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(
+            Operation.REMOVEACLENTRIES.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Removes the default ACL for the given file
+   * @param path Path from which to remove the default ACL.
+   * @throws IOException
+   */
+  @Override
+  public void removeDefaultAcl(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEDEFAULTACL.toString());
+    HttpURLConnection conn = getConnection(
+            Operation.REMOVEDEFAULTACL.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Remove all ACLs from a file
+   * @param path Path from which to remove all ACLs
+   * @throws IOException
+   */
+  @Override
+  public void removeAcl(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEACL.toString());
+    HttpURLConnection conn = getConnection(Operation.REMOVEACL.getMethod(),
+            params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Set the ACLs for the given file
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing modifications, must include
+   *                entries for user, group, and others for compatibility
+   *                with permission bits.
+   * @throws IOException
+   */
+  @Override
+  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.SETACL.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(Operation.SETACL.getMethod(),
+                                           params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Get the ACL information for a given file
+   * @param path Path to acquire ACL info for
+   * @return the ACL information for the given path
+   * @throws IOException
+   */
+  @Override
+  public AclStatus getAclStatus(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETACLSTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.GETACLSTATUS.getMethod(),
+            params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    json = (JSONObject) json.get(ACL_STATUS_JSON);
+    return createAclStatus(json);
+  }
+
   private FileStatus createFileStatus(Path parent, JSONObject json) {
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
@@ -830,6 +940,23 @@
     return fileStatus;
   }
 
+  /**
+   * Convert the given JSON object into an AclStatus
+   * @param json Input JSON representing the ACLs
+   * @return Resulting AclStatus
+   */
+  private AclStatus createAclStatus(JSONObject json) {
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
+            .owner((String) json.get(OWNER_JSON))
+            .group((String) json.get(GROUP_JSON))
+            .stickyBit((Boolean) json.get(ACL_STICKY_BIT_JSON));
+    JSONArray entries = (JSONArray) json.get(ACL_ENTRIES_JSON);
+    for ( Object e : entries ) {
+      aclStatusBuilder.addEntry(AclEntry.parseAclEntry(e.toString(), true));
+    }
+    return aclStatusBuilder.build();
+  }
+
   @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
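
With these additions, HttpFSFileSystem exposes the same ACL surface as FileSystem, so a caller can manage ACLs over HTTP through the ordinary FileSystem interface. A minimal client-side sketch, assuming the webhdfs URI below points at an HttpFS server backed by a cluster with ACLs enabled; the host, port, path, and ACL spec are placeholders, not values from this patch:

import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class HttpFsAclDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical HttpFS endpoint; host and port are placeholders.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    Path file = new Path("/tmp/acl-demo.txt");

    // Replace the full ACL; a full spec must carry user::, group:: and other:: entries.
    List<AclEntry> spec = AclEntry.parseAclSpec(
        "user::rwx,user:foo:rw-,group::r--,other::---", true);
    fs.setAcl(file, spec);

    // Add one more named-user entry, then read the ACL back over HTTP.
    fs.modifyAclEntries(file, AclEntry.parseAclSpec("user:bar:r--", true));
    AclStatus status = fs.getAclStatus(file);
    for (AclEntry e : status.getEntries()) {
      System.out.println(e);
    }

    // Remove the named entry again, then drop the extended ACL entirely.
    fs.removeAclEntries(file, AclEntry.parseAclSpec("user:foo:rw-", true));
    fs.removeAcl(file);
  }
}
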
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 8e41d04..971b105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -26,7 +26,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.json.simple.JSONArray;
@@ -36,6 +39,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -44,34 +48,170 @@
 @InterfaceAudience.Private
 public class FSOperations {
 
-  @SuppressWarnings({"unchecked", "deprecation"})
-  private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? "" : status.getPath().getName());
-    json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString());
-    json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
-    json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
-    json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
-    json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission()));
-    json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
-    json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime());
-    json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
-    json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
-    return json;
+  /**
+   * This class is used to group a FileStatus and an AclStatus together.
+   * It's needed for the GETFILESTATUS and LISTSTATUS calls, which take
+   * most of their info from the FileStatus and a little from the AclStatus.
+   */
+  private static class StatusPair {
+    private FileStatus fileStatus;
+    private AclStatus aclStatus;
+
+    /**
+     * Simple constructor
+     * @param fileStatus Existing FileStatus object
+     * @param aclStatus Existing AclStatus object
+     */
+    public StatusPair(FileStatus fileStatus, AclStatus aclStatus) {
+      this.fileStatus = fileStatus;
+      this.aclStatus = aclStatus;
+    }
+
+    /**
+     * Create one StatusPair by performing the underlying calls to
+     * fs.getFileStatus and fs.getAclStatus
+     * @param fs The FileSystem where 'path' lives
+     * @param path The file/directory to query
+     * @throws IOException
+     */
+    public StatusPair(FileSystem fs, Path path) throws IOException {
+      fileStatus = fs.getFileStatus(path);
+      aclStatus = null;
+      try {
+        aclStatus = fs.getAclStatus(path);
+      } catch (AclException e) {
+        /*
+         * The cause is almost certainly an "ACLs aren't enabled"
+         * exception, so leave aclStatus at null and carry on.
+         */
+      } catch (UnsupportedOperationException e) {
+        /* Ditto above - this is the case for a local file system */
+      }
+    }
+
+    /**
+     * Return a Map suitable for conversion into JSON format
+     * @return The JSONish Map
+     */
+    public Map<String,Object> toJson() {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(true));
+      return json;
+    }
+
+    /**
+     * Return the inner part of the JSON for the status - used by both the
+     * GETFILESTATUS and LISTSTATUS calls.
+     * @param emptyPathSuffix Whether to leave the PATH_SUFFIX_JSON value empty
+     * @return The JSONish Map
+     */
+    public Map<String,Object> toJsonInner(boolean emptyPathSuffix) {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
+              (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
+      json.put(HttpFSFileSystem.TYPE_JSON,
+              HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+      json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
+      json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
+      json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
+      json.put(HttpFSFileSystem.PERMISSION_JSON,
+              HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
+      json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
+      json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
+              fileStatus.getModificationTime());
+      json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
+      json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+      if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
+        json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
+      }
+      return json;
+    }
   }
 
   /**
-   * Converts a FileSystemAccess <code>FileStatus</code> object into a JSON
-   * object.
-   *
-   * @param status FileSystemAccess file status.
-   *
-   * @return The JSON representation of the file status.
+   * Simple class used to contain and operate upon a list of StatusPair
+   * objects.  Used by LISTSTATUS.
    */
-  @SuppressWarnings({"unchecked", "deprecation"})
-  private static Map fileStatusToJSON(FileStatus status) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true));
+  private static class StatusPairs {
+    private StatusPair[] statusPairs;
+
+    /**
+     * Construct a list of StatusPair objects
+     * @param fs The FileSystem where 'path' lives
+     * @param path The directory to query
+     * @param filter A possible filter for entries in the directory
+     * @throws IOException
+     */
+    public StatusPairs(FileSystem fs, Path path, PathFilter filter)
+            throws IOException {
+      /* Grab all the file statuses at once in an array */
+      FileStatus[] fileStatuses = fs.listStatus(path, filter);
+
+      /* We'll have an array of StatusPairs of the same length */
+      AclStatus aclStatus = null;
+      statusPairs = new StatusPair[fileStatuses.length];
+
+      /*
+       * For each FileStatus, attempt to acquire an AclStatus.  If the
+       * getAclStatus call throws an exception, we assume that ACLs are turned
+       * off entirely and abandon the attempt.
+       */
+      boolean useAcls = true;   // Assume ACLs work until proven otherwise
+      for (int i = 0; i < fileStatuses.length; i++) {
+        if (useAcls) {
+          try {
+            aclStatus = fs.getAclStatus(fileStatuses[i].getPath());
+          } catch (AclException e) {
+            /* Almost certainly due to an "ACLs not enabled" exception */
+            aclStatus = null;
+            useAcls = false;
+          } catch (UnsupportedOperationException e) {
+            /* Ditto above - this is the case for a local file system */
+            aclStatus = null;
+            useAcls = false;
+          }
+        }
+        statusPairs[i] = new StatusPair(fileStatuses[i], aclStatus);
+      }
+    }
+
+    /**
+     * Return a Map suitable for conversion into JSON.
+     * @return A JSONish Map
+     */
+    @SuppressWarnings({"unchecked"})
+    public Map<String,Object> toJson() {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      Map<String,Object> inner = new LinkedHashMap<String,Object>();
+      JSONArray statuses = new JSONArray();
+      for (StatusPair s : statusPairs) {
+        statuses.add(s.toJsonInner(false));
+      }
+      inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
+      json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
+      return json;
+    }
+  }
+
+  /** Converts an <code>AclStatus</code> object into a JSON object.
+   *
+   * @param aclStatus AclStatus object
+   *
+   * @return The JSON representation of the ACLs for the file
+   */
+  @SuppressWarnings({"unchecked"})
+  private static Map<String,Object> aclStatusToJSON(AclStatus aclStatus) {
+    Map<String,Object> json = new LinkedHashMap<String,Object>();
+    Map<String,Object> inner = new LinkedHashMap<String,Object>();
+    JSONArray entriesArray = new JSONArray();
+    inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
+    inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
+    inner.put(HttpFSFileSystem.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
+    for ( AclEntry e : aclStatus.getEntries() ) {
+      entriesArray.add(e.toString());
+    }
+    inner.put(HttpFSFileSystem.ACL_ENTRIES_JSON, entriesArray);
+    json.put(HttpFSFileSystem.ACL_STATUS_JSON, inner);
     return json;
   }
 
@@ -118,30 +258,6 @@
   }
 
   /**
-   * Converts a FileSystemAccess <code>FileStatus</code> array into a JSON array
-   * object.
-   *
-   * @param status FileSystemAccess file status array.
-   * <code>SCHEME://HOST:PORT</code> in the file status.
-   *
-   * @return The JSON representation of the file status array.
-   */
-  @SuppressWarnings("unchecked")
-  private static Map fileStatusToJSON(FileStatus[] status) {
-    JSONArray json = new JSONArray();
-    if (status != null) {
-      for (FileStatus s : status) {
-        json.add(fileStatusToJSONRaw(s, false));
-      }
-    }
-    Map response = new LinkedHashMap();
-    Map temp = new LinkedHashMap();
-    temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json);
-    response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp);
-    return response;
-  }
-
-  /**
    * Converts an object into a Json Map with with one key-value entry.
    * <p/>
    * It assumes the given value is either a JSON primitive type or a
@@ -418,18 +534,19 @@
     }
 
     /**
-     * Executes the filesystem operation.
+     * Executes the filesystem getFileStatus operation and returns the
+     * result in a JSONish Map.
      *
      * @param fs filesystem instance to use.
      *
      * @return a Map object (JSON friendly) with the file status.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      FileStatus status = fs.getFileStatus(path);
-      return fileStatusToJSON(status);
+      StatusPair sp = new StatusPair(fs, path);
+      return sp.toJson();
     }
 
   }
@@ -482,19 +599,20 @@
     }
 
     /**
-     * Executes the filesystem operation.
+     * Returns a JSON-friendly Map containing the information for
+     * the set of files in 'path' that match 'filter'.
      *
      * @param fs filesystem instance to use.
      *
      * @return a Map with the file status of the directory
-     *         contents.
+     *         contents that match the filter.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      FileStatus[] status = fs.listStatus(path, filter);
-      return fileStatusToJSON(status);
+      StatusPairs sp = new StatusPairs(fs, path, filter);
+      return sp.toJson();
     }
 
     @Override
@@ -691,6 +809,218 @@
   }
 
   /**
+   * Executor that sets the acl for a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSSetAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a set-acl executor.
+     *
+     * @param path path to set the acl.
+     * @param aclSpec acl to set.
+     */
+    public FSSetAcl(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.setAcl(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes all acls from a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+
+    /**
+     * Creates a remove-acl executor.
+     *
+     * @param path path from which to remove the acl.
+     */
+    public FSRemoveAcl(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeAcl(path);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that modifies acl entries for a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSModifyAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a modify-acl executor.
+     *
+     * @param path path whose acl entries are to be modified.
+     * @param aclSpec acl entries to add or modify.
+     */
+    public FSModifyAclEntries(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.modifyAclEntries(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes acl entries from a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a remove acl entry executor.
+     *
+     * @param path path from which to remove acl entries.
+     * @param aclSpec acl parts to remove.
+     */
+    public FSRemoveAclEntries(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeAclEntries(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes the default acl from a directory in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveDefaultAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+
+    /**
+     * Creates an executor for removing the default acl.
+     *
+     * @param path path from which to remove the default acl.
+     */
+    public FSRemoveDefaultAcl(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeDefaultAcl(path);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that gets the ACL information for a given file.
+   */
+  @InterfaceAudience.Private
+  public static class FSAclStatus implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates an executor for getting the ACLs for a file.
+     *
+     * @param path the path for which to retrieve the ACLs.
+     */
+    public FSAclStatus(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map object (JSON friendly) with the ACL status.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      AclStatus status = fs.getAclStatus(path);
+      return aclStatusToJSON(status);
+    }
+
+  }
+
+  /**
    * Executor that performs a set-replication FileSystemAccess files system operation.
    */
   @InterfaceAudience.Private
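
For reference, aclStatusToJSON emits the same response shape WebHDFS uses for GETACLSTATUS, and toJsonInner adds an extra "aclBit": true field to a status entry whenever extended ACL entries are present. Assuming OWNER_JSON and GROUP_JSON (defined elsewhere in HttpFSFileSystem) resolve to "owner" and "group", a GETACLSTATUS body looks roughly like the following; the owner, group, and entry values here are illustrative only:

{"AclStatus": {
  "owner": "hdfs",
  "group": "supergroup",
  "stickyBit": false,
  "entries": ["user:foo:rw-", "group::r--"]
}}
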
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 1410b8b..9923962 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -33,12 +33,16 @@
 import javax.ws.rs.ext.Provider;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
 /**
  * HttpFS ParametersProvider.
  */
 @Provider
 @InterfaceAudience.Private
+@SuppressWarnings("unchecked")
 public class HttpFSParametersProvider extends ParametersProvider {
 
   private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
@@ -55,6 +59,7 @@
     PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
       new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.APPEND,
       new Class[]{DoAsParam.class, DataParam.class});
@@ -77,6 +82,16 @@
                   AccessTimeParam.class});
     PARAMS_DEF.put(Operation.DELETE,
       new Class[]{DoAsParam.class, RecursiveParam.class});
+    PARAMS_DEF.put(Operation.SETACL,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACL,
+            new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL,
+            new Class[]{DoAsParam.class});
   }
 
   public HttpFSParametersProvider() {
@@ -371,6 +386,26 @@
   }
 
   /**
+   * Class for AclPermission parameter.
+   */
+  @InterfaceAudience.Private
+  public static class AclPermissionParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ACLSPEC_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public AclPermissionParam() {
+      super(NAME, HttpFSFileSystem.ACLSPEC_DEFAULT,
+              Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
+    }
+  }
+
+  /**
    * Class for replication parameter.
    */
   @InterfaceAudience.Private
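
The aclspec parameter added here carries a comma-separated ACL spec (the same entry form the accompanying tests use) and is validated against DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT. A small sketch of the string/object round trip, using the same AclEntry helpers the patch itself calls; the spec value is just an example:

import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;

public class AclSpecRoundTrip {
  public static void main(String[] args) {
    // String form as it travels in the ?aclspec= query parameter.
    String aclSpec = "user::rwx,user:foo:rw-,group::r--,other::---";

    // Server side: FSOperations parses the raw string into AclEntry objects.
    List<AclEntry> entries = AclEntry.parseAclSpec(aclSpec, true);

    // Client side: HttpFSFileSystem serializes entries back into the same
    // comma-separated form before issuing the request.
    System.out.println(AclEntry.aclSpecToString(entries));
  }
}
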
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 798d4fb..c275b91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
@@ -313,6 +314,14 @@
         response = Response.status(Response.Status.BAD_REQUEST).build();
         break;
       }
+      case GETACLSTATUS: {
+        FSOperations.FSAclStatus command =
+                new FSOperations.FSAclStatus(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("ACL status for [{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]",
@@ -579,6 +588,52 @@
         response = Response.ok().build();
         break;
       }
+      case SETACL: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSSetAcl command =
+                new FSOperations.FSSetAcl(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEACL: {
+        FSOperations.FSRemoveAcl command =
+                new FSOperations.FSRemoveAcl(path);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] removed acl", path);
+        response = Response.ok().build();
+        break;
+      }
+      case MODIFYACLENTRIES: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSModifyAclEntries command =
+                new FSOperations.FSModifyAclEntries(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEACLENTRIES: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSRemoveAclEntries command =
+                new FSOperations.FSRemoveAclEntries(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEDEFAULTACL: {
+        FSOperations.FSRemoveDefaultAcl command =
+                new FSOperations.FSRemoveDefaultAcl(path);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] remove default acl", path);
+        response = Response.ok().build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP PUT operation [{0}]",
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index d512897..cfc747a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -26,6 +26,8 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -87,6 +89,7 @@
     String fsDefaultName = getProxiedFSURI();
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -479,9 +482,112 @@
     Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
   }
 
+  /**
+   * Runs assertions testing that two AclStatus objects contain the same info
+   * @param a First AclStatus
+   * @param b Second AclStatus
+   * @throws Exception
+   */
+  private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
+    Assert.assertTrue(a.getOwner().equals(b.getOwner()));
+    Assert.assertTrue(a.getGroup().equals(b.getGroup()));
+    Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
+    Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
+    for (AclEntry e : a.getEntries()) {
+      Assert.assertTrue(b.getEntries().contains(e));
+    }
+    for (AclEntry e : b.getEntries()) {
+      Assert.assertTrue(a.getEntries().contains(e));
+    }
+  }
+
+  /**
+   * Simple ACL tests on a file:  Set an acl, add an acl, remove one acl,
+   * and remove all acls.
+   * @throws Exception
+   */
+  private void testFileAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSet = "user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path path = new Path(getProxiedFSTestDir(), "testAclStatus.txt");
+    OutputStream os = proxyFs.create(path);
+    os.write(1);
+    os.close();
+
+    AclStatus proxyAclStat = proxyFs.getAclStatus(path);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAclEntries(path, AclEntry.parseAclSpec(aclUser1, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAcl(path);
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
+
+  /**
+   * Simple acl tests on a directory: set a default acl, remove default acls.
+   * @throws Exception
+   */
+  private void testDirAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String defUser1 = "default:user:glarch:r-x";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path dir = getProxiedFSTestDir();
+
+    /* ACL Status on a directory */
+    AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Set a default ACL on the directory */
+    httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Remove the default ACL */
+    httpfs.removeDefaultAcl(dir);
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
+
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
-    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
+    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
+    FILEACLS, DIRACLS
   }
 
   private void operation(Operation op) throws Exception {
@@ -533,6 +639,12 @@
       case CONTENT_SUMMARY:
         testContentSummary();
         break;
+      case FILEACLS:
+        testFileAcls();
+        break;
+      case DIRACLS:
+        testDirAcls();
+        break;
     }
   }
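
These tests only exercise ACLs because the embedded NameNode is started with DFS_NAMENODE_ACLS_ENABLED_KEY set to true (see the conf.setBoolean call earlier in this file's diff). On a real cluster the equivalent hdfs-site.xml entry would be the following, assuming the constant resolves to its usual name dfs.namenode.acls.enabled:

<property>
  <name>dfs.namenode.acls.enabled</name>
  <value>true</value>
</property>
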
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index b932e3a..72fd82e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.json.simple.JSONArray;
 import org.junit.Assert;
 
 import java.io.BufferedReader;
@@ -31,6 +33,7 @@
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.text.MessageFormat;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -128,6 +131,7 @@
     String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -241,6 +245,10 @@
   private void createWithHttp ( String filename, String perms )
           throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
     String pathOps;
     if ( perms == null ) {
       pathOps = MessageFormat.format(
@@ -260,18 +268,24 @@
   }
 
   /**
-   * Talks to the http interface to get the json output of the GETFILESTATUS
-   * command on the given file.
+   * Talks to the http interface to get the json output of a *STATUS command
+   * on the given file.
    *
    * @param filename The file to query.
+   * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
    * @return A string containing the JSON output describing the file.
    * @throws Exception
    */
-  private String getFileStatus ( String filename ) throws Exception {
+  private String getStatus(String filename, String command)
+          throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
     String pathOps = MessageFormat.format(
-            "/webhdfs/v1/{0}?user.name={1}&op=GETFILESTATUS",
-            filename, user);
+            "/webhdfs/v1/{0}?user.name={1}&op={2}",
+            filename, user, command);
     URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.connect();
@@ -284,6 +298,30 @@
   }
 
   /**
+   * General-purpose http PUT command to the httpfs server.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   */
+  private void putCmd(String filename, String command,
+                      String params) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
+            filename, user, (params == null) ? "" : "&",
+            (params == null) ? "" : params, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+  }
+
+  /**
    * Given the JSON output from the GETFILESTATUS call, return the
    * 'permission' value.
    *
@@ -299,6 +337,27 @@
   }
 
   /**
+   * Given the JSON output from the GETACLSTATUS call, return the
+   * 'entries' value as a List<String>.
+   * @param statusJson JSON from GETACLSTATUS
+   * @return A List of Strings which are the elements of the ACL entries
+   * @throws Exception
+   */
+  private List<String> getAclEntries ( String statusJson ) throws Exception {
+    List<String> entries = new ArrayList<String>();
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONObject details = (JSONObject) jsonObject.get("AclStatus");
+    JSONArray jsonEntries = (JSONArray) details.get("entries");
+    if ( jsonEntries != null ) {
+      for (Object e : jsonEntries) {
+        entries.add(e.toString());
+      }
+    }
+    return entries;
+  }
+
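+
The JSON that getAclEntries() walks is the body of a GETACLSTATUS response. A minimal sketch of the nesting it expects, using the same JSONParser/JSONObject/JSONArray classes as the helper above (assumed here to be json-simple); the owner/group/stickyBit fields are illustrative, only the "AclStatus" -> "entries" path is relied upon.

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class AclStatusJsonSketch {
  public static void main(String[] args) throws Exception {
    String statusJson =
        "{\"AclStatus\":{\"entries\":[\"user:foo:rw-\",\"group::r--\"],"
        + "\"group\":\"supergroup\",\"owner\":\"hadoop\",\"stickyBit\":false}}";
    JSONObject details = (JSONObject)
        ((JSONObject) new JSONParser().parse(statusJson)).get("AclStatus");
    JSONArray entries = (JSONArray) details.get("entries");
    for (Object e : entries) {
      System.out.println(e);  // user:foo:rw-  then  group::r--
    }
  }
}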
+  /**
    * Validate that files are created with 755 permissions when no
    * 'permissions' attribute is specified, and when 'permissions'
    * is specified, that value is honored.
@@ -314,22 +373,167 @@
     fs.mkdirs(new Path("/perm"));
 
     createWithHttp("/perm/none", null);
-    String statusJson = getFileStatus("/perm/none");
+    String statusJson = getStatus("/perm/none", "GETFILESTATUS");
     Assert.assertTrue("755".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-777", "777");
-    statusJson = getFileStatus("/perm/p-777");
+    statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
     Assert.assertTrue("777".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-654", "654");
-    statusJson = getFileStatus("/perm/p-654");
+    statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
     Assert.assertTrue("654".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-321", "321");
-    statusJson = getFileStatus("/perm/p-321");
+    statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
     Assert.assertTrue("321".equals(getPerms(statusJson)));
   }
 
+  /**
+   * Validate the various ACL set/modify/remove calls.  General strategy is
+   * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
+   * and GETACLSTATUS:
+   * <ol>
+   *   <li>Create a file with no ACLs</li>
+   *   <li>Add a user + group ACL</li>
+   *   <li>Add another user ACL</li>
+   *   <li>Remove the first user ACL</li>
+   *   <li>Remove all ACLs</li>
+   * </ol>
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testFileAcls() throws Exception {
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+    final String modAclSpec = "aclspec=" + aclUser2;
+    final String remAclSpec = "aclspec=" + aclUser1;
+    final String dir = "/aclFileTest";
+    final String path = dir + "/test";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    createWithHttp(path, null);
+
+    /* getfilestatus and liststatus don't have 'aclBit' in their reply */
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* getaclstatus works and returns no entries */
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /*
+     * Now set an ACL on the file.  (getfile|list)status have aclBit,
+     * and aclstatus has entries that look familiar.
+     */
+    putCmd(path, "SETACL", aclSpec);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Modify acl entries to add another user acl */
+    putCmd(path, "MODIFYACLENTRIES", modAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 3);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove the first user acl entry and verify */
+    putCmd(path, "REMOVEACLENTRIES", remAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove all acls and verify */
+    putCmd(path, "REMOVEACL", null);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+  }
+
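+
For comparison, a sketch of the first SETACL step in the test above performed directly through the Hadoop FileSystem API rather than over HTTP, which is roughly what the HttpFS server does on the client's behalf. The fs and path wiring is assumed (in these tests the FileSystem comes from TestHdfsHelper.getHdfsConf()).

import java.util.Arrays;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class FileSystemAclSketch {
  // Equivalent of "aclspec=user::rwx,user:foo:rw-,group::r--,other::---"
  static void setAcl(FileSystem fs, Path path) throws Exception {
    fs.setAcl(path, Arrays.asList(
        entry(AclEntryType.USER, null, FsAction.ALL),
        entry(AclEntryType.USER, "foo", FsAction.READ_WRITE),
        entry(AclEntryType.GROUP, null, FsAction.READ),
        entry(AclEntryType.OTHER, null, FsAction.NONE)));
    // getAclStatus() is what GETACLSTATUS serves over WebHDFS/HttpFS.
    System.out.println(fs.getAclStatus(path).getEntries());
  }

  private static AclEntry entry(AclEntryType type, String name, FsAction perm) {
    return new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(type).setName(name).setPermission(perm).build();
  }
}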
+  /**
+   * Test ACL operations on a directory, including default ACLs.
+   * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
+   * <ol>
+   *   <li>Initial status with no ACLs</li>
+   *   <li>The addition of a default ACL</li>
+   *   <li>The removal of default ACLs</li>
+   * </ol>
+   *
+   * @throws Exception
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDirAcls() throws Exception {
+    final String defUser1 = "default:user:glarch:r-x";
+    final String defSpec1 = "aclspec=" + defUser1;
+    final String dir = "/aclDirTest";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    /* getfilestatus doesn't have 'aclBit' in its reply */
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* No ACLs, either */
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /* Give it a default ACL and verify */
+    putCmd(dir, "SETACL", defSpec1);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    /* Expect 5 entries: ours plus generated default:(user|group|mask|other) */
+    Assert.assertTrue(aclEntries.size() == 5);
+    Assert.assertTrue(aclEntries.contains(defUser1));
+
+    /* Remove the default ACL and re-verify */
+    putCmd(dir, "REMOVEDEFAULTACL", null);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+  }
+
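+
The same default-ACL step, sketched against the FileSystem API. Scope DEFAULT corresponds to the "default:" prefix in the aclspec; the four extra entries the test expects are the base default entries (user, group, mask, other) that HDFS derives automatically once any default entry is present. The wiring is hypothetical, as in the previous sketch.

import java.util.Collections;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class DefaultAclSketch {
  static void addAndRemoveDefaultAcl(FileSystem fs, Path dir) throws Exception {
    AclEntry defUser = new AclEntry.Builder()
        .setScope(AclEntryScope.DEFAULT)       // "default:" in the aclspec
        .setType(AclEntryType.USER)
        .setName("glarch")
        .setPermission(FsAction.READ_EXECUTE)  // r-x
        .build();
    fs.modifyAclEntries(dir, Collections.singletonList(defUser));
    fs.removeDefaultAcl(dir);                  // REMOVEDEFAULTACL equivalent
  }
}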
   @Test
   @TestDir
   @TestJetty
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
new file mode 100644
index 0000000..b329026
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
@@ -0,0 +1,283 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.HTestCase;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestJetty;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+
+/**
+ * This test class ensures that everything works as expected when ACL
+ * support is turned off in HDFS.  This is the default configuration.  The other
+ * tests operate with ACL support turned on.
+ */
+public class TestHttpFSServerNoACLs extends HTestCase {
+
+  private MiniDFSCluster miniDfs;
+  private Configuration nnConf;
+
+  /**
+   * Fire up our own hand-rolled MiniDFSCluster.  We do this here instead
+   * of relying on TestHdfsHelper because we don't want to turn on ACL
+   * support.
+   *
+   * @throws Exception
+   */
+  private void startMiniDFS() throws Exception {
+
+    File testDirRoot = TestDirHelper.getTestDir();
+
+    if (System.getProperty("hadoop.log.dir") == null) {
+      System.setProperty("hadoop.log.dir",
+              new File(testDirRoot, "hadoop-log").getAbsolutePath());
+    }
+    if (System.getProperty("test.build.data") == null) {
+      System.setProperty("test.build.data",
+              new File(testDirRoot, "hadoop-data").getAbsolutePath());
+    }
+
+    Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
+    HadoopUsersConfTestHelper.addUserConf(conf);
+    conf.set("fs.hdfs.impl.disable.cache", "true");
+    conf.set("dfs.block.access.token.enable", "false");
+    conf.set("dfs.permissions", "true");
+    conf.set("hadoop.security.authentication", "simple");
+
+    // Explicitly turn off ACL support
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+    builder.numDataNodes(2);
+    miniDfs = builder.build();
+    nnConf = miniDfs.getConfiguration(0);
+  }
+
+  /**
+   * Create an HttpFS Server to talk to the MiniDFSCluster we created.
+   * @throws Exception
+   */
+  private void createHttpFSServer() throws Exception {
+    File homeDir = TestDirHelper.getTestDir();
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
+
+    File secretFile = new File(new File(homeDir, "conf"), "secret");
+    Writer w = new FileWriter(secretFile);
+    w.write("secret");
+    w.close();
+
+    // HDFS configuration
+    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
+    if ( !hadoopConfDir.mkdirs() ) {
+      throw new IOException();
+    }
+
+    String fsDefaultName =
+            nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
+    Configuration conf = new Configuration(false);
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+
+    // Explicitly turn off ACLs, just in case the default becomes true later
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
+    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    // HTTPFS configuration
+    conf = new Configuration(false);
+    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
+    conf.set("httpfs.proxyuser." +
+                    HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+            HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+    conf.set("httpfs.proxyuser." +
+                    HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+            HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
+    conf.set("httpfs.authentication.signature.secret.file",
+            secretFile.getAbsolutePath());
+
+    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    os = new FileOutputStream(httpfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    URL url = cl.getResource("webapp");
+    if ( url == null ) {
+      throw new IOException();
+    }
+    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
+    Server server = TestJettyHelper.getJettyServer();
+    server.addHandler(context);
+    server.start();
+  }
+
+  /**
+   * Talks to the http interface to run a *STATUS command on the given file
+   * and validates the response.
+   *
+   * @param filename The file to query.
+   * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
+   * @param expectOK Is this operation expected to succeed?
+   * @throws Exception
+   */
+  private void getStatus(String filename, String command, boolean expectOK)
+          throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}&op={2}",
+            filename, user, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.connect();
+    int resp = conn.getResponseCode();
+    BufferedReader reader;
+    if ( expectOK ) {
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
+      reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+      String res = reader.readLine();
+      Assert.assertTrue(!res.contains("aclBit"));
+      Assert.assertTrue(res.contains("owner")); // basic sanity check
+    } else {
+      Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
+      reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
+      String res = reader.readLine();
+      Assert.assertTrue(res.contains("RemoteException"));
+      Assert.assertTrue(res.contains("ACL"));
+      Assert.assertTrue(res.contains("rejected"));
+    }
+  }
+
+  /**
+   * General-purpose http PUT command to the httpfs server.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   */
+  private void putCmd(String filename, String command,
+                      String params, boolean expectOK) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
+            filename, user, (params == null) ? "" : "&",
+            (params == null) ? "" : params, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    int resp = conn.getResponseCode();
+    if ( expectOK ) {
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
+    } else {
+      Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
+      BufferedReader reader;
+      reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
+      String err = reader.readLine();
+      Assert.assertTrue(err.contains("RemoteException"));
+      Assert.assertTrue(err.contains("ACL"));
+      Assert.assertTrue(err.contains("rejected"));
+    }
+  }
+
+  /**
+   * Ensure that
+   * <ol>
+   *   <li>GETFILESTATUS and LISTSTATUS work happily</li>
+   *   <li>GETACLSTATUS throws an exception</li>
+   *   <li>The ACL PUT calls (SETACL, MODIFYACLENTRIES, etc.) all fail</li>
+   * </ol>
+   *
+   * @throws Exception
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  public void testWithNoAcls() throws Exception {
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+    final String modAclSpec = "aclspec=" + aclUser2;
+    final String remAclSpec = "aclspec=" + aclUser1;
+    final String defUser1 = "default:user:glarch:r-x";
+    final String defSpec1 = "aclspec=" + defUser1;
+    final String dir = "/noACLs";
+    final String path = dir + "/foo";
+
+    startMiniDFS();
+    createHttpFSServer();
+
+    FileSystem fs = FileSystem.get(nnConf);
+    fs.mkdirs(new Path(dir));
+    OutputStream os = fs.create(new Path(path));
+    os.write(1);
+    os.close();
+
+    /* The normal status calls work as expected; GETACLSTATUS fails */
+    getStatus(path, "GETFILESTATUS", true);
+    getStatus(dir, "LISTSTATUS", true);
+    getStatus(path, "GETACLSTATUS", false);
+
+    /* All the ACL-based PUT commands fail with ACL exceptions */
+    putCmd(path, "SETACL", aclSpec, false);
+    putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
+    putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
+    putCmd(path, "REMOVEACL", null, false);
+    putCmd(dir, "SETACL", defSpec1, false);
+    putCmd(dir, "REMOVEDEFAULTACL", null, false);
+
+    miniDfs.shutdown();
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
index 8e1fc2f..5e4aee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.runners.model.FrameworkMethod;
@@ -145,6 +146,7 @@
       conf.set("dfs.block.access.token.enable", "false");
       conf.set("dfs.permissions", "true");
       conf.set("hadoop.security.authentication", "simple");
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       builder.numDataNodes(2);
       MiniDFSCluster miniHdfs = builder.build();
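Both fixtures hinge on the same switch: TestHdfsHelper now enables ACLs for the positive tests, while TestHttpFSServerNoACLs explicitly leaves them off (the HDFS default). A minimal sketch of the toggle; DFS_NAMENODE_ACLS_ENABLED_KEY resolves to the dfs.namenode.acls.enabled property.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class AclSwitchSketch {
  static Configuration withAcls(boolean enabled) {
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, enabled);
    return conf;
  }
}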
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8714d15..93adae6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -254,72 +254,6 @@
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
-    HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
-    (Yi Liu via umamahesh)
-
-  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
-
-    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
-
-    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
-
-    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
-
-    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
-
-    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
-
-    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
-
-    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
-    (Yi Liu via umamahesh)
-
-    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
-
-    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
-
-    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
-
-    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
-
-    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
-
-    HDFS-6283. Write end user documentation for xattrs. (wang)
-
-    HDFS-6412. Interface audience and stability annotations missing from
-    several new classes related to xattrs. (wang)
-
-    HDFS-6259. Support extended attributes via WebHDFS. (yliu)
-
-    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
-    (Yi Liu via umamahesh)
-
-    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
-    (umamahesh via wang)
-
-    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
-
-    HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
-
-    HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
-
-    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
-    (umamahesh)
-
-    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
-
-    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
-    methods cannot throw AclException. (wang)
-
-    HDFS-6413. xattr names erroneously handled as case-insensitive.
-    (Charles Lamb via cnauroth)
-
-    HDFS-6414. xattr modification operations are based on state of latest
-    snapshot instead of current version of inode. (Andrew Wang via cnauroth)
-
-    HDFS-6374. setXAttr should require the user to be the owner of the file
-    or directory (Charles Lamb via wang)
-
     HDFS-6375. Listing extended attributes with the search permission.
     (Charles Lamb via wang)
 
@@ -485,6 +419,39 @@
     HDFS-6399. Add note about setfacl in HDFS permissions guide.
     (cnauroth via wang)
 
+    HDFS-6315. Decouple recording edit logs from FSDirectory. (wheat9)
+
+    HDFS-6379. HTTPFS - Implement ACLs support. (yoderme via tucu)
+
+    HDFS-6471. Make moveFromLocal CLI testcases to be non-disruptive
+    (Dasha Boudnik via cos)
+
+    HDFS-6395. Skip checking xattr limits for non-user-visible namespaces.
+    (Yi Liu via wang).
+
+    HDFS-3493. Invalidate excess corrupted blocks as long as minimum
+    replication is satisfied. (Juan Yu and Vinayakumar B via wang)
+
+    HDFS-6330. Move mkdirs() to FSNamesystem. (wheat9)
+
+    HDFS-6470. TestBPOfferService.testBPInitErrorHandling is flaky.
+    (Ming Ma via wang)
+
+    HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode
+    and file being read. (Anubhav Dhoot via atm)
+
+    HDFS-6499. Use NativeIO#renameTo instead of File#renameTo in
+    FileJournalManager. (Yongjun Zhang via atm)
+
+    HDFS-6518. TestCacheDirectives#testExceedsCapacity should
+    take FSN read lock when accessing pendingCached list.
+    (wang)
+
+    HDFS-6528. Add XAttrs to TestOfflineImageViewer. (Stephen Chu via wang)
+
+    HDFS-6545. Finalizing rolling upgrade can make NN unavailable for a long
+    duration. (kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -669,7 +636,85 @@
     HDFS-6364. Incorrect check for unknown datanode in Balancer. (Benoy
     Antony via Arpit Agarwal)
 
-Release 2.4.1 - UNRELEASED
+    HDFS-6503. Fix typo of DFSAdmin restoreFailedStorage.
+    (Zesheng Wu via wheat9)
+
+    HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
+    (Yi Liu via umamahesh)
+
+    HDFS-6375. Listing extended attributes with the search permission.
+    (Charles Lamb via wang)
+
+    HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
+    (decstery via cmccabe)
+
+    HDFS-6527. Edit log corruption due to deferred INode removal. (kihwal and
+    jing9 via jing9)
+
+  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
+
+    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
+
+    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
+
+    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
+
+    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
+
+    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
+
+    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
+
+    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
+    (Yi Liu via umamahesh)
+
+    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
+
+    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
+
+    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
+
+    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
+
+    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
+
+    HDFS-6283. Write end user documentation for xattrs. (wang)
+
+    HDFS-6412. Interface audience and stability annotations missing from
+    several new classes related to xattrs. (wang)
+
+    HDFS-6259. Support extended attributes via WebHDFS. (yliu)
+
+    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
+    (Yi Liu via umamahesh)
+
+    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
+    (umamahesh via wang)
+
+    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
+
+    HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
+
+    HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
+
+    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
+    (umamahesh)
+
+    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
+
+    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
+    methods cannot throw AclException. (wang)
+
+    HDFS-6413. xattr names erroneously handled as case-insensitive.
+    (Charles Lamb via cnauroth)
+
+    HDFS-6414. xattr modification operations are based on state of latest
+    snapshot instead of current version of inode. (Andrew Wang via cnauroth)
+
+    HDFS-6374. setXAttr should require the user to be the owner of the file
+    or directory (Charles Lamb via wang)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1e19729..14019b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -408,7 +408,7 @@
                     </exec>
                     <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
                         <arg value="-c"/>
-                        <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
+                        <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
                       <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
                       <env key="SKIPTESTS" value="${skipTests}"/>
                     </exec>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
index d648a6f..a710581 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
 
 /**
  * {@link PolicyProvider} for HDFS protocols.
@@ -68,7 +69,10 @@
         GetUserMappingsProtocol.class),
     new Service(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE,
-        RefreshCallQueueProtocol.class)
+        RefreshCallQueueProtocol.class),
+    new Service(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH,
+        GenericRefreshProtocol.class)
   };
   
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 44d5979..2361f0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -26,6 +26,7 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 import java.util.EnumSet;
+import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -133,9 +134,22 @@
   public synchronized int read(byte[] buf, int off, int len) 
                                throws IOException {
 
+    UUID randomId = null;
+    if (LOG.isTraceEnabled()) {
+      randomId = UUID.randomUUID();
+      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+        randomId.toString(), this.filename,
+        this.datanodeID.getHostName()));
+    }
+
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
       readNextPacket();
     }
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Finishing read #%s", randomId));
+    }
+
     if (curDataSlice.remaining() == 0) {
       // we're at EOF now
       return -1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e122fc2..1d83c66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1096,8 +1096,9 @@
           + blk + " not found");
       return;
     }
-    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason,
-        Reason.CORRUPTION_REPORTED), dn, storageID);
+    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
+        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
+        dn, storageID);
   }
 
   private void markBlockAsCorrupt(BlockToMarkCorrupt b,
@@ -1123,7 +1124,25 @@
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
         b.reasonCode);
-    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
+
+    NumberReplicas numberOfReplicas = countNodes(b.stored);
+    boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
+        .getBlockReplication();
+    boolean minReplicationSatisfied =
+        numberOfReplicas.liveReplicas() >= minReplication;
+    boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
+        (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
+        bc.getBlockReplication();
+    boolean corruptedDuringWrite = minReplicationSatisfied &&
+        (b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp());
+    // case 1: we have enough live replicas
+    // case 2: corrupt replicas + live replicas > replication factor
+    // case 3: the block was marked corrupt by a failure during a write, so
+    //         its genstamp differs from that of the valid block
+    // In all of these cases we can delete the corrupt replica immediately.
+    // In case 3 the RBW replica is deleted and the valid block can be replicated.
+    if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
+        || corruptedDuringWrite) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
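A standalone sketch of the invalidation decision added above, with illustrative numbers (replication factor 3, minimum replication 1). It mirrors the three commented cases but is not the BlockManager code itself; the real predicate works from NumberReplicas counts and block generation stamps.

public class CorruptReplicaDecisionSketch {
  static boolean shouldInvalidate(int live, int corrupt, int replication,
                                  int minReplication, boolean storedGenStampNewer) {
    boolean hasEnoughLiveReplicas = live >= replication;              // case 1
    boolean minReplicationSatisfied = live >= minReplication;
    boolean hasMoreCorruptReplicas = minReplicationSatisfied
        && (live + corrupt) > replication;                            // case 2
    boolean corruptedDuringWrite = minReplicationSatisfied
        && storedGenStampNewer;                                       // case 3
    return hasEnoughLiveReplicas || hasMoreCorruptReplicas || corruptedDuringWrite;
  }

  public static void main(String[] args) {
    // 2 live + 2 corrupt replicas of a block with replication factor 3:
    // case 2 applies, so the corrupt replica can be invalidated immediately.
    System.out.println(shouldInvalidate(2, 2, 3, 1, false));  // true
  }
}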
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index a21b7db..34a579f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -265,11 +264,6 @@
     ready = flag;
   }
 
-  private void incrDeletedFileCount(long count) {
-    if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
-  }
-    
   /**
    * Shutdown the filestore
    */
@@ -321,19 +315,7 @@
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
     waitForReady();
 
-    // Always do an implicit mkdirs for parent directory tree.
     long modTime = now();
-    
-    Path parent = new Path(path).getParent();
-    if (parent == null) {
-      // Trying to add "/" as a file - this path has no
-      // parent -- avoids an NPE below.
-      return null;
-    }
-    
-    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
-      return null;
-    }
     INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
         permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
         preferredBlockSize);
@@ -437,65 +419,6 @@
   }
 
   /**
-   * Persist the block list for the inode.
-   */
-  void persistBlocks(String path, INodeFile file, boolean logRetryCache) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logUpdateBlocks(path, file, logRetryCache);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
-            +path+" with "+ file.getBlocks().length 
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  /**
-   * Persist the new block (the last block of the given file).
-   */
-  void persistNewBlock(String path, INodeFile file) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logAddBlock(path, file);
-    } finally {
-      writeUnlock();
-    }
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.persistNewBlock: "
-          + path + " with new block " + file.getLastBlock().toString()
-          + ", current total block count is " + file.getBlocks().length);
-    }
-  }
-  
-  /**
-   * Close file.
-   */
-  void closeFile(String path, INodeFile file) {
-    waitForReady();
-    writeLock();
-    try {
-      // file is closed
-      fsImage.getEditLog().logCloseFile(path, file);
-      if (NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
-            +path+" with "+ file.getBlocks().length 
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  /**
    * Remove a block from the file.
    * @return Whether the block exists in the corresponding file
    */
@@ -540,7 +463,7 @@
    * @deprecated Use {@link #renameTo(String, String, boolean, Rename...)}
    */
   @Deprecated
-  boolean renameTo(String src, String dst, boolean logRetryCache) 
+  boolean renameTo(String src, String dst, long mtime)
       throws QuotaExceededException, UnresolvedLinkException, 
       FileAlreadyExistsException, SnapshotAccessControlException, IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -548,22 +471,20 @@
           +src+" to "+dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (!unprotectedRenameTo(src, dst, now))
+      if (!unprotectedRenameTo(src, dst, mtime))
         return false;
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache);
     return true;
   }
 
   /**
    * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
    */
-  void renameTo(String src, String dst, boolean logRetryCache, 
+  void renameTo(String src, String dst, long mtime,
       Options.Rename... options)
       throws FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, QuotaExceededException,
@@ -573,16 +494,14 @@
           + " to " + dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (unprotectedRenameTo(src, dst, now, options)) {
-        incrDeletedFileCount(1);
+      if (unprotectedRenameTo(src, dst, mtime, options)) {
+        namesystem.incrDeletedFileCount(1);
       }
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache, options);
   }
 
   /**
@@ -1106,11 +1025,7 @@
     waitForReady();
     writeLock();
     try {
-      final Block[] fileBlocks = unprotectedSetReplication(
-          src, replication, blockRepls);
-      if (fileBlocks != null)  // log replication change
-        fsImage.getEditLog().logSetReplication(src, replication);
-      return fileBlocks;
+      return unprotectedSetReplication(src, replication, blockRepls);
     } finally {
       writeUnlock();
     }
@@ -1178,7 +1093,6 @@
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetPermissions(src, permission);
   }
   
   void unprotectedSetPermission(String src, FsPermission permissions)
@@ -1203,7 +1117,6 @@
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetOwner(src, username, groupname);
   }
 
   void unprotectedSetOwner(String src, String username, String groupname)
@@ -1226,18 +1139,14 @@
   /**
    * Concat all the blocks from srcs to trg and delete the srcs files
    */
-  void concat(String target, String [] srcs, boolean supportRetryCache) 
+  void concat(String target, String[] srcs, long timestamp)
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, SnapshotException {
     writeLock();
     try {
       // actual move
       waitForReady();
-      long timestamp = now();
       unprotectedConcat(target, srcs, timestamp);
-      // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, timestamp, 
-          supportRetryCache);
     } finally {
       writeUnlock();
     }
@@ -1312,17 +1221,14 @@
    * @param src Path of a directory to delete
    * @param collectedBlocks Blocks under the deleted directory
    * @param removedINodes INodes that should be removed from {@link #inodeMap}
-   * @param logRetryCache Whether to record RPC IDs in editlog to support retry
-   *                      cache rebuilding.
-   * @return true on successful deletion; else false
+   * @return the number of files that have been removed
    */
-  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
-      List<INode> removedINodes, boolean logRetryCache) throws IOException {
+  long delete(String src, BlocksMapUpdateInfo collectedBlocks,
+              List<INode> removedINodes, long mtime) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
     waitForReady();
-    long now = now();
     final long filesRemoved;
     writeLock();
     try {
@@ -1335,20 +1241,13 @@
             new ArrayList<INodeDirectorySnapshottable>();
         checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, now);
+            removedINodes, mtime);
         namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
     }
-    if (filesRemoved < 0) {
-      return false;
-    }
-    fsImage.getEditLog().logDelete(src, now, logRetryCache);
-    incrDeletedFileCount(filesRemoved);
-    // Blocks/INodes will be handled later by the caller of this method
-    getFSNamesystem().removePathAndBlocks(src, null, null);
-    return true;
+    return filesRemoved;
   }
   
   private static boolean deleteAllowed(final INodesInPath iip,
@@ -1895,112 +1794,6 @@
     // inodes can be null only when its called without holding lock
     return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
   }
-  
-  /**
-   * Create a directory 
-   * If ancestor directories do not exist, automatically create them.
-
-   * @param src string representation of the path to the directory
-   * @param permissions the permission of the directory
-   * @param inheritPermission if the permission of the directory should inherit
-   *                          from its parent or not. u+wx is implicitly added to
-   *                          the automatically created directories, and to the
-   *                          given directory if inheritPermission is true
-   * @param now creation time
-   * @return true if the operation succeeds false otherwise
-   * @throws QuotaExceededException if directory creation violates
-   *                                any quota limit
-   * @throws UnresolvedLinkException if a symlink is encountered in src.                      
-   * @throws SnapshotAccessControlException if path is in RO snapshot
-   */
-  boolean mkdirs(String src, PermissionStatus permissions,
-      boolean inheritPermission, long now)
-      throws FileAlreadyExistsException, QuotaExceededException, 
-             UnresolvedLinkException, SnapshotAccessControlException,
-             AclException {
-    src = normalizePath(src);
-    String[] names = INode.getPathNames(src);
-    byte[][] components = INode.getPathComponents(names);
-    final int lastInodeIndex = components.length - 1;
-
-    writeLock();
-    try {
-      INodesInPath iip = getExistingPathINodes(components);
-      if (iip.isSnapshot()) {
-        throw new SnapshotAccessControlException(
-            "Modification on RO snapshot is disallowed");
-      }
-      INode[] inodes = iip.getINodes();
-
-      // find the index of the first null in inodes[]
-      StringBuilder pathbuilder = new StringBuilder();
-      int i = 1;
-      for(; i < inodes.length && inodes[i] != null; i++) {
-        pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        if (!inodes[i].isDirectory()) {
-          throw new FileAlreadyExistsException("Parent path is not a directory: "
-              + pathbuilder+ " "+inodes[i].getLocalName());
-        }
-      }
-
-      // default to creating parent dirs with the given perms
-      PermissionStatus parentPermissions = permissions;
-
-      // if not inheriting and it's the last inode, there's no use in
-      // computing perms that won't be used
-      if (inheritPermission || (i < lastInodeIndex)) {
-        // if inheriting (ie. creating a file or symlink), use the parent dir,
-        // else the supplied permissions
-        // NOTE: the permissions of the auto-created directories violate posix
-        FsPermission parentFsPerm = inheritPermission
-            ? inodes[i-1].getFsPermission() : permissions.getPermission();
-        
-        // ensure that the permissions allow user write+execute
-        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
-          parentFsPerm = new FsPermission(
-              parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
-              parentFsPerm.getGroupAction(),
-              parentFsPerm.getOtherAction()
-          );
-        }
-        
-        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
-          parentPermissions = new PermissionStatus(
-              parentPermissions.getUserName(),
-              parentPermissions.getGroupName(),
-              parentFsPerm
-          );
-          // when inheriting, use same perms for entire path
-          if (inheritPermission) permissions = parentPermissions;
-        }
-      }
-      
-      // create directories beginning from the first null index
-      for(; i < inodes.length; i++) {
-        pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
-            components[i], (i < lastInodeIndex) ? parentPermissions
-                : permissions, null, now);
-        if (inodes[i] == null) {
-          return false;
-        }
-        // Directory creation also count towards FilesCreated
-        // to match count of FilesDeleted metric.
-        if (getFSNamesystem() != null)
-          NameNode.getNameNodeMetrics().incrFilesCreated();
-
-        final String cur = pathbuilder.toString();
-        fsImage.getEditLog().logMkDir(cur, inodes[i]);
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug(
-              "DIR* FSDirectory.mkdirs: created directory " + cur);
-        }
-      }
-    } finally {
-      writeUnlock();
-    }
-    return true;
-  }
 
   INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
                           List<AclEntry> aclEntries, long timestamp)
@@ -2019,7 +1812,7 @@
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
+  void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
       int pos, byte[] name, PermissionStatus permission,
       List<AclEntry> aclEntries, long timestamp)
       throws QuotaExceededException, AclException {
@@ -2331,10 +2124,8 @@
     }
     return 1;
   }
-  
-  /**
-   */
-  String normalizePath(String src) {
+
+  static String normalizePath(String src) {
     if (src.length() > 1 && src.endsWith("/")) {
       src = src.substring(0, src.length() - 1);
     }
@@ -2419,7 +2210,7 @@
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
    * Sets quota for for a directory.
-   * @return INodeDirectory if any of the quotas have changed. null other wise.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws FileNotFoundException if the path does not exist.
    * @throws PathIsNotDirectoryException if the path is not a directory.
    * @throws QuotaExceededException if the directory tree size is 
@@ -2470,21 +2261,17 @@
   
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @see #unprotectedSetQuota(String, long, long)
    */
-  void setQuota(String src, long nsQuota, long dsQuota) 
+  INodeDirectory setQuota(String src, long nsQuota, long dsQuota)
       throws FileNotFoundException, PathIsNotDirectoryException,
       QuotaExceededException, UnresolvedLinkException,
       SnapshotAccessControlException {
     writeLock();
     try {
-      INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
-      if (dir != null) {
-        final Quota.Counts q = dir.getQuotaCounts();
-        fsImage.getEditLog().logSetQuota(src,
-            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
-      }
+      return unprotectedSetQuota(src, nsQuota, dsQuota);
     } finally {
       writeUnlock();
     }
@@ -2503,18 +2290,14 @@
   /**
    * Sets the access time on the file/directory. Logs it in the transaction log.
    */
-  void setTimes(String src, INode inode, long mtime, long atime, boolean force,
-      int latestSnapshotId) throws QuotaExceededException {
-    boolean status = false;
+  boolean setTimes(INode inode, long mtime, long atime, boolean force,
+                   int latestSnapshotId) throws QuotaExceededException {
     writeLock();
     try {
-      status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
+      return unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
     } finally {
       writeUnlock();
     }
-    if (status) {
-      fsImage.getEditLog().logTimes(src, mtime, atime);
-    }
   }
 
   boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) 
@@ -2678,49 +2461,21 @@
     }
     return perm;
   }
-    
-  /**
-   * Add the given symbolic link to the fs. Record it in the edits log.
-   */
-  INodeSymlink addSymlink(String path, String target,
-      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
-      throws UnresolvedLinkException, FileAlreadyExistsException,
-      QuotaExceededException, SnapshotAccessControlException, AclException {
-    waitForReady();
 
-    final long modTime = now();
-    if (createParent) {
-      final String parent = new Path(path).getParent().toString();
-      if (!mkdirs(parent, dirPerms, true, modTime)) {
-        return null;
-      }
-    }
-    final String userName = dirPerms.getUserName();
-    INodeSymlink newNode  = null;
-    long id = namesystem.allocateNewInodeId();
+  /**
+   * Add the given symbolic link to the namespace; the caller records the edit.
+   */
+  INodeSymlink addSymlink(long id, String path, String target,
+                          long mtime, long atime, PermissionStatus perm)
+          throws UnresolvedLinkException, QuotaExceededException {
     writeLock();
     try {
-      newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
-          new PermissionStatus(userName, null, FsPermission.getDefault()));
+      return unprotectedAddSymlink(id, path, target, mtime, atime, perm);
     } finally {
       writeUnlock();
     }
-    if (newNode == null) {
-      NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
-      return null;
-    }
-    fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode,
-        logRetryCache);
-    
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
-    }
-    return newNode;
   }
 
-  /**
-   * Add the specified path into the namespace. Invoked from edit log processing.
-   */
   INodeSymlink unprotectedAddSymlink(long id, String path, String target,
       long mtime, long atime, PermissionStatus perm)
       throws UnresolvedLinkException, QuotaExceededException {
@@ -2730,11 +2485,10 @@
     return addINode(path, symlink) ? symlink : null;
   }
 
-  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedModifyAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedModifyAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2753,11 +2507,10 @@
     return newAcl;
   }
 
-  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2776,11 +2529,10 @@
     return newAcl;
   }
 
-  void removeDefaultAcl(String src) throws IOException {
+  List<AclEntry> removeDefaultAcl(String src) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveDefaultAcl(src);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveDefaultAcl(src);
     } finally {
       writeUnlock();
     }
@@ -2803,7 +2555,6 @@
     writeLock();
     try {
       unprotectedRemoveAcl(src);
-      fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
     } finally {
       writeUnlock();
     }
@@ -2817,11 +2568,10 @@
     AclStorage.removeINodeAcl(inode, snapshotId);
   }
 
-  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedSetAcl(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedSetAcl(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2868,18 +2618,11 @@
       readUnlock();
     }
   }
-  
-  void removeXAttr(String src, XAttr xAttr) throws IOException {
+
+  XAttr removeXAttr(String src, XAttr xAttr) throws IOException {
     writeLock();
     try {
-      XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
-      if (removedXAttr != null) {
-        fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
-      } else {
-        NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
-        		XAttrHelper.getPrefixName(xAttr) + 
-        		" does not exist on the path " + src);
-      }
+      return unprotectedRemoveXAttr(src, xAttr);
     } finally {
       writeUnlock();
     }
@@ -2917,12 +2660,11 @@
     return xAttrs;
   }
   
-  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache) throws IOException {
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+          throws IOException {
     writeLock();
     try {
       unprotectedSetXAttr(src, xAttr, flag);
-      fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
     } finally {
       writeUnlock();
     }
@@ -2943,6 +2685,7 @@
       EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
         existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+    int userVisibleXAttrsNum = 0; // Number of user visible xAttrs
     boolean exist = false;
     if (existingXAttrs != null) {
       for (XAttr a: existingXAttrs) {
@@ -2951,6 +2694,10 @@
           exist = true;
         } else {
           xAttrs.add(a);
+          
+          if (isUserVisible(a)) {
+            userVisibleXAttrsNum++;
+          }
         }
       }
     }
@@ -2958,7 +2705,11 @@
     XAttrSetFlag.validate(xAttr.getName(), exist, flag);
     xAttrs.add(xAttr);
     
-    if (xAttrs.size() > inodeXAttrsLimit) {
+    if (isUserVisible(xAttr)) {
+      userVisibleXAttrsNum++;
+    }
+    
+    if (userVisibleXAttrsNum > inodeXAttrsLimit) {
       throw new IOException("Cannot add additional XAttr to inode, "
           + "would exceed limit of " + inodeXAttrsLimit);
     }
@@ -2966,6 +2717,14 @@
     return xAttrs;
   }
   
+  private boolean isUserVisible(XAttr xAttr) {
+    return xAttr.getNameSpace() == XAttr.NameSpace.USER ||
+        xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED;
+  }
+  
   List<XAttr> getXAttrs(String src) throws IOException {
     String srcs = normalizePath(src);
     readLock();
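The pattern behind these FSDirectory changes (HDFS-6315 in the CHANGES above) is that the directory layer now only mutates the namespace and reports what changed, while the caller decides what to journal; the FSNamesystem hunks below show the real call sites. A minimal sketch of that division of labor, using stand-in interfaces (NamespaceSketch and EditLogSketch are placeholders, not Hadoop classes).

interface NamespaceSketch {
  boolean setTimes(long inodeId, long mtime, long atime);
}

interface EditLogSketch {
  void logTimes(String src, long mtime, long atime);
}

class SetTimesCallerSketch {
  static void setTimes(NamespaceSketch dir, EditLogSketch log, String src,
                       long inodeId, long mtime, long atime) {
    boolean changed = dir.setTimes(inodeId, mtime, atime);  // namespace change only
    if (changed) {
      log.logTimes(src, mtime, atime);                      // caller journals it
    }
  }
}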
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d7bbf76..b7698b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -145,7 +145,7 @@
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -167,6 +167,7 @@
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1567,6 +1568,7 @@
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
+      getEditLog().logSetPermissions(src, permission);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -1612,6 +1614,7 @@
         }
       }
       dir.setOwner(src, username, group);
+      getEditLog().logSetOwner(src, username, group);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -1742,7 +1745,11 @@
             if (isReadOp) {
               continue;
             }
-            dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId());
+            boolean changed = dir.setTimes(inode, -1, now, false,
+                    iip.getLatestSnapshotId());
+            if (changed) {
+              getEditLog().logTimes(src, -1, now);
+            }
           }
         }
         final long fileSize = iip.isSnapshot() ?
@@ -1953,7 +1960,9 @@
           Arrays.toString(srcs) + " to " + target);
     }
 
-    dir.concat(target,srcs, logRetryCache);
+    long timestamp = now();
+    dir.concat(target, srcs, timestamp);
+    getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
   }
   
   /**
@@ -1994,7 +2003,11 @@
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INode inode = iip.getLastINode();
       if (inode != null) {
-        dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId());
+        boolean changed = dir.setTimes(inode, mtime, atime, true,
+                iip.getLatestSnapshotId());
+        if (changed) {
+          getEditLog().logTimes(src, mtime, atime);
+        }
         resultingStat = getAuditFileInfo(src, false);
       } else {
         throw new FileNotFoundException("File/Directory " + src + " does not exist.");
@@ -2063,7 +2076,7 @@
       checkFsObjectLimit();
 
       // add symbolic link to namespace
-      dir.addSymlink(link, target, dirPerms, createParent, logRetryCache);
+      addSymlink(link, target, dirPerms, createParent, logRetryCache);
       resultingStat = getAuditFileInfo(link, false);
     } finally {
       writeUnlock();
@@ -2115,6 +2128,7 @@
       final Block[] blocks = dir.setReplication(src, replication, blockRepls);
       isFile = blocks != null;
       if (isFile) {
+        getEditLog().logSetReplication(src, replication);
         blockManager.setReplication(blockRepls[0], blockRepls[1], src, blocks);
       }
     } finally {
@@ -2315,8 +2329,16 @@
       final DatanodeDescriptor clientNode = 
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
 
-      INodeFile newNode = dir.addFile(src, permissions, replication, blockSize,
-          holder, clientMachine, clientNode);
+      INodeFile newNode = null;
+
+      // Always do an implicit mkdirs for parent directory tree.
+      Path parent = new Path(src).getParent();
+      if (parent != null && mkdirsRecursively(parent.toString(),
+              permissions, true, now())) {
+        newNode = dir.addFile(src, permissions, replication, blockSize,
+                holder, clientMachine, clientNode);
+      }
+
       if (newNode == null) {
         throw new IOException("Unable to add " + src +  " to namespace");
       }
@@ -2740,7 +2762,7 @@
       INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
       saveAllocatedBlock(src, inodesInPath, newBlock, targets);
 
-      dir.persistNewBlock(src, pendingFile);
+      persistNewBlock(src, pendingFile);
       offset = pendingFile.computeFileSize();
     } finally {
       writeUnlock();
@@ -2960,7 +2982,7 @@
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                       + b + " is removed from pendingCreates");
       }
-      dir.persistBlocks(src, file, false);
+      persistBlocks(src, file, false);
     } finally {
       writeUnlock();
     }
@@ -2996,6 +3018,13 @@
           + (lease != null ? lease.toString()
               : "Holder " + holder + " does not have any open files."));
     }
+    // No further modification is allowed on a deleted file.
+    // A file is considered deleted if it has no parent or is marked
+    // as deleted in the snapshot feature.
+    if (file.getParent() == null || (file.isWithSnapshot() &&
+        file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
+      throw new FileNotFoundException(src);
+    }
     String clientName = file.getFileUnderConstructionFeature().getClientName();
     if (holder != null && !clientName.equals(holder)) {
       throw new LeaseExpiredException("Lease mismatch on " + ident +
@@ -3260,7 +3289,9 @@
           false, false);
     }
 
-    if (dir.renameTo(src, dst, logRetryCache)) {
+    long mtime = now();
+    if (dir.renameTo(src, dst, mtime)) {
+      getEditLog().logRename(src, dst, mtime, logRetryCache);
       return true;
     }
     return false;
@@ -3325,7 +3356,9 @@
           false);
     }
 
-    dir.renameTo(src, dst, logRetryCache, options);
+    long mtime = now();
+    dir.renameTo(src, dst, mtime, options);
+    getEditLog().logRename(src, dst, mtime, logRetryCache, options);
   }
   
   /**
@@ -3408,10 +3441,17 @@
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL, true, false);
       }
+      long mtime = now();
       // Unlink the target directory from directory tree
-      if (!dir.delete(src, collectedBlocks, removedINodes, logRetryCache)) {
+      long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
+              mtime);
+      if (filesRemoved < 0) {
         return false;
       }
+      getEditLog().logDelete(src, mtime, logRetryCache);
+      incrDeletedFileCount(filesRemoved);
+      // Blocks/INodes will be handled later
+      removePathAndBlocks(src, null, null);
       ret = true;
     } finally {
       writeUnlock();
@@ -3419,6 +3459,7 @@
     getEditLog().logSync(); 
     removeBlocks(collectedBlocks); // Incremental deletion of blocks
     collectedBlocks.clear();
+
     dir.writeLock();
     try {
       dir.removeFromInodeMap(removedINodes);
@@ -3671,13 +3712,119 @@
     // create multiple inodes.
     checkFsObjectLimit();
 
-    if (!dir.mkdirs(src, permissions, false, now())) {
+    if (!mkdirsRecursively(src, permissions, false, now())) {
       throw new IOException("Failed to create directory: " + src);
     }
     return true;
   }
 
   /**
+   * Create a directory.
+   * If ancestor directories do not exist, automatically create them.
+   *
+   * @param src string representation of the path to the directory
+   * @param permissions the permission of the directory
+   * @param inheritPermission whether the permission of the directory should be
+   *                          inherited from its parent. u+wx is implicitly added
+   *                          to the automatically created directories, and to the
+   *                          given directory if inheritPermission is true
+   * @param now creation time
+   * @return true if the operation succeeds, false otherwise
+   * @throws QuotaExceededException if directory creation violates
+   *                                any quota limit
+   * @throws UnresolvedLinkException if a symlink is encountered in src
+   * @throws SnapshotAccessControlException if path is in RO snapshot
+   */
+  private boolean mkdirsRecursively(String src, PermissionStatus permissions,
+                 boolean inheritPermission, long now)
+          throws FileAlreadyExistsException, QuotaExceededException,
+                 UnresolvedLinkException, SnapshotAccessControlException,
+                 AclException {
+    src = FSDirectory.normalizePath(src);
+    String[] names = INode.getPathNames(src);
+    byte[][] components = INode.getPathComponents(names);
+    final int lastInodeIndex = components.length - 1;
+
+    dir.writeLock();
+    try {
+      INodesInPath iip = dir.getExistingPathINodes(components);
+      if (iip.isSnapshot()) {
+        throw new SnapshotAccessControlException(
+                "Modification on RO snapshot is disallowed");
+      }
+      INode[] inodes = iip.getINodes();
+
+      // find the index of the first null in inodes[]
+      StringBuilder pathbuilder = new StringBuilder();
+      int i = 1;
+      for(; i < inodes.length && inodes[i] != null; i++) {
+        pathbuilder.append(Path.SEPARATOR).append(names[i]);
+        if (!inodes[i].isDirectory()) {
+          throw new FileAlreadyExistsException(
+                  "Parent path is not a directory: "
+                  + pathbuilder + " "+inodes[i].getLocalName());
+        }
+      }
+
+      // default to creating parent dirs with the given perms
+      PermissionStatus parentPermissions = permissions;
+
+      // if not inheriting and it's the last inode, there's no use in
+      // computing perms that won't be used
+      if (inheritPermission || (i < lastInodeIndex)) {
+        // if inheriting (i.e. creating a file or symlink), use the parent dir,
+        // else the supplied permissions
+        // NOTE: the permissions of the auto-created directories violate posix
+        FsPermission parentFsPerm = inheritPermission
+                ? inodes[i-1].getFsPermission() : permissions.getPermission();
+
+        // ensure that the permissions allow user write+execute
+        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
+          parentFsPerm = new FsPermission(
+                  parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
+                  parentFsPerm.getGroupAction(),
+                  parentFsPerm.getOtherAction()
+          );
+        }
+
+        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
+          parentPermissions = new PermissionStatus(
+                  parentPermissions.getUserName(),
+                  parentPermissions.getGroupName(),
+                  parentFsPerm
+          );
+          // when inheriting, use same perms for entire path
+          if (inheritPermission) permissions = parentPermissions;
+        }
+      }
+
+      // create directories beginning from the first null index
+      for(; i < inodes.length; i++) {
+        pathbuilder.append(Path.SEPARATOR).append(names[i]);
+        dir.unprotectedMkdir(allocateNewInodeId(), iip, i, components[i],
+                (i < lastInodeIndex) ? parentPermissions : permissions, null,
+                now);
+        if (inodes[i] == null) {
+          return false;
+        }
+        // Directory creation also counts towards FilesCreated
+        // to match count of FilesDeleted metric.
+        NameNode.getNameNodeMetrics().incrFilesCreated();
+
+        final String cur = pathbuilder.toString();
+        getEditLog().logMkDir(cur, inodes[i]);
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug(
+                  "mkdirs: created directory " + cur);
+        }
+      }
+    } finally {
+      dir.writeUnlock();
+    }
+    return true;
+  }
+
+  /**
    * Get the content summary for a specific file/dir.
    *
    * @param src The string representation of the path to the file
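The permission handling in mkdirsRecursively above guarantees that implicitly created parent directories always grant their owner write and execute access. A minimal, self-contained sketch of that adjustment (the class name and main method are illustrative only, not part of this patch):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ParentPermissionSketch {
  // Mirrors the adjustment above: the returned permission always grants the
  // owner write+execute, as required for auto-created parent directories.
  static FsPermission ensureUserWriteExecute(FsPermission perm) {
    if (perm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
      return perm;
    }
    return new FsPermission(
        perm.getUserAction().or(FsAction.WRITE_EXECUTE),
        perm.getGroupAction(),
        perm.getOtherAction());
  }

  public static void main(String[] args) {
    // r-xr-xr-x becomes rwxr-xr-x for the implicitly created parents.
    System.out.println(ensureUserWriteExecute(new FsPermission((short) 0555)));
  }
}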
@@ -3721,7 +3868,7 @@
    * 
    * Note: This does not support ".inodes" relative path.
    */
-  void setQuota(String path, long nsQuota, long dsQuota) 
+  void setQuota(String path, long nsQuota, long dsQuota)
       throws IOException, UnresolvedLinkException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
@@ -3729,7 +3876,12 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + path);
-      dir.setQuota(path, nsQuota, dsQuota);
+      INodeDirectory changed = dir.setQuota(path, nsQuota, dsQuota);
+      if (changed != null) {
+        final Quota.Counts q = changed.getQuotaCounts();
+        getEditLog().logSetQuota(path,
+                q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
+      }
     } finally {
       writeUnlock();
     }
@@ -3770,7 +3922,7 @@
         pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
             pendingFile, lastBlockLength);
       }
-      dir.persistBlocks(src, pendingFile, false);
+      persistBlocks(src, pendingFile, false);
     } finally {
       writeUnlock();
     }
@@ -3963,7 +4115,7 @@
     final INodeFile newFile = pendingFile.toCompleteFile(now());
 
     // close file and persist block allocations for this file
-    dir.closeFile(src, newFile);
+    closeFile(src, newFile);
 
     blockManager.checkReplication(newFile);
   }
@@ -4114,7 +4266,8 @@
         src = closeFileCommitBlocks(iFile, storedBlock);
       } else {
         // If this commit does not want to close the file, persist blocks
-        src = persistBlocks(iFile, false);
+        src = iFile.getFullPathName();
+        persistBlocks(src, iFile, false);
       }
     } finally {
       writeUnlock();
@@ -4153,21 +4306,6 @@
   }
 
   /**
-   * Persist the block list for the given file.
-   *
-   * @param pendingFile
-   * @return Path to the given file.
-   * @throws IOException
-   */
-  @VisibleForTesting
-  String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
-      throws IOException {
-    String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
-    return src;
-  }
-
-  /**
    * Renew the lease(s) held by the given client
    */
   void renewLease(String holder) throws IOException {
@@ -4351,6 +4489,85 @@
   }
 
   /**
+   * Persist the block list for the inode.
+   * @param path
+   * @param file
+   * @param logRetryCache
+   */
+  private void persistBlocks(String path, INodeFile file,
+                             boolean logRetryCache) {
+    assert hasWriteLock();
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logUpdateBlocks(path, file, logRetryCache);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistBlocks: " + path
+              + " with " + file.getBlocks().length + " blocks is persisted to" +
+              " the file system");
+    }
+  }
+
+  void incrDeletedFileCount(long count) {
+    NameNode.getNameNodeMetrics().incrFilesDeleted(count);
+  }
+
+  /**
+   * Close file.
+   * @param path
+   * @param file
+   */
+  private void closeFile(String path, INodeFile file) {
+    assert hasWriteLock();
+    dir.waitForReady();
+    // file is closed
+    getEditLog().logCloseFile(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("closeFile: "
+              +path+" with "+ file.getBlocks().length
+              +" blocks is persisted to the file system");
+    }
+  }
+
+  /**
+   * Add the given symbolic link to the fs. Record it in the edits log.
+   * @param path
+   * @param target
+   * @param dirPerms
+   * @param createParent
+   * @param logRetryCache
+   */
+  private INodeSymlink addSymlink(String path, String target,
+                                  PermissionStatus dirPerms,
+                                  boolean createParent, boolean logRetryCache)
+      throws UnresolvedLinkException, FileAlreadyExistsException,
+      QuotaExceededException, SnapshotAccessControlException, AclException {
+    dir.waitForReady();
+
+    final long modTime = now();
+    if (createParent) {
+      final String parent = new Path(path).getParent().toString();
+      if (!mkdirsRecursively(parent, dirPerms, true, modTime)) {
+        return null;
+      }
+    }
+    final String userName = dirPerms.getUserName();
+    long id = allocateNewInodeId();
+    INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
+            new PermissionStatus(userName, null, FsPermission.getDefault()));
+    if (newNode == null) {
+      NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
+      return null;
+    }
+    getEditLog().logSymlink(path, target, modTime, modTime, newNode,
+        logRetryCache);
+
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
+    }
+    return newNode;
+  }
+
+  /**
    * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
    * there are found to be insufficient resources available, causes the NN to
    * enter safe mode. If resources are later found to have returned to
@@ -4683,6 +4900,21 @@
   }
 
   /**
+   * Persist the new block (the last block of the given file).
+   * @param path
+   * @param file
+   */
+  private void persistNewBlock(String path, INodeFile file) {
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logAddBlock(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistNewBlock: "
+              + path + " with new block " + file.getLastBlock().toString()
+              + ", current total block count is " + file.getBlocks().length);
+    }
+  }
+
+  /**
    * SafeModeInfo contains information related to the safe mode.
    * <p>
    * An instance of {@link SafeModeInfo} is created when the name node
@@ -6090,7 +6322,7 @@
     blockinfo.setExpectedLocations(storages);
 
     String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
+    persistBlocks(src, pendingFile, logRetryCache);
   }
 
   // rename was successful. If any part of the renamed subtree had
@@ -7415,14 +7647,20 @@
 
       returnInfo = finalizeRollingUpgradeInternal(now());
       getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
-      getFSImage().saveNamespace(this);
+      if (haEnabled) {
+        // roll the edit log to make sure the standby NameNode can tail
+        getFSImage().rollEditLog();
+      }
       getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
           NameNodeFile.IMAGE);
     } finally {
       writeUnlock();
     }
 
-    // getEditLog().logSync() is not needed since it does saveNamespace 
+    if (!haEnabled) {
+      // Sync not needed for ha since the edit was rolled after logging.
+      getEditLog().logSync();
+    }
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
@@ -7718,7 +7956,8 @@
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.modifyAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7739,7 +7978,8 @@
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7760,7 +8000,8 @@
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeDefaultAcl(src);
+      List<AclEntry> newAcl = dir.removeDefaultAcl(src);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7782,6 +8023,7 @@
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.removeAcl(src);
+      getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7802,7 +8044,8 @@
       checkNameNodeSafeMode("Cannot set ACL on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.setAcl(src, aclSpec);
+      List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7878,7 +8121,8 @@
         checkOwner(pc, src);
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      dir.setXAttr(src, xAttr, flag, logRetryCache);
+      dir.setXAttr(src, xAttr, flag);
+      getEditLog().logSetXAttr(src, xAttr, logRetryCache);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7999,7 +8243,10 @@
         checkPathAccess(pc, src, FsAction.WRITE);
       }
       
-      dir.removeXAttr(src, xAttr);
+      XAttr removedXAttr = dir.removeXAttr(src, xAttr);
+      if (removedXAttr != null) {
+        getEditLog().logRemoveXAttr(src, removedXAttr);
+      }
       resultingStat = getAuditFileInfo(src, false);
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
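Taken together, the FSNamesystem changes above follow one discipline: mutate the in-memory namespace through FSDirectory while holding the namesystem write lock, append the corresponding edit-log record before releasing the lock, and sync the log outside the lock. The following toy model (plain Java, not Hadoop code; the class and field names are invented for illustration) captures that ordering:

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Toy model of the locking/logging pattern used by the rewritten
// FSNamesystem methods above (e.g. setAcl, delete, rename).
public class EditLogPatternSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final StringBuilder namespace = new StringBuilder();
  private final StringBuilder editLog = new StringBuilder();

  void applyAndLog(String op) {
    lock.writeLock().lock();
    try {
      namespace.append(op).append('\n');   // 1. mutate in-memory state
      editLog.append(op).append('\n');     // 2. log while holding the lock
    } finally {
      lock.writeLock().unlock();
    }
    sync();                                // 3. sync outside the lock
  }

  private void sync() {
    // stand-in for FSEditLog#logSync(): flush buffered edits to stable storage
  }

  public static void main(String[] args) {
    new EditLogPatternSketch().applyAndLog("OP_SET_ACL /foo");
  }
}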
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 2840e8a..a41ff13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -43,6 +43,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.io.nativeio.NativeIO;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -132,10 +134,14 @@
     Preconditions.checkState(!dstFile.exists(),
         "Can't finalize edits file " + inprogressFile + " since finalized file " +
         "already exists");
-    if (!inprogressFile.renameTo(dstFile)) {
+
+    try {
+      NativeIO.renameTo(inprogressFile, dstFile);
+    } catch (IOException e) {
       errorReporter.reportErrorOnFile(dstFile);
-      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile);
+      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile, e);
     }
+
     if (inprogressFile.equals(currentInProgress)) {
       currentInProgress = null;
     }
@@ -513,11 +519,16 @@
       File src = file;
       File dst = new File(src.getParent(), src.getName() + newSuffix);
       // renameTo fails on Windows if the destination file already exists.
-      if (!src.renameTo(dst)) {
-        if (!dst.delete() || !src.renameTo(dst)) {
-          throw new IOException(
-            "Couldn't rename log " + src + " to " + dst);
+      try {
+        if (dst.exists()) {
+          if (!dst.delete()) {
+            throw new IOException("Couldn't delete " + dst);
+          }
         }
+        NativeIO.renameTo(src, dst);
+      } catch (IOException e) {
+        throw new IOException(
+            "Couldn't rename log " + src + " to " + dst, e);
       }
       file = dst;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 656790f..d21700b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -132,6 +132,8 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.WritableRpcEngine;
+import org.apache.hadoop.ipc.RefreshRegistry;
+import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
@@ -147,6 +149,9 @@
 import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
 import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueProtocolService;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
@@ -229,6 +234,11 @@
     BlockingService refreshCallQueueService = RefreshCallQueueProtocolService
         .newReflectiveBlockingService(refreshCallQueueXlator);
 
+    GenericRefreshProtocolServerSideTranslatorPB genericRefreshXlator =
+        new GenericRefreshProtocolServerSideTranslatorPB(this);
+    BlockingService genericRefreshService = GenericRefreshProtocolService
+        .newReflectiveBlockingService(genericRefreshXlator);
+
     GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = 
         new GetUserMappingsProtocolServerSideTranslatorPB(this);
     BlockingService getUserMappingService = GetUserMappingsProtocolService
@@ -278,6 +288,8 @@
       // We support Refreshing call queue here in case the client RPC queue is full
       DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
           refreshCallQueueService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+          genericRefreshService, serviceRpcServer);
       DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
           getUserMappingService, serviceRpcServer);
   
@@ -322,6 +334,8 @@
         refreshUserMappingService, clientRpcServer);
     DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
         refreshCallQueueService, clientRpcServer);
+    DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+        genericRefreshService, clientRpcServer);
     DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
         getUserMappingService, clientRpcServer);
 
@@ -1154,6 +1168,12 @@
       serviceRpcServer.refreshCallQueue(conf);
     }
   }
+
+  @Override // GenericRefreshProtocol
+  public Collection<RefreshResponse> refresh(String identifier, String[] args) {
+    // Let the registry handle as needed
+    return RefreshRegistry.defaultRegistry().dispatch(identifier, args);
+  }
   
   @Override // GetUserMappingsProtocol
   public String[] getGroupsForUser(String user) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
index 10ecfb0..b1fe53d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
 /** The full set of RPC methods implemented by the Namenode.  */
@@ -35,6 +36,7 @@
           RefreshAuthorizationPolicyProtocol,
           RefreshUserMappingsProtocol,
           RefreshCallQueueProtocol,
+          GenericRefreshProtocol,
           GetUserMappingsProtocol,
           HAServiceProtocol {
 }
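With GenericRefreshProtocol wired into NamenodeProtocols above, a server-side component exposes a refresh hook by registering a RefreshHandler, as the new TestGenericRefresh below does with mocks. A rough sketch of a hand-written handler; the identifier "myCache" and the class itself are illustrative assumptions, not part of this patch:

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

// Register under an identifier that "dfsadmin -refresh <host:port> <identifier>"
// can then reach through RefreshRegistry.
public class MyCacheRefresher implements RefreshHandler {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    // reload whatever state this component owns, then report success
    return RefreshResponse.successResponse();
  }

  public static void register() {
    RefreshRegistry.defaultRegistry().register("myCache", new MyCacheRefresher());
  }
}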
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 3325570c..dbe22845 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -40,7 +40,8 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
-import org.apache.hadoop.hdfs.tools.TableListing.Justification;
+import org.apache.hadoop.tools.TableListing;
+import org.apache.hadoop.tools.TableListing.Justification;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index cc5bbcf..e3cdd63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -26,6 +26,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -62,12 +63,17 @@
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -575,7 +581,7 @@
    * @exception IOException 
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
    */
-  public int restoreFaileStorage(String arg) throws IOException {
+  public int restoreFailedStorage(String arg) throws IOException {
     int exitCode = -1;
 
     if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
@@ -688,6 +694,7 @@
       "\t[-refreshUserToGroupsMappings]\n" +
       "\t[-refreshSuperUserGroupsConfiguration]\n" +
       "\t[-refreshCallQueue]\n" +
+      "\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
       "\t[-printTopology]\n" +
       "\t[-refreshNamenodes datanodehost:port]\n"+
       "\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
@@ -764,6 +771,10 @@
 
     String refreshCallQueue = "-refreshCallQueue: Reload the call queue from config\n";
 
+    String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
+      "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
+      "\ton <hostname:port>. All other args after are sent to the host.";
+
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
     
@@ -848,6 +859,8 @@
       System.out.println(refreshSuperUserGroupsConfiguration);
     } else if ("refreshCallQueue".equals(cmd)) {
       System.out.println(refreshCallQueue);
+    } else if ("refresh".equals(cmd)) {
+      System.out.println(genericRefresh);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
     } else if ("refreshNamenodes".equals(cmd)) {
@@ -887,6 +900,7 @@
       System.out.println(refreshUserToGroupsMappings);
       System.out.println(refreshSuperUserGroupsConfiguration);
       System.out.println(refreshCallQueue);
+      System.out.println(genericRefresh);
       System.out.println(printTopology);
       System.out.println(refreshNamenodes);
       System.out.println(deleteBlockPool);
@@ -1100,6 +1114,56 @@
     return 0;
   }
 
+  public int genericRefresh(String[] argv, int i) throws IOException {
+    String hostport = argv[i++];
+    String identifier = argv[i++];
+    String[] args = Arrays.copyOfRange(argv, i, argv.length);
+
+    // Get the current configuration
+    Configuration conf = getConf();
+
+    // for security authorization
+    // server principal for this call
+    // should be NN's one.
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
+
+    // Create the client
+    Class<?> xface = GenericRefreshProtocolPB.class;
+    InetSocketAddress address = NetUtils.createSocketAddr(hostport);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)
+      RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
+        ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0);
+
+    GenericRefreshProtocol xlator =
+      new GenericRefreshProtocolClientSideTranslatorPB(proxy);
+
+    // Refresh
+    Collection<RefreshResponse> responses = xlator.refresh(identifier, args);
+
+    int returnCode = 0;
+
+    // Print refresh responses
+    System.out.println("Refresh Responses:\n");
+    for (RefreshResponse response : responses) {
+      System.out.println(response.toString());
+
+      if (returnCode == 0 && response.getReturnCode() != 0) {
+        // This is the first non-zero return code, so we should return this
+        returnCode = response.getReturnCode();
+      } else if (returnCode != 0 && response.getReturnCode() != 0) {
+        // Then now we have multiple non-zero return codes,
+        // so we merge them into -1
+        returnCode = -1;
+      }
+    }
+
+    return returnCode;
+  }
+
   /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
@@ -1162,6 +1226,9 @@
     } else if ("-refreshCallQueue".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshCallQueue]");
+    } else if ("-refresh".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
@@ -1195,6 +1262,7 @@
       System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-refreshSuperUserGroupsConfiguration]");
       System.err.println("           [-refreshCallQueue]");
+      System.err.println("           [-refresh]");
       System.err.println("           [-printTopology]");
       System.err.println("           [-refreshNamenodes datanodehost:port]");
       System.err.println("           [-deleteBlockPool datanode-host:port blockpoolId [force]]");
@@ -1292,6 +1360,11 @@
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-refresh".equals(cmd)) {
+      if (argv.length < 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
       if (argv.length != 1) {
         printUsage(cmd);
@@ -1362,7 +1435,7 @@
       } else if ("-rollEdits".equals(cmd)) {
         exitCode = rollEdits();
       } else if ("-restoreFailedStorage".equals(cmd)) {
-        exitCode = restoreFaileStorage(argv[i]);
+        exitCode = restoreFailedStorage(argv[i]);
       } else if ("-refreshNodes".equals(cmd)) {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {
@@ -1387,6 +1460,8 @@
         exitCode = refreshSuperUserGroupsConfiguration();
       } else if ("-refreshCallQueue".equals(cmd)) {
         exitCode = refreshCallQueue();
+      } else if ("-refresh".equals(cmd)) {
+        exitCode = genericRefresh(argv, i);
       } else if ("-printTopology".equals(cmd)) {
         exitCode = printTopology();
       } else if ("-refreshNamenodes".equals(cmd)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
new file mode 100644
index 0000000..664a478
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.ipc.RefreshHandler;
+
+import org.apache.hadoop.ipc.RefreshRegistry;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.mockito.Mockito;
+
+/**
+ * Before all tests, a MiniDFSCluster is spun up.
+ * Before each test, mock refresh handlers are created and registered.
+ * After each test, the mock handlers are unregistered.
+ * After all tests, the cluster is spun down.
+ */
+public class TestGenericRefresh {
+  private static MiniDFSCluster cluster;
+  private static Configuration config;
+  private static final int NNPort = 54222;
+
+  private static RefreshHandler firstHandler;
+  private static RefreshHandler secondHandler;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    config = new Configuration();
+    config.set("hadoop.security.authorization", "true");
+
+    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
+    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    cluster.waitActive();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Register Handlers, first one just sends an ok response
+    firstHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(RefreshResponse.successResponse());
+    RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler);
+
+    // Second handler has conditional response for testing args
+    secondHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
+      .toReturn(new RefreshResponse(3, "three"));
+    Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
+      .toReturn(new RefreshResponse(2, "two"));
+    RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
+    RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
+  }
+
+  @Test
+  public void testInvalidCommand() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String [] args = new String[]{"-refresh", "nn"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+  }
+
+  @Test
+  public void testInvalidIdentifier() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
+  }
+
+  @Test
+  public void testValidIdentifier() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should succeed", 0, exitCode);
+
+    Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
+    // Second handler was never called
+    Mockito.verify(secondHandler, Mockito.never())
+      .handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
+  }
+
+  @Test
+  public void testVariableArgs() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should return 2", 2, exitCode);
+
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    assertEquals("DFSAdmin should now return 3", 3, exitCode);
+
+    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
+    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
+  }
+
+  @Test
+  public void testUnregistration() throws Exception {
+    RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
+
+    // And now this should fail
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should return -1", -1, exitCode);
+  }
+
+  @Test
+  public void testUnregistrationReturnValue() {
+    RefreshHandler mockHandler = Mockito.mock(RefreshHandler.class);
+    RefreshRegistry.defaultRegistry().register("test", mockHandler);
+    boolean ret = RefreshRegistry.defaultRegistry().unregister("test", mockHandler);
+    assertTrue(ret);
+  }
+
+  @Test
+  public void testMultipleRegistration() throws Exception {
+    RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
+    RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);
+
+    // this should trigger both
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
+
+    // verify we called both
+    Mockito.verify(firstHandler).handleRefresh("sharedId", new String[]{"one"});
+    Mockito.verify(secondHandler).handleRefresh("sharedId", new String[]{"one"});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
+  }
+
+  @Test
+  public void testMultipleReturnCodeMerging() throws Exception {
+    // Two handlers which return two non-zero values
+    RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(new RefreshResponse(23, "Twenty Three"));
+
+    RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(new RefreshResponse(10, "Ten"));
+
+    // Then registered to the same ID
+    RefreshRegistry.defaultRegistry().register("shared", handlerOne);
+    RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
+
+    // We refresh both
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
+
+    // Verify we called both
+    Mockito.verify(handlerOne).handleRefresh("shared", new String[]{});
+    Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("shared");
+  }
+
+  @Test
+  public void testExceptionResultsInNormalError() throws Exception {
+    // In this test, we ensure that all handlers are called even if we throw an exception in one
+    RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
+
+    RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toThrow(new RuntimeException("More Exceptions"));
+
+    RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
+    RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
+
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // Exceptions result in a -1
+
+    Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[]{});
+    Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[]{});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 3e29059..286be84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -34,6 +34,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -54,10 +55,16 @@
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.*;
 
+import com.google.common.collect.Lists;
+
 /**
  * This class tests commands from DFSShell.
  */
@@ -1620,6 +1627,240 @@
     int res = admin.run(new String[] {"-refreshNodes"});
     assertEquals("expected to fail -1", res , -1);
   }
+  
+  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
+  // ACLs)
+  @Test (timeout = 120000)
+  public void testCopyCommandsWithPreserveOption() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+      
+      fs.setXAttr(src, "user.a1", new byte[]{0x31, 0x32, 0x33});
+      fs.setXAttr(src, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      
+      shell = new FsShell(conf);
+      
+      // -p
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(), 
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -p is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      Map<String, byte[]> xattrs = fs.getXAttrs(target1);
+      assertTrue(xattrs.isEmpty());
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptop
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptop", src.toUri().toString(), 
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptop is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target2);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target2).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopx
+      Path target3 = new Path(hdfsTestDir, "targetfile3");
+      argv = new String[] { "-cp", "-ptopx", src.toUri().toString(), 
+          target3.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target3);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target3);
+      assertEquals(2, xattrs.size());
+      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      acls = fs.getAclStatus(target3).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa
+      Path target4 = new Path(hdfsTestDir, "targetfile4");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target4.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target4);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target4);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target4).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
+
+      // -ptoa (verify -pa option will preserve permissions also)
+      Path target5 = new Path(hdfsTestDir, "targetfile5");
+      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
+          target5.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target5);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target5);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target5).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
+  // Verify cp -pa option will preserve both ACL and sticky bit.
+  @Test (timeout = 120000)
+  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(src,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      shell = new FsShell(conf);
+
+      // -p preserves the sticky bit but does not preserve the ACL
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa preserves both the sticky bit and the ACL
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      acls = fs.getAclStatus(target2).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
 
   // force Copy Option is -f
   @Test (timeout = 30000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 1ec395d..8734710 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -25,13 +25,16 @@
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -453,4 +456,66 @@
     }
     fs.delete(fileName, true);
   }
+
+  /**
+   * Test that blocks get replicated when corrupted replicas exist and the
+   * number of good replicas is at least equal to the minimum replication.
+   *
+   * Simulate RBW blocks by creating dummy copies, then restart the DataNodes
+   * so the corrupted blocks are detected as soon as possible.
+   */
+  @Test(timeout=30000)
+  public void testReplicationWhenBlockCorruption() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setLong(
+          DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      FileSystem fs = cluster.getFileSystem();
+      FSDataOutputStream create = fs.create(new Path("/test"));
+      fs.setReplication(new Path("/test"), (short) 1);
+      create.write(new byte[1024]);
+      create.close();
+
+      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
+      File participatedNodeDirs = null;
+      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
+        File storageDir = cluster.getInstanceStorageDir(i, 0);
+        String bpid = cluster.getNamesystem().getBlockPoolId();
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        if (data_dir.listFiles().length == 0) {
+          nonParticipatedNodeDirs.add(data_dir);
+        } else {
+          participatedNodeDirs = data_dir;
+        }
+      }
+
+      String blockFile = null;
+      File[] listFiles = participatedNodeDirs.listFiles();
+      for (File file : listFiles) {
+        if (file.getName().startsWith("blk_")
+            && !file.getName().endsWith("meta")) {
+          blockFile = file.getName();
+          for (File file1 : nonParticipatedNodeDirs) {
+            file1.mkdirs();
+            new File(file1, blockFile).createNewFile();
+            new File(file1, blockFile + "_1000.meta").createNewFile();
+          }
+          break;
+        }
+      }
+
+      fs.setReplication(new Path("/test"), (short) 3);
+      // Restart the DataNodes so all of them detect the dummy copied blocks.
+      cluster.restartDataNodes();
+      cluster.waitActive();
+      cluster.triggerBlockReports();
+      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
index 473e9c0..d2f58a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
@@ -410,6 +410,7 @@
    * The second datanode is started in the cluster.
    * As soon as the replication process is completed test finds a block from
    * the second DN and sets its GS to be < of original one.
+   * This is case 3 of markBlockAsCorrupt, so we expect one pending deletion.
    * Block report is forced and the check for # of currupted blocks is performed.
    * Another block is chosen and its length is set to a lesser than original.
    * A check for another corrupted block is performed after yet another
@@ -436,20 +437,20 @@
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 
-    reports = getBlockReports(dn, poolId, true, true);
+    reports = getBlockReports(dn, poolId, false, true);
     sendBlockReports(dnR, poolId, reports);
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(2L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index c2d8698..704d099 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -325,15 +325,14 @@
       }
     }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
     BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
+    List<BPServiceActor> actors = bpos.getBPServiceActors();
+    assertEquals(2, actors.size());
     bpos.start();
     try {
       waitForInitialization(bpos);
-      List<BPServiceActor> actors = bpos.getBPServiceActors();
-      // even if one of the actor initialization fails also other will be
-      // running until both failed.
-      assertEquals(2, actors.size());
-      BPServiceActor actor = actors.get(0);
-      waitForBlockReport(actor.getNameNodeProxy());
+      // Even if one actor's initialization fails, the other one will still
+      // finish its block report.
+      waitForBlockReport(mockNN1, mockNN2);
     } finally {
       bpos.stop();
     }
@@ -409,7 +408,32 @@
       }
     }, 500, 10000);
   }
-  
+
+  private void waitForBlockReport(
+      final DatanodeProtocolClientSideTranslatorPB mockNN1,
+      final DatanodeProtocolClientSideTranslatorPB mockNN2)
+          throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return get(mockNN1) || get(mockNN2);
+      }
+
+      private Boolean get(DatanodeProtocolClientSideTranslatorPB mockNN) {
+        try {
+          Mockito.verify(mockNN).blockReport(
+                  Mockito.<DatanodeRegistration>anyObject(),
+                  Mockito.eq(FAKE_BPID),
+                  Mockito.<StorageBlockReport[]>anyObject());
+          return true;
+        } catch (Throwable t) {
+          LOG.info("waiting on block report: " + t.getMessage());
+          return false;
+        }
+      }
+    }, 500, 10000);
+  }
+
   private ReceivedDeletedBlockInfo[] waitForBlockReceived(
       ExtendedBlock fakeBlock,
       DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 8ef3887..bb8ef96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -1408,12 +1408,17 @@
    */
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    for (DataNode dn : cluster.getDataNodes()) {
-      DatanodeDescriptor descriptor =
-          datanodeManager.getDatanode(dn.getDatanodeId());
-      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index 2933629..dac8c0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -62,8 +62,6 @@
     doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
     doReturn("").when(namesystemSpy).closeFileCommitBlocks(
         any(INodeFile.class), any(BlockInfo.class));
-    doReturn("").when(namesystemSpy).persistBlocks(
-        any(INodeFile.class), anyBoolean());
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
 
     return namesystemSpy;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
new file mode 100644
index 0000000..cf4b29f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+
+/**
+ * Test races between delete and other operations.  For now only addBlock()
+ * is tested, since all other operations hold the FSNamesystem lock for
+ * their whole duration.
+ */
+public class TestDeleteRace {
+  private static final Log LOG = LogFactory.getLog(TestDeleteRace.class);
+  private static final Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+
+  @Test  
+  public void testDeleteAddBlockRace() throws Exception {
+    testDeleteAddBlockRace(false);
+  }
+
+  @Test  
+  public void testDeleteAddBlockRaceWithSnapshot() throws Exception {
+    testDeleteAddBlockRace(true);
+  }
+
+  private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
+    try {
+      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+          SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fs = cluster.getFileSystem();
+      final String fileName = "/testDeleteAddBlockRace";
+      Path filePath = new Path(fileName);
+
+      FSDataOutputStream out = null;
+      out = fs.create(filePath);
+      if (hasSnapshot) {
+        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
+            "/"), "s1");
+      }
+
+      Thread deleteThread = new DeleteThread(fs, filePath);
+      deleteThread.start();
+
+      try {
+        // Write data and sync to make sure a block is allocated.
+        out.write(new byte[32], 0, 32);
+        out.hsync();
+        Assert.fail("Should have failed.");
+      } catch (FileNotFoundException e) {
+        GenericTestUtils.assertExceptionContains(filePath.getName(), e);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private static class SlowBlockPlacementPolicy extends
+      BlockPlacementPolicyDefault {
+    @Override
+    public DatanodeStorageInfo[] chooseTarget(String srcPath,
+                                      int numOfReplicas,
+                                      Node writer,
+                                      List<DatanodeStorageInfo> chosenNodes,
+                                      boolean returnChosenNodes,
+                                      Set<Node> excludedNodes,
+                                      long blocksize,
+                                      StorageType storageType) {
+      DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
+          numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
+          blocksize, storageType);
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException e) {}
+      return results;
+    }
+  }
+
+  private class DeleteThread extends Thread {
+    private FileSystem fs;
+    private Path path;
+
+    DeleteThread(FileSystem fs, Path path) {
+      this.fs = fs;
+      this.path = path;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Thread.sleep(1000);
+        LOG.info("Deleting " + path);
+        final FSDirectory fsdir = cluster.getNamesystem().dir;
+        INode fileINode = fsdir.getINode4Write(path.toString());
+        INodeMap inodeMap = (INodeMap) Whitebox.getInternalState(fsdir,
+            "inodeMap");
+
+        fs.delete(path, false);
+        // after deletion, add the inode back to the inodeMap
+        inodeMap.put(fileINode);
+        LOG.info("Deleted " + path);
+      } catch (Exception e) {
+        LOG.info(e);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 7c13744..6e31f2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -22,22 +22,29 @@
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.EnumSet;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -70,6 +77,7 @@
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
       .build();
@@ -171,4 +179,36 @@
     Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
+  
+  @Test
+  public void testINodeXAttrsLimit() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
+    existingXAttrs.add(xAttr1);
+    existingXAttrs.add(xAttr2);
+    
+    // Adding a system namespace xAttr is not affected by the inode xAttr limit.
+    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+        setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr, 
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    Assert.assertEquals(xAttrs.size(), 3);
+    
+    // Adding a trusted namespace xAttr is affected by the inode xAttr limit.
+    XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
+        XAttr.NameSpace.TRUSTED).setName("a4").
+        setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    try {
+      fsdir.setINodeXAttr(existingXAttrs, newXAttr1, 
+          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      Assert.fail("Setting a user visible xattr on an inode should fail " +
+          "when the limit is reached.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
+          "to inode, would exceed limit", e);
+    }
+  }
 }
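As a client-side sketch of the same limit (illustration only, not part of this patch: the
path is hypothetical, and the cluster is assumed to run with
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY set to 2, as in the setUp() above):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrLimitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Client pointed at a cluster whose NameNode enforces a limit of 2 xattrs per inode.
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/xattr-limit-demo");   // hypothetical path
    fs.create(p).close();
    fs.setXAttr(p, "user.a1", new byte[]{0x31});  // first user xattr: accepted
    fs.setXAttr(p, "user.a2", new byte[]{0x32});  // second user xattr: accepted
    try {
      // A third user-visible xattr would exceed the per-inode limit and is
      // expected to be rejected by the NameNode.
      fs.setXAttr(p, "user.a3", new byte[]{0x33});
    } catch (IOException e) {
      System.out.println("Rejected as expected: " + e.getMessage());
    }
  }
}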
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index 8cf68a7..0f4a2b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -22,6 +22,7 @@
 import static org.apache.hadoop.util.Time.now;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -45,37 +46,20 @@
 
 public class TestFsLimits {
   static Configuration conf;
-  static INode[] inodes;
-  static FSDirectory fs;
+  static FSNamesystem fs;
   static boolean fsIsReady;
   
   static final PermissionStatus perms
     = new PermissionStatus("admin", "admin", FsPermission.getDefault());
 
-  static private FSImage getMockFSImage() {
-    FSEditLog editLog = mock(FSEditLog.class);
+  static private FSNamesystem getMockNamesystem() throws IOException {
     FSImage fsImage = mock(FSImage.class);
-    when(fsImage.getEditLog()).thenReturn(editLog);
-    return fsImage;
-  }
-
-  static private FSNamesystem getMockNamesystem() {
-    FSNamesystem fsn = mock(FSNamesystem.class);
-    when(
-        fsn.createFsOwnerPermissions((FsPermission)anyObject())
-    ).thenReturn(
-         new PermissionStatus("root", "wheel", FsPermission.getDefault())
-    );
+    FSEditLog editLog = mock(FSEditLog.class);
+    doReturn(editLog).when(fsImage).getEditLog();
+    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsn.getFSDirectory().setReady(fsIsReady);
     return fsn;
   }
-  
-  private static class MockFSDirectory extends FSDirectory {
-    public MockFSDirectory() throws IOException {
-      super(getMockFSImage(), getMockNamesystem(), conf);
-      setReady(fsIsReady);
-      NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
-    }
-  }
 
   @Before
   public void setUp() throws IOException {
@@ -83,7 +67,7 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
-
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     fs = null;
     fsIsReady = true;
   }
@@ -197,9 +181,10 @@
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.mkdirs(name, perms, false, now());
+      fs.mkdirs(name, perms, false);
     } catch (Throwable e) {
       generated = e.getClass();
+      e.printStackTrace();
     }
     assertEquals(expected, generated);
   }
@@ -209,7 +194,7 @@
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false, new Rename[] { });
+      fs.renameTo(src, dst, new Rename[] { });
     } catch (Throwable e) {
       generated = e.getClass();
     }
@@ -222,7 +207,7 @@
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false);
+      fs.renameTo(src, dst);
     } catch (Throwable e) {
       generated = e.getClass();
     }
@@ -232,7 +217,7 @@
   private static void lazyInitFSDirectory() throws IOException {
     // have to create after the caller has had a chance to set conf values
     if (fs == null) {
-      fs = new MockFSDirectory();
+      fs = getMockNamesystem();
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 32efe34..e5df79e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -138,6 +138,13 @@
       hdfs.mkdirs(new Path("/snapshot/1"));
       hdfs.delete(snapshot, true);
 
+      // Set XAttrs so the fsimage contains XAttr ops
+      final Path xattr = new Path("/xattr");
+      hdfs.mkdirs(xattr);
+      hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
+      hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
+      writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
+
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       hdfs.saveNamespace();
@@ -210,8 +217,8 @@
     matcher = p.matcher(output.getBuffer());
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory and empty directory
-    assertEquals(NUM_DIRS + 2, totalDirs);
+    // totalDirs includes root directory, empty directory, and xattr directory
+    assertEquals(NUM_DIRS + 3, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -264,7 +271,7 @@
 
       // verify the number of directories
       FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-      assertEquals(NUM_DIRS + 1, statuses.length); // contains empty directory
+      assertEquals(NUM_DIRS + 2, statuses.length); // contains the empty and xattr directories
 
       // verify the number of files in the directory
       statuses = webhdfs.listStatus(new Path("/dir0"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 3824dd5..2ec4fcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -16118,8 +16118,7 @@
       </comparators>
     </test>
 
-    <!-- DFS tests
-        Must come before moveFromLocal tests until HDFS-6471 is fixed -->
+    <!-- DFS tests -->
     <test>
       <description>appendToFile</description>
       <test-commands>
@@ -16480,10 +16479,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(absolute path)</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata /user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
@@ -16496,12 +16496,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(relative path)</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
-        <command>-fs NAMENODE -touchz test</command>
-        <command>-fs NAMENODE -moveFromLocal wrongdata file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command> <!-- make sure user home dir exists -->
+        <command>-fs NAMENODE -moveFromLocal wrongdata /user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
@@ -16514,19 +16513,19 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
-        <command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
-        <command>-fs NAMENODE -touchz file0</command>
-        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz /user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes /user/USERNAME/dir/file0</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `/user/USERNAME/dir/file0': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16534,17 +16533,17 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
-        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes wrongdir</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes /user/USERNAME/dir/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `wrongdir': No such file or directory</expected-output>
+          <expected-output>^moveFromLocal: `/user/USERNAME/dir/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16552,7 +16551,8 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving non existent file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata hdfs:///file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata hdfs:///user/USERNAME/dir/file</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -16568,8 +16568,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -touchz hdfs:///file0</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz hdfs:///user/USERNAME/dir/file</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes hdfs:///user/USERNAME/dir/file</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -16577,7 +16580,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs:///file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs:///user/USERNAME/dir/file': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16585,15 +16588,17 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///wrongdir</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes hdfs:///user/USERNAME/dir/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r hdfs:///*</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs:///wrongdir': No such file or directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs:///user/USERNAME/dir/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16601,7 +16606,8 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving non existent file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata NAMENODE/file</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata NAMENODE/user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r NAMENODE/*</command>
@@ -16617,8 +16623,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/file0</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz NAMENODE/user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes NAMENODE/user/USERNAME/dir/file0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r NAMENODE/*</command>
@@ -16626,7 +16635,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/USERNAME/dir/file0': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16634,15 +16643,19 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/wrongdir</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME</command>
+        <command>-fs NAMENODE -touchz NAMENODE/user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes NAMENODE/user/USERNAME/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r NAMENODE/*</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/wrongdir': No such file or directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/USERNAME/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 0686809..f3c4d0a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -77,6 +77,9 @@
     MAPREDUCE-5196. Add bookkeeping for managing checkpoints of task state.
     (Carlo Curino via cdouglas)
 
+    MAPREDUCE-5912. Task.calculateOutputSize does not handle Windows files after
+    MAPREDUCE-5196. (Remus Rusanu via cnauroth)
+
   BUG FIXES
 
     MAPREDUCE-5714. Removed forceful JVM exit in shutDownJob.  
@@ -142,9 +145,6 @@
     MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to 
     ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
 
-    MAPREDUCE-5898. distcp to support preserving HDFS extended attributes(XAttrs)
-    (Yi Liu via umamahesh)
-
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -259,7 +259,15 @@
     MAPREDUCE-5777. Support utf-8 text with Byte Order Marker.
     (Zhihai Xu via kasha)
 
-Release 2.4.1 - UNRELEASED
+    MAPREDUCE-5898. distcp to support preserving HDFS extended attributes(XAttrs)
+    (Yi Liu via umamahesh)
+
+    MAPREDUCE-5920. Add Xattr option in DistCp docs. (Yi Liu via cnauroth)
+
+    MAPREDUCE-5924. Changed TaskAttemptImpl to ignore TA_COMMIT_PENDING event
+    at COMMIT_PENDING state. (Zhijie Shen via jianhe)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 56e7db6..f33c58e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -335,6 +335,15 @@
      .addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
          TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
          TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
+     // The AM is likely to receive duplicate TA_COMMIT_PENDING events because
+     // the task attempt keeps re-sending the commit message until it is
+     // delivered without hitting an IOException.
+     // Ignoring the duplicate commit message is a short-term fix. In the long
+     // term, we need a retry cache to cover this and other MR protocol APIs
+     // that should be treated as @AtMostOnce.
+     .addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
+         TaskAttemptStateInternal.COMMIT_PENDING,
+         TaskAttemptEventType.TA_COMMIT_PENDING)
 
      // Transitions from SUCCESS_CONTAINER_CLEANUP state
      // kill and cleanup the container
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
index 8baddc8..b03d58d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
@@ -112,6 +112,15 @@
     //wait for first attempt to commit pending
     app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
 
+    //re-send the commit pending signal to the task
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(
+            attempt.getID(),
+            TaskAttemptEventType.TA_COMMIT_PENDING));
+
+    //the task attempt should still be at COMMIT_PENDING
+    app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
+
     //send the done signal to the task
     app.getContext().getEventHandler().handle(
         new TaskAttemptEvent(
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 17ae55e..861c47b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -112,7 +112,7 @@
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class JobConf extends Configuration {
-  
+
   private static final Log LOG = LogFactory.getLog(JobConf.class);
 
   static{
@@ -882,7 +882,7 @@
       JobContext.KEY_COMPARATOR, null, RawComparator.class);
     if (theClass != null)
       return ReflectionUtils.newInstance(theClass, this);
-    return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class));
+    return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
   }
 
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 72cd41c..4815f19 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -1120,8 +1120,8 @@
     if (isMapTask() && conf.getNumReduceTasks() > 0) {
       try {
         Path mapOutput =  mapOutputFile.getOutputFile();
-        FileSystem fs = mapOutput.getFileSystem(conf);
-        return fs.getFileStatus(mapOutput).getLen();
+        FileSystem localFS = FileSystem.getLocal(conf);
+        return localFS.getFileStatus(mapOutput).getLen();
       } catch (IOException e) {
         LOG.warn ("Could not find output size " , e);
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
index 8bb5fcd..0684268 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
@@ -131,7 +131,7 @@
   public void add(ComposableRecordReader<K,? extends V> rr) throws IOException {
     kids[rr.id()] = rr;
     if (null == q) {
-      cmp = WritableComparator.get(rr.createKey().getClass());
+      cmp = WritableComparator.get(rr.createKey().getClass(), conf);
       q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
           new Comparator<ComposableRecordReader<K,?>>() {
             public int compare(ComposableRecordReader<K,?> o1,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
index 5b38ba2..a86f32e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
@@ -22,6 +22,8 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
@@ -38,7 +40,7 @@
 @InterfaceStability.Stable
 public class WrappedRecordReader<K extends WritableComparable,
                           U extends Writable>
-    implements ComposableRecordReader<K,U> {
+    implements ComposableRecordReader<K,U>, Configurable {
 
   private boolean empty = false;
   private RecordReader<K,U> rr;
@@ -47,6 +49,7 @@
   private K khead; // key at the top of this RR
   private U vhead; // value assoc with khead
   private WritableComparator cmp;
+  private Configuration conf;
 
   private ResetableIterator<U> vjoin;
 
@@ -55,13 +58,20 @@
    */
   WrappedRecordReader(int id, RecordReader<K,U> rr,
       Class<? extends WritableComparator> cmpcl) throws IOException {
+    this(id, rr, cmpcl, null);
+  }
+
+  WrappedRecordReader(int id, RecordReader<K,U> rr,
+                      Class<? extends WritableComparator> cmpcl,
+                      Configuration conf) throws IOException {
     this.id = id;
     this.rr = rr;
+    this.conf = (conf == null) ? new Configuration() : conf;
     khead = rr.createKey();
     vhead = rr.createValue();
     try {
       cmp = (null == cmpcl)
-        ? WritableComparator.get(khead.getClass())
+        ? WritableComparator.get(khead.getClass(), this.conf)
         : cmpcl.newInstance();
     } catch (InstantiationException e) {
       throw (IOException)new IOException().initCause(e);
@@ -207,4 +217,13 @@
     return 42;
   }
 
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
index b6b9ddc..40f3570 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
@@ -109,7 +109,7 @@
         }
         // create priority queue
         if (null == q) {
-          cmp = WritableComparator.get(keyclass);
+          cmp = WritableComparator.get(keyclass, conf);
           q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
                 new Comparator<ComposableRecordReader<K,?>>() {
                   public int compare(ComposableRecordReader<K,?> o1,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
index d3521d4..28310d0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
@@ -92,7 +92,7 @@
       keyclass = key.getClass().asSubclass(WritableComparable.class);
       valueclass = value.getClass();
       if (cmp == null) {
-        cmp = WritableComparator.get(keyclass);
+        cmp = WritableComparator.get(keyclass, conf);
       }
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
index 669e6cd..6271a92 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
@@ -196,7 +196,7 @@
 
 Flag              | Description                          | Notes
 ----------------- | ------------------------------------ | --------
-`-p[rbugpca]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL | Modification times are not preserved. Also, when `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions.
+`-p[rbugpcax]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr | Modification times are not preserved. Also, when `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions.
 `-i` | Ignore failures | As explained in the Appendix, this option will keep more accurate statistics about the copy than the default case. It also preserves logs from failed copies, which can be valuable for debugging. Finally, a failing map will not cause the job to fail before all splits are attempted.
 `-log <logdir>` | Write logs to \<logdir\> | DistCp keeps logs of each file it attempts to copy as map output. If a map fails, the log output will not be retained if it is re-executed.
 `-m <num_maps>` | Maximum number of simultaneous copies | Specify the number of maps to copy data. Note that more maps may not necessarily improve throughput.
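For illustration only (not part of this patch; the NameNode URIs and paths are hypothetical), a copy that preserves user, group, permission, ACLs and XAttrs with the new `x` letter could be invoked as `bash$ hadoop distcp -update -pugpax hdfs://nn1:8020/src hdfs://nn2:8020/dst`.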
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
index bd62f55..86ba7bf 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
index 7dafe81..45b40c2 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
index 73f2ca6..6661f67 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
index decd288..563bd59 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 8ba5852..9a22e98 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -137,6 +137,7 @@
       <item name="Common CHANGES.txt" href="hadoop-project-dist/hadoop-common/CHANGES.txt"/>
       <item name="HDFS CHANGES.txt" href="hadoop-project-dist/hadoop-hdfs/CHANGES.txt"/>
       <item name="MapReduce CHANGES.txt" href="hadoop-project-dist/hadoop-mapreduce/CHANGES.txt"/>
+      <item name="Metrics" href="hadoop-project-dist/hadoop-common/Metrics.html"/>
     </menu>
     
     <menu name="Configuration" inherit="top">
diff --git a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
index a5c4443..f50dddd 100644
--- a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
+++ b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
@@ -1,8 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-  Copyright 2002-2004 The Apache Software Foundation
-
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
diff --git a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml
index 64485f1..42ee7ee 100644
--- a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml
+++ b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml
@@ -1,8 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-  Copyright 2002-2004 The Apache Software Foundation
-
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c88560b..7792935 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -36,6 +36,12 @@
     schedulers after ResourceManager Restart so as to preserve running work in
     the cluster. (Jian He via vinodkv)
 
+    YARN-1702. Added kill app functionality to RM web services. (Varun Vasudev
+    via vinodkv)
+
+    YARN-1339. Recover DeletionService state upon nodemanager restart. (Jason Lowe
+    via junping_du)
+
   IMPROVEMENTS
 
     YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via
@@ -158,6 +164,12 @@
     YARN-2091. Add more values to ContainerExitStatus and pass it from NM to
     RM and then to app masters (Tsuyoshi OZAWA via bikas)
 
+    YARN-2125. Changed ProportionalCapacityPreemptionPolicy to log CSV at debug
+    level. (Wangda Tan via jianhe)
+
+    YARN-2159. Better logging in SchedulerNode#allocateContainer.
+    (Ray Chiang via kasha)
+
   OPTIMIZATIONS
 
   BUG FIXES 
@@ -227,7 +239,27 @@
     YARN-2128. FairScheduler: Incorrect calculation of amResource usage.
     (Wei Yan via kasha)
 
-Release 2.4.1 - UNRELEASED
+    YARN-2124. Fixed NPE in ProportionalCapacityPreemptionPolicy. (Wangda Tan
+    via jianhe)
+
+    YARN-2148. TestNMClient failed due to more exit code values being added and
+    passed to the AM (Wangda Tan via bikas)
+
+    YARN-2075. Fixed the test failure of TestRMAdminCLI. (Kenji Kikushima via
+    zjshen)
+
+    YARN-2155. FairScheduler: Incorrect threshold check for preemption.
+    (Wei Yan via kasha)
+
+    YARN-1885. Fixed a bug where the RM may not send the application-clean-up
+    signal to NMs on which completed applications previously ran after an RM
+    restart. (Wangda Tan via jianhe)
+
+    YARN-2167. LeveldbIterator should get closed in
+    NMLeveldbStateStoreService#loadLocalizationState() within finally block
+    (Junping Du via jlowe)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMAdminCLI.java
index f4ef0aa..419b9ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMAdminCLI.java
@@ -305,7 +305,8 @@
       testError(new String[] { "-help", "-getGroups" },
           "Usage: yarn rmadmin [-getGroups [username]]", dataErr, 0);
       testError(new String[] { "-help", "-transitionToActive" },
-          "Usage: yarn rmadmin [-transitionToActive <serviceId>]", dataErr, 0);
+          "Usage: yarn rmadmin [-transitionToActive <serviceId>" +
+          " [--forceactive]]", dataErr, 0);
       testError(new String[] { "-help", "-transitionToStandby" },
           "Usage: yarn rmadmin [-transitionToStandby <serviceId>]", dataErr, 0);
       testError(new String[] { "-help", "-getServiceState" },
@@ -332,9 +333,9 @@
               "yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" +
               "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " +
               "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" +
-              " [username]] [-help [cmd]] [-transitionToActive <serviceId>]" +
-              " [-transitionToStandby <serviceId>] [-failover [--forcefence]" +
-              " [--forceactive] <serviceId> <serviceId>] " +
+              " [username]] [-help [cmd]] [-transitionToActive <serviceId>" + 
+              " [--forceactive]] [-transitionToStandby <serviceId>] [-failover" +
+              " [--forcefence] [--forceactive] <serviceId> <serviceId>] " +
               "[-getServiceState <serviceId>] [-checkHealth <serviceId>]"));
     } finally {
       System.setOut(oldOutPrintStream);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
index 363f666..8885769 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
@@ -60,7 +60,7 @@
     // make sure registerNodeManager works when failover happens
     RegisterNodeManagerRequest request =
         RegisterNodeManagerRequest.newInstance(nodeId, 0, resource,
-            YarnVersionInfo.getVersion(), null);
+            YarnVersionInfo.getVersion(), null, null);
     resourceTracker.registerNodeManager(request);
     Assert.assertTrue(waitForNodeManagerToConnect(10000, nodeId));
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
index 3077a2a..88dbf81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -346,7 +347,7 @@
           // 137 is possible if the container is not terminated but killed
           testGetContainerStatus(container, i, ContainerState.COMPLETE,
               "Container killed by the ApplicationMaster.", Arrays.asList(
-                  new Integer[] {137, 143, 0}));
+                  new Integer[] {ContainerExitStatus.KILLED_BY_APPMASTER}));
         } catch (YarnException e) {
           // The exception is possible because, after the container is stopped,
           // it may be removed from NM's context.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
index 43e892d..0e3d7e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
@@ -20,15 +20,17 @@
 
 import java.util.List;
 
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.util.Records;
 
 public abstract class RegisterNodeManagerRequest {
-  
+
   public static RegisterNodeManagerRequest newInstance(NodeId nodeId,
       int httpPort, Resource resource, String nodeManagerVersionId,
-      List<NMContainerStatus> containerStatuses) {
+      List<NMContainerStatus> containerStatuses,
+      List<ApplicationId> runningApplications) {
     RegisterNodeManagerRequest request =
         Records.newRecord(RegisterNodeManagerRequest.class);
     request.setHttpPort(httpPort);
@@ -36,6 +38,7 @@
     request.setNodeId(nodeId);
     request.setNMVersion(nodeManagerVersionId);
     request.setContainerStatuses(containerStatuses);
+    request.setRunningApplications(runningApplications);
     return request;
   }
   
@@ -45,10 +48,30 @@
   public abstract String getNMVersion();
   public abstract List<NMContainerStatus> getNMContainerStatuses();
   
+  /**
+   * We introduce this here because the YARN RM does not currently persist the
+   * set of nodes on which each application is running. After an RM restart we
+   * therefore cannot determine whether a node should perform application
+   * cleanup (log aggregation, status updates, etc.). <p/>
+   * With this running-application list included in the node manager
+   * registration request, the RM can recover that per-application node
+   * information and act on it accordingly.
+   *
+   * @return the list of applications running on this node
+   */
+  public abstract List<ApplicationId> getRunningApplications();
+  
   public abstract void setNodeId(NodeId nodeId);
   public abstract void setHttpPort(int port);
   public abstract void setResource(Resource resource);
   public abstract void setNMVersion(String version);
   public abstract void setContainerStatuses(
       List<NMContainerStatus> containerStatuses);
+  
+  /**
+   * Setter for {@link RegisterNodeManagerRequest#getRunningApplications()}
+   * @param runningApplications the applications running on this node
+   */
+  public abstract void setRunningApplications(
+      List<ApplicationId> runningApplications);
 }
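
The javadoc above motivates carrying the running-application list in node manager registration: without it, a restarted RM cannot tell which nodes still need per-application cleanup. A minimal, hypothetical caller-side sketch using only the `newInstance` signature introduced here (host, ports, resources, and version string are illustrative; the real NM-side population happens in NodeStatusUpdaterImpl further down in this patch):

```java
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;

public class RegisterWithRunningAppsSketch {
  public static RegisterNodeManagerRequest buildRequest() {
    // Applications the (hypothetical) node manager still hosts.
    List<ApplicationId> runningApps = Arrays.asList(
        ApplicationId.newInstance(1234L, 1),
        ApplicationId.newInstance(1234L, 2));
    // Container statuses are omitted (null); the PB implementation tolerates
    // null lists, as exercised by the new unit tests added in this patch.
    return RegisterNodeManagerRequest.newInstance(
        NodeId.newInstance("nm-host.example.com", 45454), 8042,
        Resource.newInstance(8192, 8), "2.5.0-SNAPSHOT",
        null, runningApps);
  }
}
```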
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
index 5b3d066..ce4faec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java
@@ -20,12 +20,23 @@
 
 
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto;
@@ -44,6 +55,7 @@
   private Resource resource = null;
   private NodeId nodeId = null;
   private List<NMContainerStatus> containerStatuses = null;
+  private List<ApplicationId> runningApplications = null;
   
   public RegisterNodeManagerRequestPBImpl() {
     builder = RegisterNodeManagerRequestProto.newBuilder();
@@ -65,6 +77,9 @@
     if (this.containerStatuses != null) {
       addNMContainerStatusesToProto();
     }
+    if (this.runningApplications != null) {
+      addRunningApplicationsToProto();
+    }
     if (this.resource != null) {
       builder.setResource(convertToProtoFormat(this.resource));
     }
@@ -158,6 +173,66 @@
     maybeInitBuilder();
     builder.setHttpPort(httpPort);
   }
+  
+  @Override
+  public List<ApplicationId> getRunningApplications() {
+    initRunningApplications();
+    return runningApplications;
+  }
+  
+  private void initRunningApplications() {
+    if (this.runningApplications != null) {
+      return;
+    }
+    RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? proto : builder;
+    List<ApplicationIdProto> list = p.getRunningApplicationsList();
+    this.runningApplications = new ArrayList<ApplicationId>();
+    for (ApplicationIdProto c : list) {
+      this.runningApplications.add(convertFromProtoFormat(c));
+    }
+  }
+
+  @Override
+  public void setRunningApplications(List<ApplicationId> apps) {
+    if (apps == null) {
+      return;
+    }
+    initRunningApplications();
+    this.runningApplications.addAll(apps);
+  }
+  
+  private void addRunningApplicationsToProto() {
+    maybeInitBuilder();
+    builder.clearRunningApplications();
+    if (runningApplications == null) {
+      return;
+    }
+    Iterable<ApplicationIdProto> it = new Iterable<ApplicationIdProto>() {
+      
+      @Override
+      public Iterator<ApplicationIdProto> iterator() {
+        return new Iterator<ApplicationIdProto>() {
+          Iterator<ApplicationId> iter = runningApplications.iterator();
+          
+          @Override
+          public boolean hasNext() {
+            return iter.hasNext();
+          }
+          
+          @Override
+          public ApplicationIdProto next() {
+            return convertToProtoFormat(iter.next());  
+          }
+          
+          @Override
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+    builder.addAllRunningApplications(it);
+  }
 
   @Override
   public List<NMContainerStatus> getNMContainerStatuses() {
@@ -216,6 +291,14 @@
     maybeInitBuilder();
     builder.setNmVersion(version);
   }
+  
+  private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+    return new ApplicationIdPBImpl(p);
+  }
+
+  private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+    return ((ApplicationIdPBImpl)t).getProto();
+  }
 
   private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
     return new NodeIdPBImpl(p);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index ebd752f..aab4383 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -31,6 +31,7 @@
   optional ResourceProto resource = 4;
   optional string nm_version = 5;
   repeated NMContainerStatusProto container_statuses = 6;
+  repeated ApplicationIdProto runningApplications = 7;
 }
 
 message RegisterNodeManagerResponseProto {
@@ -66,4 +67,4 @@
   optional PriorityProto priority = 4;
   optional string diagnostics = 5 [default = "N/A"];
   optional int32 container_exit_status = 6;
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
index 2ffc9c9..bfb764e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
@@ -83,7 +83,8 @@
     RegisterNodeManagerRequest request =
         RegisterNodeManagerRequest.newInstance(
           NodeId.newInstance("1.1.1.1", 1000), 8080,
-          Resource.newInstance(1024, 1), "NM-version-id", reports);
+            Resource.newInstance(1024, 1), "NM-version-id", reports,
+            Arrays.asList(appId));
     RegisterNodeManagerRequest requestProto =
         new RegisterNodeManagerRequestPBImpl(
           ((RegisterNodeManagerRequestPBImpl) request).getProto());
@@ -95,5 +96,7 @@
       requestProto.getNodeId());
     Assert.assertEquals(Resource.newInstance(1024, 1),
       requestProto.getResource());
+    Assert.assertEquals(1, requestProto.getRunningApplications().size());
+    Assert.assertEquals(appId, requestProto.getRunningApplications().get(0)); 
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
new file mode 100644
index 0000000..1f32e66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
@@ -0,0 +1,81 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRegisterNodeManagerRequest {
+  @Test
+  public void testRegisterNodeManagerRequest() {
+    RegisterNodeManagerRequest request =
+        RegisterNodeManagerRequest.newInstance(
+            NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
+            "version", Arrays.asList(NMContainerStatus.newInstance(
+                ContainerId.newInstance(
+                    ApplicationAttemptId.newInstance(
+                        ApplicationId.newInstance(1234L, 1), 1), 1),
+                ContainerState.RUNNING, Resource.newInstance(1024, 1), "good",
+                -1)), Arrays.asList(ApplicationId.newInstance(1234L, 1),
+                ApplicationId.newInstance(1234L, 2)));
+
+    // serialize to proto, then rebuild the request from the proto
+    RegisterNodeManagerRequest request1 =
+        new RegisterNodeManagerRequestPBImpl(
+            ((RegisterNodeManagerRequestPBImpl) request).getProto());
+
+    // check values
+    Assert.assertEquals(request1.getNMContainerStatuses().size(), request
+        .getNMContainerStatuses().size());
+    Assert.assertEquals(request1.getNMContainerStatuses().get(0).getContainerId(),
+        request.getNMContainerStatuses().get(0).getContainerId());
+    Assert.assertEquals(request1.getRunningApplications().size(), request
+        .getRunningApplications().size());
+    Assert.assertEquals(request1.getRunningApplications().get(0), request
+        .getRunningApplications().get(0));
+    Assert.assertEquals(request1.getRunningApplications().get(1), request
+        .getRunningApplications().get(1));
+  }
+  
+  @Test
+  public void testRegisterNodeManagerRequestWithNullArrays() {
+    RegisterNodeManagerRequest request =
+        RegisterNodeManagerRequest.newInstance(NodeId.newInstance("host", 1234),
+            1234, Resource.newInstance(0, 0), "version", null, null);
+
+    // serialize to proto, then rebuild the request from the proto
+    RegisterNodeManagerRequest request1 =
+        new RegisterNodeManagerRequestPBImpl(
+            ((RegisterNodeManagerRequestPBImpl) request).getProto());
+
+    // check values
+    Assert.assertEquals(0, request1.getNMContainerStatuses().size());
+    Assert.assertEquals(0, request1.getRunningApplications().size());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index 45504fd..e4025f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,10 +21,13 @@
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
@@ -40,6 +43,10 @@
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -50,6 +57,8 @@
   private final ContainerExecutor exec;
   private ScheduledThreadPoolExecutor sched;
   private static final FileContext lfs = getLfs();
+  private final NMStateStoreService stateStore;
+  private AtomicInteger nextTaskId = new AtomicInteger(0);
 
   static final FileContext getLfs() {
     try {
@@ -60,14 +69,18 @@
   }
 
   public DeletionService(ContainerExecutor exec) {
+    this(exec, new NMNullStateStoreService());
+  }
+
+  public DeletionService(ContainerExecutor exec,
+      NMStateStoreService stateStore) {
     super(DeletionService.class.getName());
     this.exec = exec;
     this.debugDelay = 0;
+    this.stateStore = stateStore;
   }
   
   /**
-   * 
-  /**
    * Delete the path(s) as this user.
    * @param user The user to delete as, or the JVM user if null
    * @param subDir the sub directory name
@@ -76,19 +89,20 @@
   public void delete(String user, Path subDir, Path... baseDirs) {
     // TODO if parent owned by NM, rename within parent inline
     if (debugDelay != -1) {
-      if (baseDirs == null || baseDirs.length == 0) {
-        sched.schedule(new FileDeletionTask(this, user, subDir, null),
-          debugDelay, TimeUnit.SECONDS);
-      } else {
-        sched.schedule(
-          new FileDeletionTask(this, user, subDir, Arrays.asList(baseDirs)),
-          debugDelay, TimeUnit.SECONDS);
+      List<Path> baseDirList = null;
+      if (baseDirs != null && baseDirs.length != 0) {
+        baseDirList = Arrays.asList(baseDirs);
       }
+      FileDeletionTask task =
+          new FileDeletionTask(this, user, subDir, baseDirList);
+      recordDeletionTaskInStateStore(task);
+      sched.schedule(task, debugDelay, TimeUnit.SECONDS);
     }
   }
   
   public void scheduleFileDeletionTask(FileDeletionTask fileDeletionTask) {
     if (debugDelay != -1) {
+      recordDeletionTaskInStateStore(fileDeletionTask);
       sched.schedule(fileDeletionTask, debugDelay, TimeUnit.SECONDS);
     }
   }
@@ -109,6 +123,9 @@
     }
     sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
     sched.setKeepAliveTime(60L, SECONDS);
+    if (stateStore.canRecover()) {
+      recover(stateStore.loadDeletionServiceState());
+    }
     super.serviceInit(conf);
   }
 
@@ -139,6 +156,8 @@
   }
 
   public static class FileDeletionTask implements Runnable {
+    public static final int INVALID_TASK_ID = -1;
+    private int taskId;
     private final String user;
     private final Path subDir;
     private final List<Path> baseDirs;
@@ -152,6 +171,12 @@
     
     private FileDeletionTask(DeletionService delService, String user,
         Path subDir, List<Path> baseDirs) {
+      this(INVALID_TASK_ID, delService, user, subDir, baseDirs);
+    }
+
+    private FileDeletionTask(int taskId, DeletionService delService,
+        String user, Path subDir, List<Path> baseDirs) {
+      this.taskId = taskId;
       this.delService = delService;
       this.user = user;
       this.subDir = subDir;
@@ -198,6 +223,12 @@
       return this.success;
     }
     
+    public synchronized FileDeletionTask[] getSuccessorTasks() {
+      FileDeletionTask[] successors =
+          new FileDeletionTask[successorTaskSet.size()];
+      return successorTaskSet.toArray(successors);
+    }
+
     @Override
     public void run() {
       if (LOG.isDebugEnabled()) {
@@ -286,6 +317,12 @@
      * dependent tasks of it has failed marking its success = false.  
      */
     private synchronized void fileDeletionTaskFinished() {
+      try {
+        delService.stateStore.removeDeletionTask(taskId);
+      } catch (IOException e) {
+        LOG.error("Unable to remove deletion task " + taskId
+            + " from state store", e);
+      }
       Iterator<FileDeletionTask> successorTaskI =
           this.successorTaskSet.iterator();
       while (successorTaskI.hasNext()) {
@@ -318,4 +355,129 @@
       Path[] baseDirs) {
     return new FileDeletionTask(this, user, subDir, Arrays.asList(baseDirs));
   }
+
+  private void recover(RecoveredDeletionServiceState state)
+      throws IOException {
+    List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks();
+    Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap =
+        new HashMap<Integer, DeletionTaskRecoveryInfo>(taskProtos.size());
+    Set<Integer> successorTasks = new HashSet<Integer>();
+    for (DeletionServiceDeleteTaskProto proto : taskProtos) {
+      DeletionTaskRecoveryInfo info = parseTaskProto(proto);
+      idToInfoMap.put(info.task.taskId, info);
+      nextTaskId.set(Math.max(nextTaskId.get(), info.task.taskId));
+      successorTasks.addAll(info.successorTaskIds);
+    }
+
+    // restore the task dependencies and schedule the deletion tasks that
+    // have no predecessors
+    final long now = System.currentTimeMillis();
+    for (DeletionTaskRecoveryInfo info : idToInfoMap.values()) {
+      for (Integer successorId : info.successorTaskIds){
+        DeletionTaskRecoveryInfo successor = idToInfoMap.get(successorId);
+        if (successor != null) {
+          info.task.addFileDeletionTaskDependency(successor.task);
+        } else {
+          LOG.error("Unable to locate dependency task for deletion task "
+              + info.task.taskId + " at " + info.task.getSubDir());
+        }
+      }
+      if (!successorTasks.contains(info.task.taskId)) {
+        long msecTilDeletion = info.deletionTimestamp - now;
+        sched.schedule(info.task, msecTilDeletion, TimeUnit.MILLISECONDS);
+      }
+    }
+  }
+
+  private DeletionTaskRecoveryInfo parseTaskProto(
+      DeletionServiceDeleteTaskProto proto) throws IOException {
+    int taskId = proto.getId();
+    String user = proto.hasUser() ? proto.getUser() : null;
+    Path subdir = null;
+    List<Path> basePaths = null;
+    if (proto.hasSubdir()) {
+      subdir = new Path(proto.getSubdir());
+    }
+    List<String> basedirs = proto.getBasedirsList();
+    if (basedirs != null && basedirs.size() > 0) {
+      basePaths = new ArrayList<Path>(basedirs.size());
+      for (String basedir : basedirs) {
+        basePaths.add(new Path(basedir));
+      }
+    }
+
+    FileDeletionTask task = new FileDeletionTask(taskId, this, user,
+        subdir, basePaths);
+    return new DeletionTaskRecoveryInfo(task,
+        proto.getSuccessorIdsList(),
+        proto.getDeletionTime());
+  }
+
+  private int generateTaskId() {
+    // get the next ID but avoid an invalid ID
+    int taskId = nextTaskId.incrementAndGet();
+    while (taskId == FileDeletionTask.INVALID_TASK_ID) {
+      taskId = nextTaskId.incrementAndGet();
+    }
+    return taskId;
+  }
+
+  private void recordDeletionTaskInStateStore(FileDeletionTask task) {
+    if (!stateStore.canRecover()) {
+      // optimize the case where we aren't really recording
+      return;
+    }
+    if (task.taskId != FileDeletionTask.INVALID_TASK_ID) {
+      return;  // task already recorded
+    }
+
+    task.taskId = generateTaskId();
+
+    FileDeletionTask[] successors = task.getSuccessorTasks();
+
+    // store successors first to ensure task IDs have been generated for them
+    for (FileDeletionTask successor : successors) {
+      recordDeletionTaskInStateStore(successor);
+    }
+
+    DeletionServiceDeleteTaskProto.Builder builder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    builder.setId(task.taskId);
+    if (task.getUser() != null) {
+      builder.setUser(task.getUser());
+    }
+    if (task.getSubDir() != null) {
+      builder.setSubdir(task.getSubDir().toString());
+    }
+    builder.setDeletionTime(System.currentTimeMillis() +
+        TimeUnit.MILLISECONDS.convert(debugDelay, TimeUnit.SECONDS));
+    if (task.getBaseDirs() != null) {
+      for (Path dir : task.getBaseDirs()) {
+        builder.addBasedirs(dir.toString());
+      }
+    }
+    for (FileDeletionTask successor : successors) {
+      builder.addSuccessorIds(successor.taskId);
+    }
+
+    try {
+      stateStore.storeDeletionTask(task.taskId, builder.build());
+    } catch (IOException e) {
+      LOG.error("Unable to store deletion task " + task.taskId + " for "
+          + task.getSubDir(), e);
+    }
+  }
+
+  private static class DeletionTaskRecoveryInfo {
+    FileDeletionTask task;
+    List<Integer> successorTaskIds;
+    long deletionTimestamp;
+
+    public DeletionTaskRecoveryInfo(FileDeletionTask task,
+        List<Integer> successorTaskIds, long deletionTimestamp) {
+      this.task = task;
+      this.successorTaskIds = successorTaskIds;
+      this.deletionTimestamp = deletionTimestamp;
+    }
+  }
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 83b0ede..2292a0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -114,7 +114,7 @@
   }
 
   protected DeletionService createDeletionService(ContainerExecutor exec) {
-    return new DeletionService(exec);
+    return new DeletionService(exec, nmStore);
   }
 
   protected NMContext createNMContext(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 5cdb574..0b8f5b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -250,7 +250,7 @@
     List<NMContainerStatus> containerReports = getNMContainerStatuses();
     RegisterNodeManagerRequest request =
         RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource,
-          nodeManagerVersionId, containerReports);
+          nodeManagerVersionId, containerReports, getRunningApplications());
     if (containerReports != null) {
       LOG.info("Registering with RM using containers :" + containerReports);
     }
@@ -374,6 +374,12 @@
     }
     return containerStatuses;
   }
+  
+  private List<ApplicationId> getRunningApplications() {
+    List<ApplicationId> runningApplications = new ArrayList<ApplicationId>();
+    runningApplications.addAll(this.context.getApplications().keySet());
+    return runningApplications;
+  }
 
   // These NMContainerStatus are sent on NM registration and used by YARN only.
   private List<NMContainerStatus> getNMContainerStatuses() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index d124757..dc9aa88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -58,6 +59,9 @@
   private static final String DB_SCHEMA_VERSION_KEY = "schema-version";
   private static final String DB_SCHEMA_VERSION = "1.0";
 
+  private static final String DELETION_TASK_KEY_PREFIX =
+      "DeletionService/deltask_";
+
   private static final String LOCALIZATION_KEY_PREFIX = "Localization/";
   private static final String LOCALIZATION_PUBLIC_KEY_PREFIX =
       LOCALIZATION_KEY_PREFIX + "public/";
@@ -91,8 +95,9 @@
       throws IOException {
     RecoveredLocalizationState state = new RecoveredLocalizationState();
 
+    LeveldbIterator iter = null;
     try {
-      LeveldbIterator iter = new LeveldbIterator(db);
+      iter = new LeveldbIterator(db);
       iter.seek(bytes(LOCALIZATION_PUBLIC_KEY_PREFIX));
       state.publicTrackerState = loadResourceTrackerState(iter,
           LOCALIZATION_PUBLIC_KEY_PREFIX);
@@ -118,6 +123,10 @@
       }
     } catch (DBException e) {
       throw new IOException(e.getMessage(), e);
+    } finally {
+      if (iter != null) {
+        iter.close();
+      }
     }
 
     return state;
@@ -309,6 +318,56 @@
 
 
   @Override
+  public RecoveredDeletionServiceState loadDeletionServiceState()
+      throws IOException {
+    RecoveredDeletionServiceState state = new RecoveredDeletionServiceState();
+    state.tasks = new ArrayList<DeletionServiceDeleteTaskProto>();
+    LeveldbIterator iter = null;
+    try {
+      iter = new LeveldbIterator(db);
+      iter.seek(bytes(DELETION_TASK_KEY_PREFIX));
+      while (iter.hasNext()) {
+        Entry<byte[], byte[]> entry = iter.next();
+        String key = asString(entry.getKey());
+        if (!key.startsWith(DELETION_TASK_KEY_PREFIX)) {
+          break;
+        }
+        state.tasks.add(
+            DeletionServiceDeleteTaskProto.parseFrom(entry.getValue()));
+      }
+    } catch (DBException e) {
+      throw new IOException(e.getMessage(), e);
+    } finally {
+      if (iter != null) {
+        iter.close();
+      }
+    }
+    return state;
+  }
+
+  @Override
+  public void storeDeletionTask(int taskId,
+      DeletionServiceDeleteTaskProto taskProto) throws IOException {
+    String key = DELETION_TASK_KEY_PREFIX + taskId;
+    try {
+      db.put(bytes(key), taskProto.toByteArray());
+    } catch (DBException e) {
+      throw new IOException(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void removeDeletionTask(int taskId) throws IOException {
+    String key = DELETION_TASK_KEY_PREFIX + taskId;
+    try {
+      db.delete(bytes(key));
+    } catch (DBException e) {
+      throw new IOException(e.getMessage(), e);
+    }
+  }
+
+
+  @Override
   protected void initStorage(Configuration conf)
       throws IOException {
     Path storeRoot = createStorageDir(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index d41ddde..dfe4f09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 
 // The state store to use when state isn't being stored
@@ -61,6 +62,22 @@
   }
 
   @Override
+  public RecoveredDeletionServiceState loadDeletionServiceState()
+      throws IOException {
+    throw new UnsupportedOperationException(
+        "Recovery not supported by this state store");
+  }
+
+  @Override
+  public void storeDeletionTask(int taskId,
+      DeletionServiceDeleteTaskProto taskProto) throws IOException {
+  }
+
+  @Override
+  public void removeDeletionTask(int taskId) throws IOException {
+  }
+
+  @Override
   protected void initStorage(Configuration conf) throws IOException {
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 295fdb9..f2e5945 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 
 @Private
@@ -91,6 +92,14 @@
     }
   }
 
+  public static class RecoveredDeletionServiceState {
+    List<DeletionServiceDeleteTaskProto> tasks;
+
+    public List<DeletionServiceDeleteTaskProto> getTasks() {
+      return tasks;
+    }
+  }
+
   /** Initialize the state storage */
   @Override
   public void serviceInit(Configuration conf) throws IOException {
@@ -155,6 +164,15 @@
       ApplicationId appId, Path localPath) throws IOException;
 
 
+  public abstract RecoveredDeletionServiceState loadDeletionServiceState()
+      throws IOException;
+
+  public abstract void storeDeletionTask(int taskId,
+      DeletionServiceDeleteTaskProto taskProto) throws IOException;
+
+  public abstract void removeDeletionTask(int taskId) throws IOException;
+
+
   protected abstract void initStorage(Configuration conf) throws IOException;
 
   protected abstract void startStorage() throws IOException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
index bd1f74a..9546dbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
@@ -24,6 +24,15 @@
 
 import "yarn_protos.proto";
 
+message DeletionServiceDeleteTaskProto {
+  optional int32 id = 1;
+  optional string user = 2;
+  optional string subdir = 3;
+  optional int64 deletionTime = 4;
+  repeated string basedirs = 5;
+  repeated int32 successorIds = 6;
+}
+
 message LocalizedResourceProto {
   optional LocalResourceProto resource = 1;
   optional string localPath = 2;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
index 69208c5..c01ea15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.junit.AfterClass;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -285,4 +286,58 @@
       del.stop();
     }
   }
+
+  @Test
+  public void testRecovery() throws Exception {
+    Random r = new Random();
+    long seed = r.nextLong();
+    r.setSeed(seed);
+    System.out.println("SEED: " + seed);
+    List<Path> baseDirs = buildDirs(r, base, 4);
+    createDirs(new Path("."), baseDirs);
+    List<Path> content = buildDirs(r, new Path("."), 10);
+    for (Path b : baseDirs) {
+      createDirs(b, content);
+    }
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
+    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 1);
+    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
+    stateStore.init(conf);
+    stateStore.start();
+    DeletionService del =
+      new DeletionService(new FakeDefaultContainerExecutor(), stateStore);
+    try {
+      del.init(conf);
+      del.start();
+      for (Path p : content) {
+        assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
+        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
+            p, baseDirs.toArray(new Path[4]));
+      }
+
+      // restart the deletion service
+      del.stop();
+      del = new DeletionService(new FakeDefaultContainerExecutor(),
+          stateStore);
+      del.init(conf);
+      del.start();
+
+      // verify paths are still eventually deleted
+      int msecToWait = 10 * 1000;
+      for (Path p : baseDirs) {
+        for (Path q : content) {
+          Path fp = new Path(p, q);
+          while (msecToWait > 0 && lfs.util().exists(fp)) {
+            Thread.sleep(100);
+            msecToWait -= 100;
+          }
+          assertFalse(lfs.util().exists(fp));
+        }
+      }
+    } finally {
+      del.close();
+      stateStore.close();
+    }
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
index a146e7b..0c8a843 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.recovery;
 
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -25,10 +27,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 
 public class NMMemoryStateStoreService extends NMStateStoreService {
   private Map<TrackerKey, TrackerState> trackerStates;
+  private Map<Integer, DeletionServiceDeleteTaskProto> deleteTasks;
 
   public NMMemoryStateStoreService() {
     super(NMMemoryStateStoreService.class.getName());
@@ -110,6 +114,7 @@
   @Override
   protected void initStorage(Configuration conf) {
     trackerStates = new HashMap<TrackerKey, TrackerState>();
+    deleteTasks = new HashMap<Integer, DeletionServiceDeleteTaskProto>();
   }
 
   @Override
@@ -121,6 +126,28 @@
   }
 
 
+  @Override
+  public RecoveredDeletionServiceState loadDeletionServiceState()
+      throws IOException {
+    RecoveredDeletionServiceState result =
+        new RecoveredDeletionServiceState();
+    result.tasks = new ArrayList<DeletionServiceDeleteTaskProto>(
+        deleteTasks.values());
+    return result;
+  }
+
+  @Override
+  public synchronized void storeDeletionTask(int taskId,
+      DeletionServiceDeleteTaskProto taskProto) throws IOException {
+    deleteTasks.put(taskId, taskProto);
+  }
+
+  @Override
+  public synchronized void removeDeletionTask(int taskId) throws IOException {
+    deleteTasks.remove(taskId);
+  }
+
+
   private static class TrackerState {
     Map<Path, LocalResourceProto> inProgressMap =
         new HashMap<Path, LocalResourceProto>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index c970c1c3..494b27f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -35,8 +35,10 @@
 import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -404,4 +406,58 @@
         state.getUserResources();
     assertTrue(userResources.isEmpty());
   }
+
+  @Test
+  public void testDeletionTaskStorage() throws IOException {
+    // test empty when no state
+    RecoveredDeletionServiceState state =
+        stateStore.loadDeletionServiceState();
+    assertTrue(state.getTasks().isEmpty());
+
+    // store a deletion task and verify recovered
+    DeletionServiceDeleteTaskProto proto =
+        DeletionServiceDeleteTaskProto.newBuilder()
+        .setId(7)
+        .setUser("someuser")
+        .setSubdir("some/subdir")
+        .addBasedirs("some/dir/path")
+        .addBasedirs("some/other/dir/path")
+        .setDeletionTime(123456L)
+        .addSuccessorIds(8)
+        .addSuccessorIds(9)
+        .build();
+    stateStore.storeDeletionTask(proto.getId(), proto);
+    restartStateStore();
+    state = stateStore.loadDeletionServiceState();
+    assertEquals(1, state.getTasks().size());
+    assertEquals(proto, state.getTasks().get(0));
+
+    // store another deletion task
+    DeletionServiceDeleteTaskProto proto2 =
+        DeletionServiceDeleteTaskProto.newBuilder()
+        .setId(8)
+        .setUser("user2")
+        .setSubdir("subdir2")
+        .setDeletionTime(789L)
+        .build();
+    stateStore.storeDeletionTask(proto2.getId(), proto2);
+    restartStateStore();
+    state = stateStore.loadDeletionServiceState();
+    assertEquals(2, state.getTasks().size());
+    assertTrue(state.getTasks().contains(proto));
+    assertTrue(state.getTasks().contains(proto2));
+
+    // delete a task and verify gone after recovery
+    stateStore.removeDeletionTask(proto2.getId());
+    restartStateStore();
+    state = stateStore.loadDeletionServiceState();
+    assertEquals(1, state.getTasks().size());
+    assertEquals(proto, state.getTasks().get(0));
+
+    // delete the last task and verify none left
+    stateStore.removeDeletionTask(proto.getId());
+    restartStateStore();
+    state = stateStore.loadDeletionServiceState();
+    assertTrue(state.getTasks().isEmpty());
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 724dee1..77de209 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -327,7 +327,7 @@
    * RMActiveServices handles all the Active services in the RM.
    */
   @Private
-  class RMActiveServices extends CompositeService {
+  public class RMActiveServices extends CompositeService {
 
     private DelegationTokenRenewer delegationTokenRenewer;
     private EventHandler<SchedulerEvent> schedulerDispatcher;
@@ -526,11 +526,9 @@
                   (PreemptableResourceScheduler) scheduler));
           for (SchedulingEditPolicy policy : policies) {
             LOG.info("LOADING SchedulingEditPolicy:" + policy.getPolicyName());
-            policy.init(conf, rmContext.getDispatcher().getEventHandler(),
-                (PreemptableResourceScheduler) scheduler);
             // periodically check whether we need to take action to guarantee
             // constraints
-            SchedulingMonitor mon = new SchedulingMonitor(policy);
+            SchedulingMonitor mon = new SchedulingMonitor(rmContext, policy);
             addService(mon);
           }
         } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index e00eaef..f2a8376 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -244,15 +244,6 @@
     Resource capability = request.getResource();
     String nodeManagerVersion = request.getNMVersion();
 
-    if (!rmContext.isWorkPreservingRecoveryEnabled()) {
-      if (!request.getNMContainerStatuses().isEmpty()) {
-        LOG.info("received container statuses on node manager register :"
-            + request.getNMContainerStatuses());
-        for (NMContainerStatus status : request.getNMContainerStatuses()) {
-          handleNMContainerStatus(status);
-        }
-      }
-    }
     RegisterNodeManagerResponse response = recordFactory
         .newRecordInstance(RegisterNodeManagerResponse.class);
 
@@ -311,7 +302,8 @@
     RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
     if (oldNode == null) {
       this.rmContext.getDispatcher().getEventHandler().handle(
-          new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses()));
+              new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses(),
+                  request.getRunningApplications()));
     } else {
       LOG.info("Reconnect from the node at: " + host);
       this.nmLivelinessMonitor.unregister(nodeId);
@@ -322,6 +314,18 @@
     // present for any running application.
     this.nmTokenSecretManager.removeNodeKey(nodeId);
     this.nmLivelinessMonitor.register(nodeId);
+    
+    // Handle the received container statuses; this must be processed after
+    // the new RMNode has been inserted
+    if (!rmContext.isWorkPreservingRecoveryEnabled()) {
+      if (!request.getNMContainerStatuses().isEmpty()) {
+        LOG.info("received container statuses on node manager register :"
+            + request.getNMContainerStatuses());
+        for (NMContainerStatus status : request.getNMContainerStatuses()) {
+          handleNMContainerStatus(status);
+        }
+      }
+    }
 
     String message =
         "NodeManager from node " + host + "(cmPort: " + cmPort + " httpPort: "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 2e93a9e..1682f7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -21,6 +21,8 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -34,18 +36,29 @@
   private Thread checkerThread;
   private volatile boolean stopped;
   private long monitorInterval;
+  private RMContext rmContext;
 
-  public SchedulingMonitor(SchedulingEditPolicy scheduleEditPolicy) {
+  public SchedulingMonitor(RMContext rmContext,
+      SchedulingEditPolicy scheduleEditPolicy) {
     super("SchedulingMonitor (" + scheduleEditPolicy.getPolicyName() + ")");
     this.scheduleEditPolicy = scheduleEditPolicy;
-    this.monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+    this.rmContext = rmContext;
   }
 
   public long getMonitorInterval() {
     return monitorInterval;
   }
+  
+  @VisibleForTesting
+  public synchronized SchedulingEditPolicy getSchedulingEditPolicy() {
+    return scheduleEditPolicy;
+  }
 
+  @SuppressWarnings("unchecked")
   public void serviceInit(Configuration conf) throws Exception {
+    scheduleEditPolicy.init(conf, rmContext.getDispatcher().getEventHandler(),
+        (PreemptableResourceScheduler) rmContext.getScheduler());
+    this.monitorInterval = scheduleEditPolicy.getMonitoringInterval();
     super.serviceInit(conf);
   }
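The refactoring above moves policy initialization out of RMActiveServices and into SchedulingMonitor.serviceInit(), which now initializes the policy against the scheduler obtained from the RMContext before reading its monitoring interval. A minimal sketch of the resulting wiring, assuming an rmContext and conf are in scope and that ProportionalCapacityPreemptionPolicy still offers a no-arg constructor:

    // Sketch only: how a monitor is wired after this change.
    SchedulingEditPolicy policy = new ProportionalCapacityPreemptionPolicy();
    SchedulingMonitor mon = new SchedulingMonitor(rmContext, policy);
    mon.init(conf);   // serviceInit() calls policy.init(conf, dispatcher, scheduler)
    mon.start();      // the checker thread then invokes the policy periodically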
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index f94aedbf..568c5ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -165,6 +165,11 @@
     observeOnly = config.getBoolean(OBSERVE_ONLY, false);
     rc = scheduler.getResourceCalculator();
   }
+  
+  @VisibleForTesting
+  public ResourceCalculator getResourceCalculator() {
+    return rc;
+  }
 
   @Override
   public void editSchedule(){
@@ -203,7 +208,9 @@
     Map<ApplicationAttemptId,Set<RMContainer>> toPreempt =
         getContainersToPreempt(queues, clusterResources);
 
-    logToCSV(queues);
+    if (LOG.isDebugEnabled()) {
+      logToCSV(queues);
+    }
 
     // if we are in observeOnly mode return before any action is taken
     if (observeOnly) {
@@ -603,7 +610,7 @@
       sb.append(", ");
       tq.appendLogString(sb);
     }
-    LOG.info(sb.toString());
+    LOG.debug(sb.toString());
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 391ccf6..2b590a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -19,16 +19,16 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
 
 import java.util.Collection;
-
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -208,6 +208,14 @@
    * @return the flag indicating whether the applications's state is stored.
    */
   boolean isAppFinalStateStored();
+  
+  
+  /**
+   * Nodes on which the containers for this {@link RMApp} ran.
+   * @return the set of nodes that ran any containers from this {@link RMApp};
+   *         the set grows as containers from this app run on additional nodes
+   */
+  Set<NodeId> getRanNodes();
 
   /**
    * Create the external user-facing state of ApplicationMaster from the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
index 3ab5db4..668c5e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
@@ -38,6 +38,9 @@
   ATTEMPT_FAILED,
   ATTEMPT_KILLED,
   NODE_UPDATE,
+  
+  // Source: Container and ResourceTracker
+  APP_RUNNING_ON_NODE,
 
   // Source: RMStateStore
   APP_NEW_SAVED,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 3318f15..3f9ef64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -25,6 +25,7 @@
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
@@ -71,7 +72,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
@@ -116,6 +116,7 @@
   private EventHandler handler;
   private static final AppFinishedTransition FINISHED_TRANSITION =
       new AppFinishedTransition();
+  private Set<NodeId> ranNodes = new ConcurrentSkipListSet<NodeId>();
 
   // These states stored are only valid when app is at killing or final_saving.
   private RMAppState stateBeforeKilling;
@@ -180,7 +181,6 @@
         new FinalSavingTransition(
           new AppKilledTransition(), RMAppState.KILLED))
 
-
      // Transitions from ACCEPTED state
     .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
@@ -200,6 +200,9 @@
         new FinalSavingTransition(FINISHED_TRANSITION, RMAppState.FINISHED))
     .addTransition(RMAppState.ACCEPTED, RMAppState.KILLING,
         RMAppEventType.KILL, new KillAttemptTransition())
+    .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     // ACCECPTED state can once again receive APP_ACCEPTED event, because on
     // recovery the app returns ACCEPTED state and the app once again go
     // through the scheduler and triggers one more APP_ACCEPTED event at
@@ -220,6 +223,9 @@
     .addTransition(RMAppState.RUNNING, RMAppState.FINISHED,
       // UnManagedAM directly jumps to finished
         RMAppEventType.ATTEMPT_FINISHED, FINISHED_TRANSITION)
+    .addTransition(RMAppState.RUNNING, RMAppState.RUNNING, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     .addTransition(RMAppState.RUNNING,
         EnumSet.of(RMAppState.ACCEPTED, RMAppState.FINAL_SAVING),
         RMAppEventType.ATTEMPT_FAILED,
@@ -235,6 +241,9 @@
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_FINISHED,
         new AttemptFinishedAtFinalSavingTransition())
+    .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
         EnumSet.of(RMAppEventType.NODE_UPDATE, RMAppEventType.KILL,
@@ -243,6 +252,9 @@
      // Transitions from FINISHING state
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHED,
         RMAppEventType.ATTEMPT_FINISHED, FINISHED_TRANSITION)
+    .addTransition(RMAppState.FINISHING, RMAppState.FINISHING, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHING,
       EnumSet.of(RMAppEventType.NODE_UPDATE,
@@ -251,6 +263,9 @@
         RMAppEventType.KILL))
 
      // Transitions from KILLING state
+    .addTransition(RMAppState.KILLING, RMAppState.KILLING, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     .addTransition(RMAppState.KILLING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_KILLED,
         new FinalSavingTransition(
@@ -267,6 +282,9 @@
 
      // Transitions from FINISHED state
      // ignorable transitions
+    .addTransition(RMAppState.FINISHED, RMAppState.FINISHED, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     .addTransition(RMAppState.FINISHED, RMAppState.FINISHED,
         EnumSet.of(
             RMAppEventType.NODE_UPDATE,
@@ -276,11 +294,17 @@
 
      // Transitions from FAILED state
      // ignorable transitions
+    .addTransition(RMAppState.FAILED, RMAppState.FAILED, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     .addTransition(RMAppState.FAILED, RMAppState.FAILED,
         EnumSet.of(RMAppEventType.KILL, RMAppEventType.NODE_UPDATE))
 
      // Transitions from KILLED state
      // ignorable transitions
+    .addTransition(RMAppState.KILLED, RMAppState.KILLED, 
+        RMAppEventType.APP_RUNNING_ON_NODE,
+        new AppRunningOnNodeTransition())
     .addTransition(
         RMAppState.KILLED,
         RMAppState.KILLED,
@@ -695,6 +719,23 @@
           nodeUpdateEvent.getNode());
     };
   }
+  
+  private static final class AppRunningOnNodeTransition extends RMAppTransition {
+    public void transition(RMAppImpl app, RMAppEvent event) {
+      RMAppRunningOnNodeEvent nodeAddedEvent = (RMAppRunningOnNodeEvent) event;
+      
+      // if the app's final state is already stored, notify the RMNode
+      if (isAppInFinalState(app)) {
+        app.handler.handle(
+            new RMNodeCleanAppEvent(nodeAddedEvent.getNodeId(), nodeAddedEvent
+                .getApplicationId()));
+        return;
+      }
+      
+      // otherwise, add it to ranNodes for further processing
+      app.ranNodes.add(nodeAddedEvent.getNodeId());
+    };
+  }
 
   /**
    * Move an app to a new queue.
@@ -1037,17 +1078,8 @@
       this.finalState = finalState;
     }
 
-    private Set<NodeId> getNodesOnWhichAttemptRan(RMAppImpl app) {
-      Set<NodeId> nodes = new HashSet<NodeId>();
-      for (RMAppAttempt attempt : app.attempts.values()) {
-        nodes.addAll(attempt.getRanNodes());
-      }
-      return nodes;
-    }
-
     public void transition(RMAppImpl app, RMAppEvent event) {
-      Set<NodeId> nodes = getNodesOnWhichAttemptRan(app);
-      for (NodeId nodeId : nodes) {
+      for (NodeId nodeId : app.getRanNodes()) {
         app.handler.handle(
             new RMNodeCleanAppEvent(nodeId, app.applicationId));
       }
@@ -1148,4 +1180,9 @@
   private RMAppState getRecoveredFinalState() {
     return this.recoveredFinalState;
   }
+
+  @Override
+  public Set<NodeId> getRanNodes() {
+    return ranNodes;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRunningOnNodeEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRunningOnNodeEvent.java
new file mode 100644
index 0000000..45c0d3c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRunningOnNodeEvent.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public class RMAppRunningOnNodeEvent extends RMAppEvent {
+  private final NodeId node;
+
+  public RMAppRunningOnNodeEvent(ApplicationId appId, NodeId node) {
+    super(appId, RMAppEventType.APP_RUNNING_ON_NODE);
+    this.node = node;
+  }
+  
+  public NodeId getNodeId() {
+    return node;
+  }
+}
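This new event replaces RMAppAttemptContainerAcquiredEvent (deleted below) and is addressed to the application rather than the attempt. A hedged sketch of how it is dispatched, mirroring the RMContainerImpl and RMNodeImpl changes later in this patch; the dispatcher, appId and nodeId variables are assumed to be in scope:

    // Sketch: telling an RMApp that one of its containers is running on a node.
    dispatcher.getEventHandler().handle(new RMAppRunningOnNodeEvent(appId, nodeId));
    // RMAppImpl's AppRunningOnNodeTransition records the node in ranNodes, or, if
    // the app is already in a final state, asks that node to clean the app up.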
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
index b4bad12..d472ad4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
 
 import java.util.List;
-import java.util.Set;
 
 import javax.crypto.SecretKey;
 
@@ -32,7 +31,6 @@
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -115,12 +113,6 @@
   FinalApplicationStatus getFinalApplicationStatus();
 
   /**
-   * Nodes on which the containers for this {@link RMAppAttempt} ran.
-   * @return the set of nodes that ran any containers from this {@link RMAppAttempt}
-   */
-  Set<NodeId> getRanNodes();
-
-  /**
    * Return a list of the last set of finished containers, resetting the
    * finished containers to empty.
    * @return the list of just finished containers, re setting the finished containers.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java
index e1522f1..ddf782e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEventType.java
@@ -36,7 +36,6 @@
   UNREGISTERED,
 
   // Source: Containers
-  CONTAINER_ACQUIRED,
   CONTAINER_ALLOCATED,
   CONTAINER_FINISHED,
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 2a1170d..5e71c93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -26,16 +26,13 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import javax.crypto.SecretKey;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -54,7 +51,6 @@
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -80,7 +76,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFinishedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
@@ -103,6 +98,8 @@
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @SuppressWarnings({"unchecked", "rawtypes"})
 public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 
@@ -133,10 +130,7 @@
   private final ApplicationSubmissionContext submissionContext;
   private Token<AMRMTokenIdentifier> amrmToken = null;
   private SecretKey clientTokenMasterKey = null;
-
-  //nodes on while this attempt's containers ran
-  private Set<NodeId> ranNodes =
-    new HashSet<NodeId>();
+  
   private List<ContainerStatus> justFinishedContainers =
     new ArrayList<ContainerStatus>();
   private Container masterContainer;
@@ -219,10 +213,7 @@
       .addTransition(RMAppAttemptState.ALLOCATED_SAVING, 
           RMAppAttemptState.ALLOCATED,
           RMAppAttemptEventType.ATTEMPT_NEW_SAVED, new AttemptStoredTransition())
-      .addTransition(RMAppAttemptState.ALLOCATED_SAVING, 
-          RMAppAttemptState.ALLOCATED_SAVING,
-          RMAppAttemptEventType.CONTAINER_ACQUIRED, 
-          new ContainerAcquiredTransition())
+          
        // App could be killed by the client. So need to handle this. 
       .addTransition(RMAppAttemptState.ALLOCATED_SAVING, 
           RMAppAttemptState.FINAL_SAVING,
@@ -249,10 +240,6 @@
             RMAppAttemptState.KILLED), RMAppAttemptState.KILLED))
 
        // Transitions from ALLOCATED State
-      .addTransition(RMAppAttemptState.ALLOCATED,
-          RMAppAttemptState.ALLOCATED,
-          RMAppAttemptEventType.CONTAINER_ACQUIRED,
-          new ContainerAcquiredTransition())
       .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.LAUNCHED,
           RMAppAttemptEventType.LAUNCHED, new AMLaunchedTransition())
       .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.FINAL_SAVING,
@@ -297,10 +284,6 @@
       .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
           RMAppAttemptEventType.CONTAINER_ALLOCATED)
       .addTransition(
-                RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
-                RMAppAttemptEventType.CONTAINER_ACQUIRED,
-                new ContainerAcquiredTransition())
-      .addTransition(
           RMAppAttemptState.RUNNING,
           EnumSet.of(RMAppAttemptState.RUNNING, RMAppAttemptState.FINAL_SAVING),
           RMAppAttemptEventType.CONTAINER_FINISHED,
@@ -337,7 +320,6 @@
             // should be fixed to reject container allocate request at Final
             // Saving in scheduler
               RMAppAttemptEventType.CONTAINER_ALLOCATED,
-              RMAppAttemptEventType.CONTAINER_ACQUIRED,
               RMAppAttemptEventType.ATTEMPT_NEW_SAVED,
               RMAppAttemptEventType.KILL))
 
@@ -620,11 +602,6 @@
   }
 
   @Override
-  public Set<NodeId> getRanNodes() {
-    return ranNodes;
-  }
-
-  @Override
   public Container getMasterContainer() {
     this.readLock.lock();
 
@@ -705,7 +682,6 @@
 
   public void transferStateFromPreviousAttempt(RMAppAttempt attempt) {
     this.justFinishedContainers = attempt.getJustFinishedContainers();
-    this.ranNodes = attempt.getRanNodes();
   }
 
   private void recoverAppAttemptCredentials(Credentials appAttemptTokens)
@@ -1402,17 +1378,6 @@
     finalStatus = unregisterEvent.getFinalApplicationStatus();
   }
 
-  private static final class ContainerAcquiredTransition extends
-      BaseTransition {
-    @Override
-    public void transition(RMAppAttemptImpl appAttempt,
-        RMAppAttemptEvent event) {
-      RMAppAttemptContainerAcquiredEvent acquiredEvent
-        = (RMAppAttemptContainerAcquiredEvent) event;
-      appAttempt.ranNodes.add(acquiredEvent.getContainer().getNodeId());
-    }
-  }
-
   private static final class ContainerFinishedTransition
       implements
       MultipleArcTransition<RMAppAttemptImpl, RMAppAttemptEvent, RMAppAttemptState> {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java
deleted file mode 100644
index 5902f91..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptContainerAcquiredEvent.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
-
-public class RMAppAttemptContainerAcquiredEvent extends RMAppAttemptEvent {
-
-  private final Container container;
-
-  public RMAppAttemptContainerAcquiredEvent(ApplicationAttemptId appAttemptId, 
-      Container container) {
-    super(appAttemptId, RMAppAttemptEventType.CONTAINER_ACQUIRED);
-    this.container = container;
-  }
-
-  public Container getContainer() {
-    return this.container;
-  }
-
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 01db215..c205537 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
@@ -365,9 +365,9 @@
             RMContainerEventType.FINISHED));
         return RMContainerState.COMPLETED;
       } else if (report.getContainerState().equals(ContainerState.RUNNING)) {
-        // Tell the appAttempt
-        container.eventHandler.handle(new RMAppAttemptContainerAcquiredEvent(
-            container.getApplicationAttemptId(), container.getContainer()));
+        // Tell the app
+        container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
+            .getApplicationAttemptId().getApplicationId(), container.nodeId));
         return RMContainerState.RUNNING;
       } else {
         // This can never happen.
@@ -408,9 +408,9 @@
       // Register with containerAllocationExpirer.
       container.containerAllocationExpirer.register(container.getContainerId());
 
-      // Tell the appAttempt
-      container.eventHandler.handle(new RMAppAttemptContainerAcquiredEvent(
-          container.getApplicationAttemptId(), container.getContainer()));
+      // Tell the app
+      container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
+          .getApplicationAttemptId().getApplicationId(), container.nodeId));
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 66a7d96..acee7d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -55,6 +55,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -473,7 +475,13 @@
       } else {
         // Increment activeNodes explicitly because this is a new node.
         ClusterMetrics.getMetrics().incrNumActiveNodes();
-        containers = startEvent.getContainerRecoveryReports();
+        containers = startEvent.getNMContainerStatuses();
+      }
+      
+      if (null != startEvent.getRunningApplications()) {
+        for (ApplicationId appId : startEvent.getRunningApplications()) {
+          handleRunningAppOnNode(rmNode, rmNode.context, appId, rmNode.nodeId);
+        }
       }
 
       rmNode.context.getDispatcher().getEventHandler()
@@ -482,6 +490,24 @@
         new NodesListManagerEvent(
             NodesListManagerEventType.NODE_USABLE, rmNode));
     }
+
+    void handleRunningAppOnNode(RMNodeImpl rmNode, RMContext context,
+        ApplicationId appId, NodeId nodeId) {
+      RMApp app = context.getRMApps().get(appId);
+      
+      // If the app cannot be found by its appId, something has gone wrong;
+      // add the app to the finishedApplications list so that it can be
+      // cleaned up on the NM
+      if (null == app) {
+        LOG.warn("Cannot get RMApp by appId=" + appId
+            + "; adding it to the finishedApplications list for cleanup");
+        rmNode.finishedApplications.add(appId);
+        return;
+      }
+
+      context.getDispatcher().getEventHandler()
+          .handle(new RMAppRunningOnNodeEvent(appId, nodeId));
+    }
   }
 
   public static class ReconnectNodeTransition implements
@@ -517,7 +543,7 @@
         }
         rmNode.context.getRMNodes().put(newNode.getNodeID(), newNode);
         rmNode.context.getDispatcher().getEventHandler().handle(
-            new RMNodeStartedEvent(newNode.getNodeID(), null));
+            new RMNodeStartedEvent(newNode.getNodeID(), null, null));
       }
       rmNode.context.getDispatcher().getEventHandler().handle(
           new NodesListManagerEvent(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java
index 0414347..4fc983a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStartedEvent.java
@@ -20,19 +20,28 @@
 
 import java.util.List;
 
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 
 public class RMNodeStartedEvent extends RMNodeEvent {
 
-  private List<NMContainerStatus> containerReports;
+  private List<NMContainerStatus> containerStatuses;
+  private List<ApplicationId> runningApplications;
 
-  public RMNodeStartedEvent(NodeId nodeId, List<NMContainerStatus> containerReports) {
+  public RMNodeStartedEvent(NodeId nodeId,
+      List<NMContainerStatus> containerReports,
+      List<ApplicationId> runningApplications) {
     super(nodeId, RMNodeEventType.STARTED);
-    this.containerReports = containerReports;
+    this.containerStatuses = containerReports;
+    this.runningApplications = runningApplications;
   }
 
-  public List<NMContainerStatus> getContainerRecoveryReports() {
-    return this.containerReports;
+  public List<NMContainerStatus> getNMContainerStatuses() {
+    return this.containerStatuses;
+  }
+  
+  public List<ApplicationId> getRunningApplications() {
+    return runningApplications;
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 9fb8d23..7074059 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -130,9 +130,9 @@
 
     LOG.info("Assigned container " + container.getId() + " of capacity "
         + container.getResource() + " on host " + rmNode.getNodeAddress()
-        + ", which currently has " + numContainers + " containers, "
+        + ", which has " + numContainers + " containers, "
         + getUsedResource() + " used and " + getAvailableResource()
-        + " available");
+        + " available after allocation");
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 5725f8c..ea53165 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1072,8 +1072,8 @@
   private boolean shouldAttemptPreemption() {
     if (preemptionEnabled) {
       return (preemptionUtilizationThreshold < Math.max(
-          (float) rootMetrics.getAvailableMB() / clusterResource.getMemory(),
-          (float) rootMetrics.getAvailableVirtualCores() /
+          (float) rootMetrics.getAllocatedMB() / clusterResource.getMemory(),
+          (float) rootMetrics.getAllocatedVirtualCores() /
               clusterResource.getVirtualCores()));
     }
     return false;
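Switching from getAvailableMB/getAvailableVirtualCores to the allocated counterparts turns this check into a genuine utilization threshold. A worked sketch with made-up numbers (threshold and cluster sizes are illustrative only):

    // Cluster of 100 GB / 100 vcores, preemptionUtilizationThreshold = 0.7f.
    float memoryUtilization = (80 * 1024f) / (100 * 1024f); // 80 GB allocated -> 0.8
    float coreUtilization   = 50f / 100f;                   // 50 vcores allocated -> 0.5
    boolean attempt = 0.7f < Math.max(memoryUtilization, coreUtilization); // true
    // The old code compared the threshold against *available* resources
    // (0.2 and 0.5 here), so a heavily loaded cluster would not preempt.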
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 56eba34..62e7f9f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.AccessControlException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -31,19 +34,27 @@
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.Consumes;
 import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -56,6 +67,8 @@
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
+import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -66,10 +79,10 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
@@ -82,7 +95,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
-import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
@@ -584,4 +596,166 @@
 
     return appAttemptsInfo;
   }
+
+  @GET
+  @Path("/apps/{appid}/state")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AppState getAppState(@Context HttpServletRequest hsr,
+      @PathParam("appid") String appId) throws AuthorizationException {
+    init();
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr);
+    String userName = "";
+    if (callerUGI != null) {
+      userName = callerUGI.getUserName();
+    }
+    RMApp app = null;
+    try {
+      app = getRMAppForAppId(appId);
+    } catch (NotFoundException e) {
+      RMAuditLogger.logFailure(userName, AuditConstants.KILL_APP_REQUEST,
+        "UNKNOWN", "RMWebService",
+        "Trying to get state of an absent application " + appId);
+      throw e;
+    }
+
+    AppState ret = new AppState();
+    ret.setState(app.getState().toString());
+
+    return ret;
+  }
+
+  // We can't return a POJO here because we can't control the status code;
+  // it is always set to 200, while we need to be able to return
+  // 202 as well.
+
+  @PUT
+  @Path("/apps/{appid}/state")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public Response updateAppState(AppState targetState,
+      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
+      throws AuthorizationException, YarnException, InterruptedException,
+      IOException {
+
+    init();
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr);
+    if (callerUGI == null) {
+      String msg = "Unable to obtain user name, user not authenticated";
+      throw new AuthorizationException(msg);
+    }
+
+    String userName = callerUGI.getUserName();
+    RMApp app = null;
+    try {
+      app = getRMAppForAppId(appId);
+    } catch (NotFoundException e) {
+      RMAuditLogger.logFailure(userName, AuditConstants.KILL_APP_REQUEST,
+        "UNKNOWN", "RMWebService", "Trying to kill/move an absent application "
+            + appId);
+      throw e;
+    }
+
+    if (!app.getState().toString().equals(targetState.getState())) {
+      // The user is attempting to change the state. Right now we only
+      // allow users to kill the app.
+
+      if (targetState.getState().equals(YarnApplicationState.KILLED.toString())) {
+        return killApp(app, callerUGI, hsr);
+      }
+      throw new BadRequestException("Only '"
+          + YarnApplicationState.KILLED.toString()
+          + "' is allowed as a target state.");
+    }
+
+    AppState ret = new AppState();
+    ret.setState(app.getState().toString());
+
+    return Response.status(Status.OK).entity(ret).build();
+  }
+
+  protected Response killApp(RMApp app, UserGroupInformation callerUGI,
+      HttpServletRequest hsr) throws IOException, InterruptedException {
+
+    if (app == null) {
+      throw new IllegalArgumentException("app cannot be null");
+    }
+    String userName = callerUGI.getUserName();
+    final ApplicationId appid = app.getApplicationId();
+    KillApplicationResponse resp = null;
+    try {
+      resp =
+          callerUGI
+            .doAs(new PrivilegedExceptionAction<KillApplicationResponse>() {
+              @Override
+              public KillApplicationResponse run() throws IOException,
+                  YarnException {
+                KillApplicationRequest req =
+                    KillApplicationRequest.newInstance(appid);
+                return rm.getClientRMService().forceKillApplication(req);
+              }
+            });
+    } catch (UndeclaredThrowableException ue) {
+      // if the root cause is a permissions issue
+      // bubble that up to the user
+      if (ue.getCause() instanceof YarnException) {
+        YarnException ye = (YarnException) ue.getCause();
+        if (ye.getCause() instanceof AccessControlException) {
+          String appId = app.getApplicationId().toString();
+          String msg =
+              "Unauthorized attempt to kill appid " + appId
+                  + " by remote user " + userName;
+          return Response.status(Status.FORBIDDEN).entity(msg).build();
+        } else {
+          throw ue;
+        }
+      } else {
+        throw ue;
+      }
+    }
+
+    AppState ret = new AppState();
+    ret.setState(app.getState().toString());
+
+    if (resp.getIsKillCompleted()) {
+      RMAuditLogger.logSuccess(userName, AuditConstants.KILL_APP_REQUEST,
+        "RMWebService", app.getApplicationId());
+    } else {
+      return Response.status(Status.ACCEPTED).entity(ret)
+        .header(HttpHeaders.LOCATION, hsr.getRequestURL()).build();
+    }
+    return Response.status(Status.OK).entity(ret).build();
+  }
+
+  private RMApp getRMAppForAppId(String appId) {
+
+    if (appId == null || appId.isEmpty()) {
+      throw new NotFoundException("appId, " + appId + ", is empty or null");
+    }
+    ApplicationId id;
+    try {
+      id = ConverterUtils.toApplicationId(recordFactory, appId);
+    } catch (NumberFormatException e) {
+      throw new NotFoundException("appId is invalid");
+    }
+    if (id == null) {
+      throw new NotFoundException("appId is invalid");
+    }
+    RMApp app = rm.getRMContext().getRMApps().get(id);
+    if (app == null) {
+      throw new NotFoundException("app with id: " + appId + " not found");
+    }
+    return app;
+  }
+
+  private UserGroupInformation getCallerUserGroupInformation(
+      HttpServletRequest hsr) {
+
+    String remoteUser = hsr.getRemoteUser();
+    UserGroupInformation callerUGI = null;
+    if (remoteUser != null) {
+      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+    }
+
+    return callerUGI;
+  }
 }
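Together with the AppState DAO added below, the two new methods expose a small application-state REST API. A hedged client-side sketch; the /ws/v1/cluster prefix, the default port 8088 and the {"state":"..."} JSON shape are assumptions based on the existing RM web services conventions rather than anything guaranteed by this patch:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.util.Scanner;

    public class AppStateClientSketch {
      public static void main(String[] args) throws Exception {
        String url = "http://rm-host:8088/ws/v1/cluster/apps/" + args[0] + "/state";

        // GET the current state, e.g. {"state":"RUNNING"}
        HttpURLConnection get = (HttpURLConnection) new URL(url).openConnection();
        get.setRequestProperty("Accept", "application/json");
        try (Scanner s = new Scanner(get.getInputStream(), "UTF-8")) {
          System.out.println(s.useDelimiter("\\A").next());
        }

        // PUT a target state of KILLED; 200 means the app is already dead,
        // 202 means the kill is still in progress (see killApp() above).
        HttpURLConnection put = (HttpURLConnection) new URL(url).openConnection();
        put.setRequestMethod("PUT");
        put.setDoOutput(true);
        put.setRequestProperty("Content-Type", "application/json");
        try (OutputStream out = put.getOutputStream()) {
          out.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("PUT status: " + put.getResponseCode());
      }
    }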
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppState.java
new file mode 100644
index 0000000..e8f1cc3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppState.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "appstate")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AppState {
+
+  String state;
+
+  public AppState() {
+  }
+
+  public AppState(String state) {
+    this.state = state;
+  }
+
+  public void setState(String state) {
+    this.state = state;
+  }
+
+  public String getState() {
+    return this.state;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
index e3a5776..0a838fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
@@ -100,11 +100,17 @@
   }
 
   public RegisterNodeManagerResponse registerNode() throws Exception {
-    return registerNode(null);
+    return registerNode(null, null);
+  }
+  
+  public RegisterNodeManagerResponse registerNode(
+      List<ApplicationId> runningApplications) throws Exception {
+    return registerNode(null, runningApplications);
   }
 
   public RegisterNodeManagerResponse registerNode(
-      List<NMContainerStatus> containerReports) throws Exception{
+      List<NMContainerStatus> containerReports,
+      List<ApplicationId> runningApplications) throws Exception {
     RegisterNodeManagerRequest req = Records.newRecord(
         RegisterNodeManagerRequest.class);
     req.setNodeId(nodeId);
@@ -113,6 +119,7 @@
     req.setResource(resource);
     req.setContainerStatuses(containerReports);
     req.setNMVersion(version);
+    req.setRunningApplications(runningApplications);
     RegisterNodeManagerResponse registrationResponse =
         resourceTracker.registerNodeManager(req);
     this.currentContainerTokenMasterKey =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 446bbae..45c2e5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -350,11 +351,20 @@
     nm.registerNode();
     return nm;
   }
+  
+  public MockNM registerNode(String nodeIdStr, int memory, int vCores,
+      List<ApplicationId> runningApplications) throws Exception {
+    MockNM nm =
+        new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
+            YarnVersionInfo.getVersion());
+    nm.registerNode(runningApplications);
+    return nm;
+  }
 
   public void sendNodeStarted(MockNM nm) throws Exception {
     RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get(
         nm.getNodeId());
-    node.handle(new RMNodeStartedEvent(nm.getNodeId(), null));
+    node.handle(new RMNodeStartedEvent(nm.getNodeId(), null, null));
   }
   
   public void sendNodeLost(MockNM nm) throws Exception {
@@ -571,4 +581,8 @@
       .getSchedulerApplications().get(app.getApplicationId()).getQueue()
       .getMetrics().clearQueueMetrics();
   }
+  
+  public RMActiveServices getRMActiveService() {
+    return activeServices;
+  }
 }
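The MockNM/MockRM additions above let tests register a node that reports applications as running. A sketch of the intended usage (conf setup and the app lifecycle steps are elided; submitApp(int) is assumed to exist as in other MockRM-based tests):

    // Sketch: an NM re-registers and reports a (possibly finished) app as running.
    MockRM rm = new MockRM(conf);
    rm.start();
    RMApp app = rm.submitApp(1024);
    // ... run the app to completion, then simulate an RM restart ...
    MockNM nm = rm.registerNode("127.0.0.1:1234", 8192, 4,
        Arrays.asList(app.getApplicationId()));
    // For a finished app the RM responds by scheduling an app cleanup on that node,
    // which TestApplicationCleanup#waitForAppCleanupMessageRecved then observes.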
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
index 30ae089..47d4e37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -18,26 +18,30 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.junit.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
@@ -45,13 +49,29 @@
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 public class TestApplicationCleanup {
 
   private static final Log LOG = LogFactory
     .getLog(TestApplicationCleanup.class);
+  
+  private YarnConfiguration conf;
+  
+  @Before
+  public void setup() throws UnknownHostException {
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+    conf = new YarnConfiguration();
+    UserGroupInformation.setConfiguration(conf);
+    conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
+  }
 
+  @SuppressWarnings("resource")
   @Test
   public void testAppCleanup() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
@@ -130,6 +150,7 @@
     rm.stop();
   }
 
+  @SuppressWarnings("resource")
   @Test
   public void testContainerCleanup() throws Exception {
 
@@ -252,6 +273,69 @@
 
     rm.stop();
   }
+  
+  private void waitForAppCleanupMessageRecved(MockNM nm, ApplicationId appId)
+      throws Exception {
+    while (true) {
+      NodeHeartbeatResponse response = nm.nodeHeartbeat(true);
+      if (response.getApplicationsToCleanup() != null
+          && response.getApplicationsToCleanup().size() == 1
+          && appId.equals(response.getApplicationsToCleanup().get(0))) {
+        return;
+      }
+
+      LOG.info("Haven't got application=" + appId.toString()
+          + " in cleanup list from node heartbeat response, "
+          + "sleep for a while before next heartbeat");
+      Thread.sleep(1000);
+    }
+  }
+  
+  private MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
+      throws Exception {
+    RMAppAttempt attempt = app.getCurrentAppAttempt();
+    nm.nodeHeartbeat(true);
+    MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+    am.registerAppAttempt();
+    rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
+    return am;
+  }
+  
+  @SuppressWarnings("resource")
+  @Test (timeout = 60000)
+  public void testAppCleanupWhenRestartedAfterAppFinished() throws Exception {
+    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
+    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    memStore.init(conf);
+
+    // start RM
+    MockRM rm1 = new MockRM(conf, memStore);
+    rm1.start();
+    MockNM nm1 =
+        new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
+    nm1.registerNode();
+
+    // create app and launch the AM
+    RMApp app0 = rm1.submitApp(200);
+    MockAM am0 = launchAM(app0, rm1, nm1);
+    nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+    rm1.waitForState(app0.getApplicationId(), RMAppState.FAILED);
+
+    // start new RM
+    MockRM rm2 = new MockRM(conf, memStore);
+    rm2.start();
+    
+    // nm1 register to rm2, and do a heartbeat
+    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
+    nm1.registerNode(Arrays.asList(app0.getApplicationId()));
+    rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);
+    
+    // wait for application cleanup message received
+    waitForAppCleanupMessageRecved(nm1, app0.getApplicationId());
+    
+    rm1.stop();
+    rm2.stop();
+  }
 
   public static void main(String[] args) throws Exception {
     TestApplicationCleanup t = new TestApplicationCleanup();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 1da03fe..46693be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -161,7 +161,7 @@
   @Test (timeout = 5000)
   public void testExpiredContainer() {
     // Start the node
-    node.handle(new RMNodeStartedEvent(null, null));
+    node.handle(new RMNodeStartedEvent(null, null, null));
     verify(scheduler).handle(any(NodeAddedSchedulerEvent.class));
     
     // Expire a container
@@ -189,11 +189,11 @@
   @Test (timeout = 5000)
   public void testContainerUpdate() throws InterruptedException{
     //Start the node
-    node.handle(new RMNodeStartedEvent(null, null));
+    node.handle(new RMNodeStartedEvent(null, null, null));
     
     NodeId nodeId = BuilderUtils.newNodeId("localhost:1", 1);
     RMNodeImpl node2 = new RMNodeImpl(nodeId, rmContext, null, 0, 0, null, null, null);
-    node2.handle(new RMNodeStartedEvent(null, null));
+    node2.handle(new RMNodeStartedEvent(null, null, null));
     
     ContainerId completedContainerIdFromNode1 = BuilderUtils.newContainerId(
         BuilderUtils.newApplicationAttemptId(
@@ -249,7 +249,7 @@
   @Test (timeout = 5000)
   public void testStatusChange(){
     //Start the node
-    node.handle(new RMNodeStartedEvent(null, null));
+    node.handle(new RMNodeStartedEvent(null, null, null));
     //Add info to the queue first
     node.setNextHeartBeat(false);
 
@@ -465,7 +465,7 @@
     RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0,
         null, ResourceOption.newInstance(capability,
             RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT), nmVersion);
-    node.handle(new RMNodeStartedEvent(node.getNodeID(), null));
+    node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null));
     Assert.assertEquals(NodeState.RUNNING, node.getState());
     return node;
   }
@@ -496,7 +496,7 @@
     int initialUnhealthy = cm.getUnhealthyNMs();
     int initialDecommissioned = cm.getNumDecommisionedNMs();
     int initialRebooted = cm.getNumRebootedNMs();
-    node.handle(new RMNodeStartedEvent(node.getNodeID(), null));
+    node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null));
     Assert.assertEquals("Active Nodes", initialActive + 1, cm.getNumActiveNMs());
     Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
     Assert.assertEquals("Unhealthy Nodes",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 9c2d87e..8eed4e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -102,7 +102,6 @@
 import org.junit.Test;
 
 public class TestRMRestart {
-
   private final static File TEMP_DIR = new File(System.getProperty(
     "test.build.data", "/tmp"), "decommision");
   private File hostFile = new File(TEMP_DIR + File.separator + "hostFile.txt");
@@ -309,7 +308,7 @@
         TestRMRestart
           .createNMContainerStatus(loadedApp1.getCurrentAppAttempt()
             .getAppAttemptId(), 1, ContainerState.COMPLETE);
-    nm1.registerNode(Arrays.asList(status));
+    nm1.registerNode(Arrays.asList(status), null);
     nm2.registerNode();
     
     rm2.waitForState(loadedApp1.getApplicationId(), RMAppState.ACCEPTED);
@@ -392,7 +391,7 @@
     // completed apps are not removed immediately after app finish
     // And finished app is also loaded back.
     Assert.assertEquals(4, rmAppState.size());
- }
+  }
 
   @Test (timeout = 60000)
   public void testRMRestartAppRunningAMFailed() throws Exception {
@@ -514,7 +513,7 @@
     NMContainerStatus status =
         TestRMRestart.createNMContainerStatus(
           am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
-    nm1.registerNode(Arrays.asList(status));
+    nm1.registerNode(Arrays.asList(status), null);
     rm2.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.FAILED);
     launchAM(rmApp, rm2, nm1);
     Assert.assertEquals(3, rmApp.getAppAttempts().size());
@@ -1680,7 +1679,8 @@
         TestRMRestart
           .createNMContainerStatus(loadedApp1.getCurrentAppAttempt()
             .getAppAttemptId(), 1, ContainerState.COMPLETE);
-    nm1.registerNode(Arrays.asList(status));
+    nm1.registerNode(Arrays.asList(status), null);
+
     while (loadedApp1.getAppAttempts().size() != 2) {
       Thread.sleep(200);
     }
@@ -1807,7 +1807,7 @@
             NMContainerStatus status =
                 TestRMRestart.createNMContainerStatus(
                   am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
-            nm1.registerNode(Arrays.asList(status));
+            nm1.registerNode(Arrays.asList(status), null);
           }
         };
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 6693d09..6dd2992 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -159,7 +159,7 @@
           ContainerState.COMPLETE);
 
     nm1.registerNode(Arrays.asList(amContainer, runningContainer,
-      completedContainer));
+      completedContainer), null);
 
     // Wait for RM to settle down on recovering containers;
     waitForNumContainersToRecover(2, rm2, am1.getApplicationAttemptId());
@@ -383,11 +383,11 @@
     List<NMContainerStatus> am1_2Containers =
         createNMContainerStatusForApp(am1_2);
     am1_1Containers.addAll(am1_2Containers);
-    nm1.registerNode(am1_1Containers);
+    nm1.registerNode(am1_1Containers, null);
 
     List<NMContainerStatus> am2Containers =
         createNMContainerStatusForApp(am2);
-    nm2.registerNode(am2Containers);
+    nm2.registerNode(am2Containers, null);
 
     // Wait for RM to settle down on recovering containers;
     waitForNumContainersToRecover(2, rm2, am1_1.getApplicationAttemptId());
@@ -482,7 +482,7 @@
         TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 3,
           ContainerState.COMPLETE);
     nm1.registerNode(Arrays.asList(amContainer, runningContainer,
-      completedContainer));
+      completedContainer), null);
     rm2.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
     // Wait for RM to settle down on recovering containers;
     Thread.sleep(3000);
@@ -519,7 +519,7 @@
     NMContainerStatus completedContainer =
         TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 3,
           ContainerState.COMPLETE);
-    nm1.registerNode(Arrays.asList(runningContainer, completedContainer));
+    nm1.registerNode(Arrays.asList(runningContainer, completedContainer), null);
     RMApp recoveredApp1 =
         rm2.getRMContext().getRMApps().get(app1.getApplicationId());
     assertEquals(RMAppState.FINISHED, recoveredApp1.getState());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 2cdbf95..4349a236 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -159,6 +160,11 @@
     public YarnApplicationState createApplicationState() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public Set<NodeId> getRanNodes() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
   }
 
   public static RMApp newApplication(int i) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 6a449f5..d0a80eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -17,6 +17,25 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MAX_IGNORED_OVER_CAPACITY;
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MONITORING_INTERVAL;
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.NATURAL_TERMINATION_FACTOR;
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.OBSERVE_ONLY;
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.TOTAL_PREEMPTION_PER_ROUND;
+import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.WAIT_TIME_BEFORE_KILL;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType.KILL_CONTAINER;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType.PREEMPT_CONTAINER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.Deque;
@@ -27,12 +46,16 @@
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Priority;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
@@ -52,17 +75,6 @@
 import org.mockito.ArgumentCaptor;
 import org.mockito.ArgumentMatcher;
 
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MAX_IGNORED_OVER_CAPACITY;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MONITORING_INTERVAL;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.NATURAL_TERMINATION_FACTOR;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.OBSERVE_ONLY;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.TOTAL_PREEMPTION_PER_ROUND;
-import static org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.WAIT_TIME_BEFORE_KILL;
-import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType.KILL_CONTAINER;
-import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType.PREEMPT_CONTAINER;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
 public class TestProportionalCapacityPreemptionPolicy {
 
   static final long TS = 3141592653L;
@@ -424,6 +436,36 @@
     assert containers.get(4).equals(rm5);
 
   }
+  
+  @Test
+  public void testPolicyInitializeAfterSchedulerInitialized() {
+    Configuration conf = new Configuration();
+    conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+        ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
+    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+    
+    @SuppressWarnings("resource")
+    MockRM rm = new MockRM(conf);
+    rm.init(conf);
+    
+    // ProportionalCapacityPreemptionPolicy should be initialized after
+    // the CapacityScheduler has been initialized. To verify this, we
+    // 1) find the SchedulingMonitor in RMActiveService's service list, and
+    // 2) check whether the ResourceCalculator in the policy is null.
+    // If it is not null, we can conclude that the policy was initialized
+    // after the scheduler was initialized.
+    for (Service service : rm.getRMActiveService().getServices()) {
+      if (service instanceof SchedulingMonitor) {
+        ProportionalCapacityPreemptionPolicy policy =
+            (ProportionalCapacityPreemptionPolicy) ((SchedulingMonitor) service)
+                .getSchedulingEditPolicy();
+        assertNotNull(policy.getResourceCalculator());
+        return;
+      }
+    }
+    
+    fail("Failed to find SchedulingMonitor service, please check what happened");
+  }
 
   static class IsPreemptionRequestFor
       extends ArgumentMatcher<ContainerPreemptEvent> {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index b07525d..8f26d10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -232,4 +233,9 @@
   public YarnApplicationState createApplicationState() {
     return null;
   }
+
+  @Override
+  public Set<NodeId> getRanNodes() {
+    return null;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 133e12f..a4f173d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -75,8 +76,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAcquiredEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
@@ -315,7 +316,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertNull(applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     assertNotNull(applicationAttempt.getTrackingUrl());
     assertFalse("N/A".equals(applicationAttempt.getTrackingUrl()));
@@ -331,7 +332,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertNull(applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     if (UserGroupInformation.isSecurityEnabled()) {
       verify(clientToAMTokenManager).createMasterKey(
@@ -359,7 +360,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertNull(applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     
     // Check events
@@ -385,7 +386,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertEquals(amContainer, applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
     verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
     verifyAttemptFinalStateSaved();
@@ -425,7 +426,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertNull(applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     assertNull(applicationAttempt.getFinalApplicationStatus());
   }
 
@@ -461,7 +462,7 @@
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertEquals(container, applicationAttempt.getMasterContainer());
     assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001);
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     
     // Check events
     verify(application, times(1)).handle(any(RMAppFailedAttemptEvent.class));
@@ -666,8 +667,10 @@
     runApplicationAttempt(null, "host", 8042, url, true);
 
     // complete a container
-    applicationAttempt.handle(new RMAppAttemptContainerAcquiredEvent(
-        applicationAttempt.getAppAttemptId(), mock(Container.class)));
+    Container container = mock(Container.class);
+    when(container.getNodeId()).thenReturn(NodeId.newInstance("host", 1234));
+    application.handle(new RMAppRunningOnNodeEvent(application.getApplicationId(),
+        container.getNodeId()));
     applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(
         applicationAttempt.getAppAttemptId(), mock(ContainerStatus.class)));
     // complete AM
@@ -845,7 +848,7 @@
         applicationAttempt.getAppAttemptState());
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertEquals(amContainer, applicationAttempt.getMasterContainer());
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     String rmAppPageUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
         applicationAttempt.getAppAttemptId().getApplicationId());
     assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl());
@@ -882,7 +885,7 @@
         applicationAttempt.getAppAttemptState());
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
     assertEquals(amContainer, applicationAttempt.getMasterContainer());
-    assertEquals(0, applicationAttempt.getRanNodes().size());
+    assertEquals(0, application.getRanNodes().size());
     String rmAppPageUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
         applicationAttempt.getAppAttemptId().getApplicationId());
     assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 2098e16..310104b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -146,7 +146,7 @@
     // Create node with 4GB memory and 4 vcores
     registerNodeAndSubmitApp(4 * 1024, 4, 2, 1024);
 
-    // Verify submitting another request doesn't trigger preemption
+    // Verify submitting another request triggers preemption
     createSchedulingRequest(1024, "queueB", "user1", 1, 1);
     scheduler.update();
     clock.tick(6);
@@ -171,5 +171,21 @@
     scheduler.preemptTasksIfNecessary();
     assertEquals("preemptResources() should not have been called", -1,
         ((StubbedFairScheduler) scheduler).lastPreemptMemory);
+
+    resourceManager.stop();
+
+    startResourceManager(0.7f);
+    // Create node with 4GB memory and 4 vcores
+    registerNodeAndSubmitApp(4 * 1024, 4, 3, 1024);
+
+    // Verify submitting another request triggers preemption
+    createSchedulingRequest(1024, "queueB", "user1", 1, 1);
+    scheduler.update();
+    clock.tick(6);
+
+    ((StubbedFairScheduler) scheduler).resetLastPreemptResources();
+    scheduler.preemptTasksIfNecessary();
+    assertEquals("preemptResources() should have been called", 1024,
+        ((StubbedFairScheduler) scheduler).lastPreemptMemory);
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
new file mode 100644
index 0000000..7cbf125
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -0,0 +1,496 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.Properties;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONMarshaller;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+@RunWith(Parameterized.class)
+public class TestRMWebServicesAppsModification extends JerseyTest {
+  private static MockRM rm;
+
+  private static final int CONTAINER_MB = 1024;
+
+  private Injector injector;
+  private String webserviceUserName = "testuser";
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  /*
+   * Helper class to allow testing of RM web services which require
+   * authorization. Add this class as a filter in the Guice injector for the
+   * MockRM.
+   */
+
+  @Singleton
+  public static class TestRMCustomAuthFilter extends AuthenticationFilter {
+
+    @Override
+    protected Properties getConfiguration(String configPrefix,
+        FilterConfig filterConfig) throws ServletException {
+      Properties props = new Properties();
+      Enumeration<?> names = filterConfig.getInitParameterNames();
+      while (names.hasMoreElements()) {
+        String name = (String) names.nextElement();
+        if (name.startsWith(configPrefix)) {
+          String value = filterConfig.getInitParameter(name);
+          props.put(name.substring(configPrefix.length()), value);
+        }
+      }
+      props.put(AuthenticationFilter.AUTH_TYPE, "simple");
+      props.put(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
+      return props;
+    }
+
+  }
+
+  private class TestServletModule extends ServletModule {
+    public Configuration conf = new Configuration();
+    boolean setAuthFilter = false;
+
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      bind(RMContext.class).toInstance(rm.getRMContext());
+      bind(ApplicationACLsManager.class).toInstance(
+        rm.getApplicationACLsManager());
+      bind(QueueACLsManager.class).toInstance(rm.getQueueACLsManager());
+      if (setAuthFilter) {
+        filter("/*").through(TestRMCustomAuthFilter.class);
+      }
+      serve("/*").with(GuiceContainer.class);
+    }
+  }
+
+  private Injector getNoAuthInjector() {
+    return Guice.createInjector(new TestServletModule() {
+      @Override
+      protected void configureServlets() {
+        super.configureServlets();
+      }
+    });
+  }
+
+  private Injector getSimpleAuthInjector() {
+    return Guice.createInjector(new TestServletModule() {
+      @Override
+      protected void configureServlets() {
+        setAuthFilter = true;
+        conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+        // set the admin acls otherwise all users are considered admins
+        // and we can't test authorization
+        conf.setStrings(YarnConfiguration.YARN_ADMIN_ACL, "testuser1");
+        super.configureServlets();
+      }
+    });
+  }
+
+  @Parameters
+  public static Collection<Object[]> guiceConfigs() {
+    return Arrays.asList(new Object[][] { { 0 }, { 1 } });
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  public TestRMWebServicesAppsModification(int run) {
+    super(new WebAppDescriptor.Builder(
+      "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+      .contextListenerClass(GuiceServletConfig.class)
+      .filterClass(com.google.inject.servlet.GuiceFilter.class)
+      .contextPath("jersey-guice-filter").servletPath("/").build());
+    switch (run) {
+    case 0:
+    default:
+      injector = getNoAuthInjector();
+      break;
+    case 1:
+      injector = getSimpleAuthInjector();
+      break;
+    }
+  }
+
+  private boolean isAuthorizationEnabled() {
+    return rm.getConfig().getBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
+  }
+
+  private WebResource constructWebResource(WebResource r, String... paths) {
+    WebResource rt = r;
+    for (String path : paths) {
+      rt = rt.path(path);
+    }
+    if (isAuthorizationEnabled()) {
+      rt = rt.queryParam("user.name", webserviceUserName);
+    }
+    return rt;
+  }
+
+  private WebResource constructWebResource(String... paths) {
+    WebResource r = resource();
+    WebResource ws = r.path("ws").path("v1").path("cluster");
+    return this.constructWebResource(ws, paths);
+  }
+
+  @Test
+  public void testSingleAppState() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    String[] mediaTypes =
+        { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML };
+    for (String mediaType : mediaTypes) {
+      RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
+      amNodeManager.nodeHeartbeat(true);
+      ClientResponse response =
+          this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+              "state").accept(mediaType).get(ClientResponse.class);
+      assertEquals(Status.OK, response.getClientResponseStatus());
+      if (mediaType == MediaType.APPLICATION_JSON) {
+        verifyAppStateJson(response, RMAppState.ACCEPTED);
+      } else if (mediaType == MediaType.APPLICATION_XML) {
+        verifyAppStateXML(response, RMAppState.ACCEPTED);
+      }
+    }
+    rm.stop();
+  }
+
+  @Test(timeout = 90000)
+  public void testSingleAppKill() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    String[] mediaTypes =
+        { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML };
+    MediaType[] contentTypes =
+        { MediaType.APPLICATION_JSON_TYPE, MediaType.APPLICATION_XML_TYPE };
+    for (String mediaType : mediaTypes) {
+      for (MediaType contentType : contentTypes) {
+        RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
+        amNodeManager.nodeHeartbeat(true);
+
+        ClientResponse response =
+            this
+              .constructWebResource("apps", app.getApplicationId().toString(),
+                "state").accept(mediaType).get(ClientResponse.class);
+        AppState targetState =
+            new AppState(YarnApplicationState.KILLED.toString());
+
+        Object entity;
+        if (contentType == MediaType.APPLICATION_JSON_TYPE) {
+          entity = appStateToJSON(targetState);
+        } else {
+          entity = targetState;
+        }
+        response =
+            this
+              .constructWebResource("apps", app.getApplicationId().toString(),
+                "state").entity(entity, contentType).accept(mediaType)
+              .put(ClientResponse.class);
+
+        if (!isAuthorizationEnabled()) {
+          assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
+          continue;
+        }
+        assertEquals(Status.ACCEPTED, response.getClientResponseStatus());
+        if (mediaType == MediaType.APPLICATION_JSON) {
+          verifyAppStateJson(response, RMAppState.KILLING, RMAppState.ACCEPTED);
+        } else {
+          verifyAppStateXML(response, RMAppState.KILLING, RMAppState.ACCEPTED);
+        }
+
+        String locationHeaderValue =
+            response.getHeaders().getFirst(HttpHeaders.LOCATION);
+        Client c = Client.create();
+        WebResource tmp = c.resource(locationHeaderValue);
+        if (isAuthorizationEnabled()) {
+          tmp = tmp.queryParam("user.name", webserviceUserName);
+        }
+        response = tmp.get(ClientResponse.class);
+        assertEquals(Status.OK, response.getClientResponseStatus());
+        assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/"
+            + app.getApplicationId().toString() + "/state"));
+
+        while (true) {
+          Thread.sleep(100);
+          response =
+              this
+                .constructWebResource("apps",
+                  app.getApplicationId().toString(), "state").accept(mediaType)
+                .entity(entity, contentType).put(ClientResponse.class);
+          assertTrue((response.getClientResponseStatus() == Status.ACCEPTED)
+              || (response.getClientResponseStatus() == Status.OK));
+          if (response.getClientResponseStatus() == Status.OK) {
+            assertEquals(RMAppState.KILLED, app.getState());
+            if (mediaType == MediaType.APPLICATION_JSON) {
+              verifyAppStateJson(response, RMAppState.KILLED);
+            } else {
+              verifyAppStateXML(response, RMAppState.KILLED);
+            }
+            break;
+          }
+        }
+      }
+    }
+
+    rm.stop();
+    return;
+  }
+
+  @Test
+  public void testSingleAppKillInvalidState() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+
+    String[] mediaTypes =
+        { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML };
+    MediaType[] contentTypes =
+        { MediaType.APPLICATION_JSON_TYPE, MediaType.APPLICATION_XML_TYPE };
+    String[] targetStates =
+        { YarnApplicationState.FINISHED.toString(), "blah" };
+
+    for (String mediaType : mediaTypes) {
+      for (MediaType contentType : contentTypes) {
+        for (String targetStateString : targetStates) {
+          RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
+          amNodeManager.nodeHeartbeat(true);
+          ClientResponse response;
+          AppState targetState = new AppState(targetStateString);
+          Object entity;
+          if (contentType == MediaType.APPLICATION_JSON_TYPE) {
+            entity = appStateToJSON(targetState);
+          } else {
+            entity = targetState;
+          }
+          response =
+              this
+                .constructWebResource("apps",
+                  app.getApplicationId().toString(), "state")
+                .entity(entity, contentType).accept(mediaType)
+                .put(ClientResponse.class);
+
+          if (!isAuthorizationEnabled()) {
+            assertEquals(Status.UNAUTHORIZED,
+              response.getClientResponseStatus());
+            continue;
+          }
+          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+        }
+      }
+    }
+
+    rm.stop();
+    return;
+  }
+
+  private static String appStateToJSON(AppState state) throws Exception {
+    StringWriter sw = new StringWriter();
+    JSONJAXBContext ctx = new JSONJAXBContext(AppState.class);
+    JSONMarshaller jm = ctx.createJSONMarshaller();
+    jm.marshallToJSON(state, sw);
+    return sw.toString();
+  }
+
+  protected static void verifyAppStateJson(ClientResponse response,
+      RMAppState... states) throws JSONException {
+
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    boolean valid = false;
+    for (RMAppState state : states) {
+      if (state.toString().equals(json.getString("state"))) {
+        valid = true;
+      }
+    }
+    assertTrue("app state incorrect", valid);
+    return;
+  }
+
+  protected static void verifyAppStateXML(ClientResponse response,
+      RMAppState... appStates) throws ParserConfigurationException,
+      IOException, SAXException {
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList nodes = dom.getElementsByTagName("appstate");
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+    Element element = (Element) nodes.item(0);
+    String state = WebServicesTestUtils.getXmlString(element, "state");
+    boolean valid = false;
+    for (RMAppState appState : appStates) {
+      if (appState.toString().equals(state)) {
+        valid = true;
+      }
+    }
+    assertTrue("app state incorrect", valid);
+    return;
+  }
+
+  @Test(timeout = 30000)
+  public void testSingleAppKillUnauthorized() throws Exception {
+
+    // default root queue allows anyone to have admin acl
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    csconf.setAcl("root", QueueACL.ADMINISTER_QUEUE, "someuser");
+    csconf.setAcl("root.default", QueueACL.ADMINISTER_QUEUE, "someuser");
+    rm.getResourceScheduler().reinitialize(csconf, rm.getRMContext());
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+
+    String[] mediaTypes =
+        { MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML };
+    for (String mediaType : mediaTypes) {
+      RMApp app = rm.submitApp(CONTAINER_MB, "test", "someuser");
+      amNodeManager.nodeHeartbeat(true);
+      ClientResponse response =
+          this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+              "state").accept(mediaType).get(ClientResponse.class);
+      AppState info = response.getEntity(AppState.class);
+      info.setState(YarnApplicationState.KILLED.toString());
+
+      response =
+          this
+            .constructWebResource("apps", app.getApplicationId().toString(),
+              "state").accept(mediaType)
+            .entity(info, MediaType.APPLICATION_XML).put(ClientResponse.class);
+      if (!isAuthorizationEnabled()) {
+        assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
+      } else {
+        assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
+      }
+    }
+    rm.stop();
+    return;
+
+  }
+
+  @Test
+  public void testSingleAppKillInvalidId() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    String[] testAppIds = { "application_1391705042196_0001", "random_string" };
+    for (String testAppId : testAppIds) {
+      AppState info = new AppState("KILLED");
+      ClientResponse response =
+          this.constructWebResource("apps", testAppId, "state")
+            .accept(MediaType.APPLICATION_XML)
+            .entity(info, MediaType.APPLICATION_XML).put(ClientResponse.class);
+      if (!isAuthorizationEnabled()) {
+        assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
+        continue;
+      }
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+    }
+    rm.stop();
+    return;
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (rm != null) {
+      rm.stop();
+    }
+    super.tearDown();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
index a8f4f00..08de51e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
@@ -1564,6 +1564,287 @@
 </app>
 +---+
 
+* Cluster Application State API
+
+  With the application state API, you can query the state of a submitted app, and you can kill a running app by modifying its state to "KILLED" with a PUT request. To perform the PUT operation, authentication has to be set up for the RM web services, and you must be authorized to kill the app. Currently you can only change the state to "KILLED"; an attempt to change it to any other state results in a 400 Bad Request response. Examples of the unauthorized and bad request errors are below. When you carry out a successful PUT, the initial response may be a 202 (Accepted). You can confirm that the app is killed by repeating the PUT request until you get a 200, by querying the state with the GET method, or by querying for app information and checking the state. In the examples below, we repeat the PUT request and get a 200 response. A minimal Java client sketch follows at the end of this section.
+
+  Please note that in order to kill an app, you must have an authentication filter set up for the HTTP interface. The functionality requires that a username is set in the HttpServletRequest. If no filter is set up, the response will be an "UNAUTHORIZED" response.
+
+** URI
+
+-----
+  * http://<rm http address:port>/ws/v1/cluster/apps/{appid}/state
+-----
+
+** HTTP Operations Supported
+
+------
+  * GET
+  * PUT
+------
+
+** Query Parameters Supported
+
+------
+  None
+------
+
+** Elements of <appstate> object
+
+  When you make a request for the state of an app, the information returned has the following fields
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                  |
+*---------------+--------------+-------------------------------+
+| state | string  | The application state - can be one of "NEW", "NEW_SAVING", "SUBMITTED", "ACCEPTED", "RUNNING", "FINISHED", "FAILED", "KILLED" |
+*---------------+--------------+--------------------------------+
+
+
+** Response Examples
+
+  <<JSON responses>>
+
+  HTTP Request
+
+-----
+  GET http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Response Header:
+
++---+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+{
+  "state":"ACCEPTED"
+}
++---+
+
+  HTTP Request
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+{
+  "state":"KILLED"
+}
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 202 Accepted
+Content-Type: application/json
+Transfer-Encoding: chunked
+Location: http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+{
+  "state":"ACCEPTED"
+}
++---+
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+{
+  "state":"KILLED"
+}
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+{
+  "state":"KILLED"
+}
++---+
+
+  <<XML responses>>
+
+  HTTP Request
+
+-----
+  GET http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Response Header:
+
++---+
+HTTP/1.1 200 OK
+Content-Type: application/xml
+Content-Length: 99
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>ACCEPTED</state>
+</appstate>
++---+
+
+  HTTP Request
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>KILLED</state>
+</appstate>
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 202 Accepted
+Content-Type: application/json
+Content-Length: 794
+Location: http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>ACCEPTED</state>
+</appstate>
++---+
+
+  HTTP Request
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>KILLED</state>
+</appstate>
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 200 OK
+Content-Type: application/xml
+Content-Length: 917
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>KILLED</state>
+</appstate>
++---+
+
+  <<Unauthorized Error Response>>
+
+  HTTP Request
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>KILLED</state>
+</appstate>
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 403 Forbidden
+Content-Type: application/json
+Transfer-Encoding: chunked
+Server: Jetty(6.1.26)
++---+
+
+
+  <<Bad Request Error Response>>
+
+  HTTP Request
+
+-----
+  PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
+-----
+
+  Request Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appstate>
+  <state>RUNNING</state>
+</appstate>
++---+
+
+  Response Header:
+
++---+
+HTTP/1.1 400 Bad Request
+Content-Length: 295
+Content-Type: application/xml
+Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<RemoteException>
+  <exception>BadRequestException</exception>
+  <message>java.lang.Exception: Only 'KILLED' is allowed as a target state.</message>
+  <javaClassName>org.apache.hadoop.yarn.webapp.BadRequestException</javaClassName>
+</RemoteException>
++---+
+
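
As a usage illustration of the kill operation documented above, here is a minimal Java client sketch using only the JDK's HttpURLConnection. It is not part of this patch; the host, port, application id, and user name are placeholders, and the user.name query parameter is only honored when the RM is configured with the simple (pseudo) authentication filter.

+---+
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class KillYarnAppSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder RM address and application id; substitute real values.
    String rm = "http://rmhost:8088";
    String appId = "application_1399397633663_0003";
    URL url = new URL(rm + "/ws/v1/cluster/apps/" + appId
        + "/state?user.name=testuser");

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setRequestProperty("Accept", "application/json");
    try (OutputStream out = conn.getOutputStream()) {
      out.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
    }

    // 202 means the kill was accepted but has not completed yet; 200 means
    // the application is already in the KILLED state. Repeat the PUT (or GET
    // the state resource) until a 200 is returned, as described above.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
+---+
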
 * Cluster Application Attempts API
 
   With the application attempts API, you can obtain a collection of resources that represent an application attempt.  When you run a GET operation on this resource, you obtain a collection of App Attempt Objects.