Merge branch 'trunk' into HDFS-7240
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 7b0a25c..a8a513d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -525,6 +525,9 @@
     }
     List<String> newArgs = new ArrayList<String>(args.length);
     for (int i=0; i < args.length; i++) {
+      if (args[i] == null) {
+        continue;
+      }
       String prop = null;
       if (args[i].equals("-D")) {
         newArgs.add(args[i]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
old mode 100755
new mode 100644
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
old mode 100755
new mode 100644
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
old mode 100755
new mode 100644
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index a99a26a..970bd4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -456,7 +456,7 @@
         dev = (float) Math.sqrt(dev / usages.length);
       }
     } catch (IOException e) {
-      LOG.info("Cannot get the live nodes: {}", e.getMessage());
+      LOG.error("Cannot get the live nodes: {}", e.getMessage());
     }
 
     final Map<String, Object> innerInfo = new HashMap<>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
index b6bd4b3..afc49c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
@@ -210,8 +210,7 @@
         }
       }
     } catch (IOException ioe) {
-      LOG.error("Cannot get Namenodes from the State Store: {}",
-          ioe.getMessage());
+      LOG.error("Cannot get Namenodes from the State Store", ioe);
     }
     return ret;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
index fe172c2..a7f02d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
@@ -100,7 +100,7 @@
           LOG.debug("Router heartbeat for router {}", routerId);
         }
       } catch (IOException e) {
-        LOG.error("Cannot heartbeat router {}: {}", routerId, e.getMessage());
+        LOG.error("Cannot heartbeat router {}", routerId, e);
       }
     } else {
       LOG.warn("Cannot heartbeat router {}: State Store unavailable", routerId);
@@ -132,7 +132,7 @@
         }
       }
     } catch (Exception e) {
-      LOG.error("Cannot get version for {}: {}", clazz, e.getMessage());
+      LOG.error("Cannot get version for {}", clazz, e);
     }
     return version;
   }
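
The log-call changes above (and the similar ones in the surrounding files) rely on SLF4J's convention that a trailing Throwable argument is logged with its full stack trace, even when the format string has no placeholder for it. A minimal sketch of the before/after difference, with a hypothetical logger and exception:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableLastExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThrowableLastExample.class);

  public static void main(String[] args) {
    Exception e = new IllegalStateException("State Store unavailable");
    // Before: only the message text survives; the stack trace is lost.
    LOG.error("Cannot heartbeat router {}: {}", "router-1", e.getMessage());
    // After: the trailing Throwable is recognized and its stack trace logged.
    LOG.error("Cannot heartbeat router {}", "router-1", e);
  }
}
```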
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 6c657b2..513e867 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -1067,8 +1067,8 @@
           results.put(location, clazz.cast(result));
         } catch (CancellationException ce) {
           T loc = orderedLocations.get(i);
-          String msg =
-              "Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
+          String msg = "Invocation to \"" + loc + "\" for \""
+              + method.getMethodName() + "\" timed out";
           LOG.error(msg);
           IOException ioe = new SubClusterTimeoutException(msg);
           exceptions.put(location, ioe);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 0dc2a69..f4c010c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2293,7 +2293,7 @@
           return entry.isAll();
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2314,7 +2314,7 @@
           return true;
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2343,7 +2343,7 @@
           ret.put(child, entry.getDateModified());
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return ret;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
index ccbde09..a0744a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
@@ -183,7 +183,7 @@
     } catch (NotCompliantMBeanException e) {
       throw new RuntimeException("Bad StateStoreMBean setup", e);
     } catch (MetricsException e) {
-      LOG.info("Failed to register State Store bean {}", e.getMessage());
+      LOG.error("Failed to register State Store bean {}", e.getMessage());
     }
 
     super.serviceInit(this.conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index 6638d1c..15fc9c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
@@ -361,7 +361,7 @@
           try {
             writer.close();
           } catch (IOException e) {
-            LOG.error("Cannot close the writer for {}", recordPathTemp);
+            LOG.error("Cannot close the writer for {}", recordPathTemp, e);
           }
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index a7da094..f1cf482 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -90,6 +90,21 @@
 
 <div class="page-header"><h1>Summary</h1></div>
 {#federation}
+<p>
+  Security is {#routerstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/routerstat}.</p>
+<p>{#router}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/router}</p>
+
+<p>
+  {NumFiles|fmt_number} files and directories, {NumBlocks|fmt_number} blocks.
+</p>
+
+{#mem.HeapMemoryUsage}
+<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {@eq key=max value="-1" type="number"}&lt;unbounded&gt;{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.HeapMemoryUsage}
+
+{#mem.NonHeapMemoryUsage}
+<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {@eq key=max value="-1" type="number"}&lt;unbounded&gt;{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.NonHeapMemoryUsage}
 <table class="table table-bordered table-striped">
   <tr><th>Total capacity</th><td>{TotalCapacity|fmt_bytes}</td></tr>
   <tr><th>Used capacity</th><td>{UsedCapacity|fmt_bytes}</td></tr>
@@ -103,8 +118,6 @@
   <tr><th><a href="#tab-datanode">Live Nodes</a></th><td>{NumLiveNodes} (Decommissioned: {NumDecomLiveNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadNodes} (Decommissioned: {NumDecomDeadNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningNodes}</td></tr>
-  <tr><th>Files</th><td>{NumFiles}</td></tr>
-  <tr><th>Blocks</th><td>{NumBlocks}</td></tr>
   <tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{NumOfBlocksUnderReplicated}</td></tr>
   <tr><th>Number of Blocks Pending Deletion</th><td>{NumOfBlocksPendingDeletion}</td></tr>
 </table>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index ef0a2a4..a0b0128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -33,7 +33,10 @@
 
   function load_overview() {
     var BEANS = [
-      {"name": "federation",      "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"}
+      {"name": "federation",  "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"},
+      {"name": "routerstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
+      {"name": "router",      "url": "/jmx?qrt=Hadoop:service=NameNode,name=NameNodeInfo"},
+      {"name": "mem",         "url": "/jmx?qry=java.lang:type=Memory"}
     ];
 
     var HELPERS = {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4dab4d..a7f0a07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -977,15 +977,19 @@
   public static final String  DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_KEY = "dfs.journalnode.rpc-address";
   public static final int     DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
+  public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
-    
+
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
   public static final int     DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
+  public static final String  DFS_JOURNALNODE_HTTP_BIND_HOST_KEY = "dfs.journalnode.http-bind-host";
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_KEY = "dfs.journalnode.https-address";
   public static final int     DFS_JOURNALNODE_HTTPS_PORT_DEFAULT = 8481;
+  public static final String  DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY = "dfs.journalnode.https-bind-host";
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTPS_PORT_DEFAULT;
 
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY = "dfs.journalnode.kerberos.principal";
   public static final String  DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = "dfs.journalnode.kerberos.internal.spnego.principal";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index c772dfc..11a5c04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -36,9 +36,12 @@
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.DiskChecker;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -226,7 +229,8 @@
 
       registerJNMXBean();
 
-      httpServer = new JournalNodeHttpServer(conf, this);
+      httpServer = new JournalNodeHttpServer(conf, this,
+          getHttpServerBindAddress(conf));
       httpServer.start();
 
       httpServerURI = httpServer.getServerURI().toString();
@@ -251,11 +255,6 @@
   public InetSocketAddress getBoundIpcAddress() {
     return rpcServer.getAddress();
   }
-  
-  @Deprecated
-  public InetSocketAddress getBoundHttpAddress() {
-    return httpServer.getAddress();
-  }
 
   public String getHttpServerURI() {
     return httpServerURI;
@@ -400,7 +399,7 @@
   private void registerJNMXBean() {
     journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this);
   }
-  
+
   private class ErrorReporter implements StorageErrorReporter {
     @Override
     public void reportErrorOnFile(File f) {
@@ -464,4 +463,53 @@
     return journalsById.get(jid);
   }
 
+  public static InetSocketAddress getHttpAddress(Configuration conf) {
+    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
+    return NetUtils.createSocketAddr(addr,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
+  }
+
+  protected InetSocketAddress getHttpServerBindAddress(
+      Configuration configuration) {
+    InetSocketAddress bindAddress = getHttpAddress(configuration);
+
+    // If DFS_JOURNALNODE_HTTP_BIND_HOST_KEY exists then it overrides the
+    // host name portion of DFS_JOURNALNODE_HTTP_ADDRESS_KEY.
+    final String bindHost = configuration.getTrimmed(
+        DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+    if (bindHost != null && !bindHost.isEmpty()) {
+      bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort());
+    }
+
+    return bindAddress;
+  }
+
+  @VisibleForTesting
+  public JournalNodeRpcServer getRpcServer() {
+    return rpcServer;
+  }
+
+  /**
+   * @return the actual JournalNode HTTP/HTTPS address.
+   */
+  public InetSocketAddress getBoundHttpAddress() {
+    return httpServer.getAddress();
+  }
+
+  /**
+   * @return JournalNode HTTP address
+   */
+  public InetSocketAddress getHttpAddress() {
+    return httpServer.getHttpAddress();
+  }
+
+  /**
+   * @return JournalNode HTTPS address
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpServer.getHttpsAddress();
+  }
 }
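
The new getHttpServerBindAddress above keeps the port from dfs.journalnode.http-address and swaps in the host from dfs.journalnode.http-bind-host only when that key is set; the RPC and HTTPS paths follow the same pattern. A self-contained sketch of that resolution logic, using a plain map in place of Hadoop's Configuration (the key names mirror the patch, but the helper itself is illustrative, not the patched method):

```java
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;

public class BindHostResolution {
  // Stand-in for Hadoop's Configuration, to keep the sketch self-contained.
  private static final Map<String, String> CONF = new HashMap<>();

  static InetSocketAddress resolve(String addrKey, String bindHostKey,
      String defaultAddr) {
    String addr = CONF.getOrDefault(addrKey, defaultAddr);
    int sep = addr.lastIndexOf(':');
    String host = addr.substring(0, sep);
    int port = Integer.parseInt(addr.substring(sep + 1));
    // The bind-host key, when present, overrides only the host portion.
    String bindHost = CONF.get(bindHostKey);
    if (bindHost != null && !bindHost.trim().isEmpty()) {
      host = bindHost.trim();
    }
    return new InetSocketAddress(host, port);
  }

  public static void main(String[] args) {
    CONF.put("dfs.journalnode.http-address", "jn1.example.com:8480");
    CONF.put("dfs.journalnode.http-bind-host", "0.0.0.0");
    // Listens on all interfaces while keeping the advertised port 8480.
    System.out.println(resolve("dfs.journalnode.http-address",
        "dfs.journalnode.http-bind-host", "0.0.0.0:8480"));
  }
}
```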
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
index 3adb93a..1d29c1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 
@@ -41,21 +42,37 @@
   private HttpServer2 httpServer;
   private final JournalNode localJournalNode;
 
+  private InetSocketAddress httpAddress;
+  private InetSocketAddress httpsAddress;
+  private final InetSocketAddress bindAddress;
+
   private final Configuration conf;
 
-  JournalNodeHttpServer(Configuration conf, JournalNode jn) {
+  JournalNodeHttpServer(Configuration conf, JournalNode jn,
+      InetSocketAddress bindAddress) {
     this.conf = conf;
     this.localJournalNode = jn;
+    this.bindAddress = bindAddress;
   }
 
   void start() throws IOException {
-    final InetSocketAddress httpAddr = getAddress(conf);
+    final InetSocketAddress httpAddr = bindAddress;
 
     final String httpsAddrString = conf.get(
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
+    if (httpsAddr != null) {
+      // If DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY exists then it overrides the
+      // host name portion of DFS_JOURNALNODE_HTTPS_ADDRESS_KEY.
+      final String bindHost =
+          conf.getTrimmed(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+      if (bindHost != null && !bindHost.isEmpty()) {
+        httpsAddr = new InetSocketAddress(bindHost, httpsAddr.getPort());
+      }
+    }
+
     HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
@@ -67,6 +84,20 @@
     httpServer.addInternalServlet("getJournal", "/getJournal",
         GetJournalEditServlet.class, true);
     httpServer.start();
+
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
+    }
   }
 
   void stop() throws IOException {
@@ -78,15 +109,27 @@
       }
     }
   }
+
+  /**
+   * Return the actual HTTP/HTTPS address bound to by the running server.
+   */
+  public InetSocketAddress getAddress() {
+    assert httpAddress != null || httpsAddress != null;
+    return httpAddress != null ? httpAddress : httpsAddress;
+  }
   
   /**
    * Return the actual address bound to by the running server.
    */
-  @Deprecated
-  public InetSocketAddress getAddress() {
-    InetSocketAddress addr = httpServer.getConnectorAddress(0);
-    assert addr.getPort() != 0;
-    return addr;
+  public InetSocketAddress getHttpAddress() {
+    return httpAddress;
+  }
+
+  /**
+   * Return the actual HTTPS address bound to by the running server.
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpsAddress;
   }
 
   /**
@@ -101,14 +144,6 @@
         + NetUtils.getHostPortString(addr));
   }
 
-  private static InetSocketAddress getAddress(Configuration conf) {
-    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
-    return NetUtils.createSocketAddr(addr,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
-  }
-
   public static Journal getJournalFromContext(ServletContext context, String jid)
       throws IOException {
     JournalNode jn = (JournalNode)context.getAttribute(JN_ATTRIBUTE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
index 6e7a388..b1a3c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
@@ -19,6 +19,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -53,11 +54,14 @@
 import java.net.InetSocketAddress;
 import java.net.URL;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+
 
 @InterfaceAudience.Private
 @VisibleForTesting
 public class JournalNodeRpcServer implements QJournalProtocol,
     InterQJournalProtocol {
+  private static final Log LOG = JournalNode.LOG;
   private static final int HANDLER_COUNT = 5;
   private final JournalNode jn;
   private Server server;
@@ -73,6 +77,12 @@
         true);
     
     InetSocketAddress addr = getAddress(confCopy);
+    String bindHost = conf.getTrimmed(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, null);
+    if (bindHost == null) {
+      bindHost = addr.getHostName();
+    }
+    LOG.info("RPC server is binding to " + bindHost + ":" + addr.getPort());
+
     RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
         ProtobufRpcEngine.class);
     QJournalProtocolServerSideTranslatorPB translator =
@@ -81,13 +91,13 @@
         .newReflectiveBlockingService(translator);
     
     this.server = new RPC.Builder(confCopy)
-      .setProtocol(QJournalProtocolPB.class)
-      .setInstance(service)
-      .setBindAddress(addr.getHostName())
-      .setPort(addr.getPort())
-      .setNumHandlers(HANDLER_COUNT)
-      .setVerbose(false)
-      .build();
+        .setProtocol(QJournalProtocolPB.class)
+        .setInstance(service)
+        .setBindAddress(bindHost)
+        .setPort(addr.getPort())
+        .setNumHandlers(HANDLER_COUNT)
+        .setVerbose(false)
+        .build();
 
 
     //Adding InterQJournalProtocolPB to server
@@ -298,4 +308,10 @@
         .setFromURL(jn.getHttpServerURI())
         .build();
   }
+
+  /** Allow access to the RPC server for testing. */
+  @VisibleForTesting
+  Server getRpcServer() {
+    return server;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index c141293..695a421 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -970,7 +970,8 @@
    * @return newReplicaInfo
    * @throws IOException
    */
-  private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo,
+  @VisibleForTesting
+  ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo,
       FsVolumeReference volumeRef) throws IOException {
     ReplicaInfo newReplicaInfo = copyReplicaToVolume(block, replicaInfo,
         volumeRef);
@@ -2302,10 +2303,8 @@
    * the disk, update {@link ReplicaInfo} with the correct file</li>
    * </ul>
    *
-   * @param blockId Block that differs
-   * @param diskFile Block file on the disk
-   * @param diskMetaFile Metadata file from on the disk
-   * @param vol Volume of the block file
+   * @param bpid block pool ID
+   * @param scanInfo {@link ScanInfo} for a given block
    */
   @Override
   public void checkAndUpdate(String bpid, ScanInfo scanInfo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 8f52ea7..a1804ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -231,12 +231,11 @@
   }
 
   /**
-   * Calls {@link FsVolumeImpl#checkDirs()} on each volume.
-   * 
-   * Use {@link checkDirsLock} to allow only one instance of checkDirs() call.
+   * Updates the failed volume info in the volumeFailureInfos Map
+   * and calls {@link #removeVolume(FsVolumeImpl)} to remove the volume
+   * from the volume list for each of the failed volumes.
    *
-   * @return list of all the failed volumes.
-   * @param failedVolumes
+   * @param failedVolumes set of volumes marked failed.
    */
   void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
     try (AutoCloseableLock lock = checkDirsLock.acquire()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index d102375..7d0a492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -385,8 +385,8 @@
           break;
         case XMLEvent.CHARACTERS:
           String val = XMLUtils.
-              unmangleXmlString(ev.asCharacters().getData(), true);
-          parent.setVal(val);
+              unmangleXmlString(ev.asCharacters().getData(), false);
+          parent.setVal(parent.getVal() + val);
           events.nextEvent();
           break;
         case XMLEvent.ATTRIBUTE:
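
The reconstructor now appends character data instead of replacing it because a StAX reader may split one text node into several CHARACTERS events, typically around entity references such as the "&amp;" covered by the new test below. A minimal illustration, assuming the default non-coalescing parser settings:

```java
import java.io.StringReader;
import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.events.XMLEvent;

public class SplitCharactersEvents {
  public static void main(String[] args) throws Exception {
    XMLEventReader events = XMLInputFactory.newFactory()
        .createXMLEventReader(new StringReader("<d>a&amp;b</d>"));
    StringBuilder val = new StringBuilder();
    while (events.hasNext()) {
      XMLEvent ev = events.nextEvent();
      if (ev.isCharacters()) {
        // "a", "&" and "b" can arrive as separate events, so append.
        val.append(ev.asCharacters().getData());
      }
    }
    System.out.println(val); // a&b
  }
}
```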
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4ed4690..921c166 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2168,6 +2168,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.rpc-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the RPC server will bind to. If this optional address is
+    set, it overrides only the hostname portion of dfs.journalnode.rpc-address.
+    This is useful for making the JournalNode listen on all interfaces by
+    setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.journalnode.http-address</name>
   <value>0.0.0.0:8480</value>
   <description>
@@ -2177,6 +2188,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.http-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTP server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.http-address. This is useful for making the JournalNode
+    HTTP server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.journalnode.https-address</name>
   <value>0.0.0.0:8481</value>
   <description>
@@ -2186,6 +2208,17 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.https-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTPS server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.https-address. This is useful for making the JournalNode
+    HTTPS server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.audit.loggers</name>
   <value>default</value>
   <description>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
index c597921..57f5cf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
@@ -87,6 +87,11 @@
     this(replicationFactor, new HdfsConfiguration());
   }
 
+  public BlockReaderTestUtil(MiniDFSCluster cluster, HdfsConfiguration conf) {
+    this.conf = conf;
+    this.cluster = cluster;
+  }
+
   public BlockReaderTestUtil(int replicationFactor, HdfsConfiguration config) throws Exception {
     this.conf = config;
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replicationFactor);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
new file mode 100644
index 0000000..79f3598
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNot.not;
+
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+/**
+ * This test checks that the JournalNode respects the following keys.
+ *
+ *  - DFS_JOURNALNODE_RPC_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTP_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY
+ */
+public class TestJournalNodeRespectsBindHostKeys {
+
+  public static final Log LOG = LogFactory.getLog(
+      TestJournalNodeRespectsBindHostKeys.class);
+  private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
+  private static final int NUM_JN = 1;
+
+  private HdfsConfiguration conf;
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setUp() {
+    conf = new HdfsConfiguration();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+      jCluster = null;
+    }
+  }
+
+  private static String getRpcServerAddress(JournalNode jn) {
+    JournalNodeRpcServer rpcServer = jn.getRpcServer();
+    return rpcServer.getRpcServer().getListenerAddress().getAddress().
+        toString();
+  }
+
+  @Test (timeout=300000)
+  public void testRpcBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // JN should not bind the wildcard address by default.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = getRpcServerAddress(jn);
+    assertThat("Bind address not expected to be wildcard by default.",
+        address, not("/" + WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // Tell JN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that JN binds wildcard address now.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = getRpcServerAddress(jn);
+    assertThat("Bind address " + address + " is not wildcard.",
+        address, is("/" + WILDCARD_ADDRESS));
+  }
+
+  @Test(timeout=300000)
+  public void testHttpBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // JN should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // Tell JN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTP_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that JN binds wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+
+  private static final String BASEDIR = System.getProperty("test.build.dir",
+      "target/test-dir") + "/" +
+      TestJournalNodeRespectsBindHostKeys.class.getSimpleName();
+
+  private static void setupSsl() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    assertTrue(base.mkdirs());
+    final String keystoresDir = new File(BASEDIR).getAbsolutePath();
+    final String sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestJournalNodeRespectsBindHostKeys.class);
+
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+  }
+
+  /**
+   * HTTPS test is different since we need to setup SSL configuration.
+   * JN also binds the wildcard address for HTTPS port by default so we must
+   * pick a different host/port combination.
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testHttpsBindHostKey() throws Exception {
+    LOG.info("Testing behavior without " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    setupSsl();
+
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+
+    // JN should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpsAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing behavior with " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    // Tell JN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that JN binds wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpsAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 3c4b657..d684950 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -20,17 +20,23 @@
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -83,6 +89,8 @@
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
@@ -832,8 +840,21 @@
   private ReplicaInfo createNewReplicaObj(ExtendedBlock block, FsDatasetImpl
       fsDataSetImpl) throws IOException {
     ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
-    FsVolumeSpi destVolume = null;
+    FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl);
+    return fsDataSetImpl.copyReplicaToVolume(block, replicaInfo,
+        destVolume.obtainReference());
+  }
 
+  /**
+   * Finds a new destination volume for block.
+   *
+   * @param block         - Extended Block
+   * @param fsDataSetImpl - FsDatasetImpl reference
+   * @throws IOException
+   */
+  private FsVolumeSpi getDestinationVolume(ExtendedBlock block, FsDatasetImpl
+      fsDataSetImpl) throws IOException {
+    FsVolumeSpi destVolume = null;
     final String srcStorageId = fsDataSetImpl.getVolume(block).getStorageID();
     try (FsVolumeReferences volumeReferences =
         fsDataSetImpl.getFsVolumeReferences()) {
@@ -844,8 +865,88 @@
         }
       }
     }
-    return fsDataSetImpl.copyReplicaToVolume(block, replicaInfo,
-        destVolume.obtainReference());
+    return destVolume;
+  }
+
+  @Test(timeout = 3000000)
+  public void testBlockReadOpWhileMovingBlock() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+
+      // Setup cluster
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
+          .storagesPerDatanode(2)
+          .build();
+      FileSystem fs = cluster.getFileSystem();
+      DataNode dataNode = cluster.getDataNodes().get(0);
+
+      // Create test file with ASCII data
+      Path filePath = new Path("/tmp/testData");
+      String blockData = RandomStringUtils.randomAscii(512 * 4);
+      FSDataOutputStream fout = fs.create(filePath);
+      fout.writeBytes(blockData);
+      fout.close();
+      assertEquals(blockData, DFSTestUtil.readFile(fs, filePath));
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
+      BlockReaderTestUtil util = new BlockReaderTestUtil(cluster, new
+          HdfsConfiguration(conf));
+      LocatedBlock blk = util.getFileBlocks(filePath, 512 * 2).get(0);
+      File[] blkFiles = cluster.getAllBlockFiles(block);
+
+      // Part 1: Read partial data from block
+      LOG.info("Reading partial data for block {} before moving it: ",
+          blk.getBlock().toString());
+      BlockReader blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs, blk, 0, 512 * 2);
+      byte[] buf = new byte[512 * 2];
+      blkReader.read(buf, 0, 512);
+      assertEquals(blockData.substring(0, 512), new String(buf,
+          StandardCharsets.US_ASCII).substring(0, 512));
+
+      // Part 2: Move block and then read remaining block
+      FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
+      ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl);
+      assertNotNull("Destination volume should not be null.", destVolume);
+      fsDataSetImpl.moveBlock(block, replicaInfo, destVolume.obtainReference());
+      // Trigger block report to update block info in NN
+      cluster.triggerBlockReports();
+      blkReader.read(buf, 512, 512);
+      assertEquals(blockData.substring(0, 512 * 2), new String(buf,
+          StandardCharsets.US_ASCII).substring(0, 512 * 2));
+      blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs,
+          blk, 0, blockData.length());
+      buf = new byte[512 * 4];
+      blkReader.read(buf, 0, 512 * 4);
+      assertEquals(blockData, new String(buf, StandardCharsets.US_ASCII));
+
+      // Part 3: 1. Close the block reader
+      // 2. Assert source block doesn't exist on initial volume
+      // 3. Assert new file location for block is different
+      // 4. Confirm client can read data from new location
+      blkReader.close();
+      ExtendedBlock block2 = DFSTestUtil.getFirstBlock(fs, filePath);
+      File[] blkFiles2 = cluster.getAllBlockFiles(block2);
+      blk = util.getFileBlocks(filePath, 512 * 4).get(0);
+      blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs,
+          blk, 0, blockData.length());
+      blkReader.read(buf, 0, 512 * 4);
+
+      assertFalse(Files.exists(Paths.get(blkFiles[0].getAbsolutePath())));
+      assertNotEquals(blkFiles[0], blkFiles2[0]);
+      assertEquals(blockData, new String(buf, StandardCharsets.US_ASCII));
+
+    } finally {
+      if (cluster != null && cluster.isClusterUp()) {
+        cluster.shutdown();
+      }
+    }
   }
 
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 5d0fd38..3ac627f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -26,6 +26,7 @@
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.text.StrBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -56,17 +57,24 @@
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.TestRefreshUserMappings;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.Assert;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -101,6 +109,7 @@
   private final ByteArrayOutputStream err = new ByteArrayOutputStream();
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
+  private String tempResource = null;
 
   @Before
   public void setUp() throws Exception {
@@ -108,7 +117,7 @@
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
     restartCluster();
 
-    admin = new DFSAdmin();
+    admin = new DFSAdmin(conf);
   }
 
   private void redirectStream() {
@@ -137,6 +146,11 @@
     }
 
     resetStream();
+    if (tempResource != null) {
+      File f = new File(tempResource);
+      FileUtils.deleteQuietly(f);
+      tempResource = null;
+    }
   }
 
   private void restartCluster() throws IOException {
@@ -923,4 +937,68 @@
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testRefreshProxyUser() throws Exception {
+    Path dirPath = new Path("/testdir1");
+    Path subDirPath = new Path("/testdir1/subdir1");
+    UserGroupInformation loginUserUgi =  UserGroupInformation.getLoginUser();
+    String proxyUser = "fakeuser";
+    String realUser = loginUserUgi.getShortUserName();
+
+    UserGroupInformation proxyUgi =
+        UserGroupInformation.createProxyUserForTesting(proxyUser,
+            loginUserUgi, loginUserUgi.getGroupNames());
+
+    // create a directory as login user and re-assign it to proxy user
+    loginUserUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        cluster.getFileSystem().mkdirs(dirPath);
+        cluster.getFileSystem().setOwner(dirPath, proxyUser,
+            proxyUgi.getPrimaryGroupName());
+        return 0;
+      }
+    });
+
+    // try creating a subdirectory inside the directory as the proxy user;
+    // this should fail because the real user is not yet allowed to
+    // impersonate the proxy user
+    try {
+      proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+        @Override public Integer run() throws Exception {
+          cluster.getFileSystem().mkdirs(subDirPath);
+          return 0;
+        }
+      });
+      Assert.fail("Impersonation should fail before the refresh.");
+    } catch (RemoteException re) {
+      Assert.assertTrue(re.unwrapRemoteException()
+          instanceof AccessControlException);
+      Assert.assertTrue(re.unwrapRemoteException().getMessage()
+          .equals("User: " + realUser +
+              " is not allowed to impersonate " + proxyUser));
+    }
+
+    // refresh will look at configuration on the server side
+    // add additional resource with the new value
+    // so the server side will pick it up
+    String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(realUser);
+    String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserIpConfKey(realUser);
+    String rsrc = "testGroupMappingRefresh_rsrc.xml";
+    tempResource = TestRefreshUserMappings.addNewConfigResource(rsrc,
+        userKeyGroups, "*", userKeyHosts, "*");
+
+    String[] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
+    admin.run(args);
+
+    // After proxying the fakeuser, the mkdir should work
+    proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        cluster.getFileSystem().mkdirs(subDirPath);
+        return 0;
+      }
+    });
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 7574cbf..b807804 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -194,10 +194,15 @@
       dirCount++;
       writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
 
-      //Create a directory whose name should be escaped in XML
+      //Create directories whose names should be escaped in XML
       Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
       hdfs.mkdirs(invalidXMLDir);
       dirCount++;
+      Path entityRefXMLDir = new Path("/dirContainingEntityRef&here");
+      hdfs.mkdirs(entityRefXMLDir);
+      dirCount++;
+      writtenFiles.put(entityRefXMLDir.toString(),
+          hdfs.getFileStatus(entityRefXMLDir));
 
       //Create a directory with sticky bits
       Path stickyBitDir = new Path("/stickyBit");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index ee20a95..f511eb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -208,7 +208,8 @@
     // add additional resource with the new value
     // so the server side will pick it up
     String rsrc = "testGroupMappingRefresh_rsrc.xml";
-    addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");  
+    tempResource = addNewConfigResource(rsrc, userKeyGroups, "gr2",
+        userKeyHosts, "127.0.0.1");
     
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
@@ -232,7 +233,7 @@
     
   }
 
-  private void addNewConfigResource(String rsrcName, String keyGroup,
+  public static String addNewConfigResource(String rsrcName, String keyGroup,
       String groups, String keyHosts, String hosts)
           throws FileNotFoundException, UnsupportedEncodingException {
     // location for temp resource should be in CLASSPATH
@@ -242,17 +243,18 @@
     String urlPath = URLDecoder.decode(url.getPath().toString(), "UTF-8");
     Path p = new Path(urlPath);
     Path dir = p.getParent();
-    tempResource = dir.toString() + "/" + rsrcName;
+    String tmp = dir.toString() + "/" + rsrcName;
 
     String newResource =
     "<configuration>"+
     "<property><name>" + keyGroup + "</name><value>"+groups+"</value></property>" +
     "<property><name>" + keyHosts + "</name><value>"+hosts+"</value></property>" +
     "</configuration>";
-    PrintWriter writer = new PrintWriter(new FileOutputStream(tempResource));
+    PrintWriter writer = new PrintWriter(new FileOutputStream(tmp));
     writer.println(newResource);
     writer.close();
 
     Configuration.addDefaultResource(rsrcName);
+    return tmp;
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
old mode 100755
new mode 100644
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
old mode 100755
new mode 100644
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
old mode 100755
new mode 100644
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index 90635a6..bfdc6db 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -89,7 +89,7 @@
     // used for a non running job
     return ApplicationReport.newInstance(unknownAppId, unknownAttemptId,
       "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
-      "N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
+      "N/A", 0, 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
       YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index b85f18d..275456d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -515,7 +515,7 @@
         appId, 0);
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", "host", 124, null, YarnApplicationState.FINISHED,
-      "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
+      "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
       "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
@@ -525,7 +525,7 @@
         appId, 0);
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", host, port, null, YarnApplicationState.RUNNING, "diagnostics",
-      "url", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
+      "url", 0, 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
       YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 932e808..babf22b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -272,7 +272,8 @@
         .thenReturn(
             ApplicationReport.newInstance(appId, null, "tmp", "tmp", "tmp",
                 "tmp", 0, null, YarnApplicationState.FINISHED, "tmp", "tmp",
-                0l, 0l, FinalApplicationStatus.SUCCEEDED, null, null, 0f,
+                0L, 0L, 0L,
+                 FinalApplicationStatus.SUCCEEDED, null, null, 0f,
                 "tmp", null));
     yarnRunner.killJob(jobId);
     verify(clientDelegate).killJob(jobId);
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b8c2884..793ffb4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1056,7 +1056,7 @@
       <dependency>
         <groupId>org.apache.commons</groupId>
         <artifactId>commons-lang3</artifactId>
-        <version>3.4</version>
+        <version>3.7</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 9f96584..40df7c5 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -176,6 +176,7 @@
       <item name="Concepts" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Concepts.html"/>
       <item name="Yarn Service API" href="hadoop-yarn/hadoop-yarn-site/yarn-service/YarnServiceAPI.html"/>
       <item name="Service Discovery" href="hadoop-yarn/hadoop-yarn-site/yarn-service/ServiceDiscovery.html"/>
+      <item name="System Services" href="hadoop-yarn/hadoop-yarn-site/yarn-service/SystemServices.html"/>
     </menu>
 
     <menu name="Hadoop Compatible File Systems" inherit="top">
diff --git a/hadoop-tools/hadoop-aliyun/src/site/resources/css/site.css b/hadoop-tools/hadoop-aliyun/src/site/resources/css/site.css
new file mode 100644
index 0000000..f830baa
--- /dev/null
+++ b/hadoop-tools/hadoop-aliyun/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+
diff --git a/hadoop-tools/hadoop-aws/src/site/resources/css/site.css b/hadoop-tools/hadoop-aws/src/site/resources/css/site.css
new file mode 100644
index 0000000..f830baa
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/resources/css/site.css b/hadoop-tools/hadoop-azure-datalake/src/site/resources/css/site.css
new file mode 100644
index 0000000..f830baa
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+
diff --git a/hadoop-tools/hadoop-azure/src/site/resources/css/site.css b/hadoop-tools/hadoop-azure/src/site/resources/css/site.css
new file mode 100644
index 0000000..f830baa
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/site/resources/css/site.css
@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index 10f6294..a42f8de 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -137,6 +137,7 @@
             <exclude>src/test/resources/syn_stream.json</exclude>
             <exclude>src/test/resources/inputsls.json</exclude>
             <exclude>src/test/resources/nodes.json</exclude>
+            <exclude>src/test/resources/nodes-with-resources.json</exclude>
             <exclude>src/test/resources/exit-invariants.txt</exclude>
             <exclude>src/test/resources/ongoing-invariants.txt</exclude>
           </excludes>
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 8a522fe..8504b9d 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -292,21 +292,30 @@
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO,
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO_DEFAULT);
     // nm information (fetch from topology file, or from sls/rumen json file)
-    Set<String> nodeSet = new HashSet<String>();
+    Map<String, Resource> nodeResourceMap = new HashMap<>();
+    Set<? extends String> nodeSet;
     if (nodeFile.isEmpty()) {
       for (String inputTrace : inputTraces) {
-
         switch (inputType) {
         case SLS:
-          nodeSet.addAll(SLSUtils.parseNodesFromSLSTrace(inputTrace));
+          nodeSet = SLSUtils.parseNodesFromSLSTrace(inputTrace);
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         case RUMEN:
-          nodeSet.addAll(SLSUtils.parseNodesFromRumenTrace(inputTrace));
+          nodeSet = SLSUtils.parseNodesFromRumenTrace(inputTrace);
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         case SYNTH:
           stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
-          nodeSet.addAll(SLSUtils.generateNodes(stjp.getNumNodes(),
-              stjp.getNumNodes()/stjp.getNodesPerRack()));
+          nodeSet = SLSUtils.generateNodes(stjp.getNumNodes(),
+              stjp.getNumNodes()/stjp.getNodesPerRack());
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         default:
           throw new YarnException("Input configuration not recognized, "
@@ -314,20 +323,26 @@
         }
       }
     } else {
-      nodeSet.addAll(SLSUtils.parseNodesFromNodeFile(nodeFile));
+      nodeResourceMap = SLSUtils.parseNodesFromNodeFile(nodeFile,
+          nodeManagerResource);
     }
 
-    if (nodeSet.size() == 0) {
+    if (nodeResourceMap.isEmpty()) {
       throw new YarnException("No node! Please configure nodes.");
     }
 
     // create NM simulators
     Random random = new Random();
     Set<String> rackSet = new HashSet<String>();
-    for (String hostName : nodeSet) {
+    for (Map.Entry<String, Resource> entry : nodeResourceMap.entrySet()) {
       // we randomize the heartbeat start time from zero to 1 interval
       NMSimulator nm = new NMSimulator();
-      nm.init(hostName, nodeManagerResource, random.nextInt(heartbeatInterval),
+      Resource nmResource = nodeManagerResource;
+      String hostName = entry.getKey();
+      if (entry.getValue() != null) {
+        nmResource = entry.getValue();
+      }
+      nm.init(hostName, nmResource, random.nextInt(heartbeatInterval),
           heartbeatInterval, rm, resourceUtilizationRatio);
       nmMap.put(nm.getNode().getNodeID(), nm);
       runner.schedule(nm);
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
index e914fe7..f2129d014 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
@@ -22,6 +22,8 @@
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
+
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -39,7 +41,11 @@
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 @Private
 @Unstable
@@ -145,9 +151,9 @@
   /**
-   * parse the input node file, return each host name
+   * Parse the input node file; return each host name mapped to its Resource.
    */
-  public static Set<String> parseNodesFromNodeFile(String nodeFile)
-          throws IOException {
-    Set<String> nodeSet = new HashSet<String>();
+  public static Map<String, Resource> parseNodesFromNodeFile(String nodeFile,
+      Resource nmDefaultResource) throws IOException {
+    Map<String, Resource> nodeResourceMap = new HashMap<>();
     JsonFactory jsonF = new JsonFactory();
     ObjectMapper mapper = new ObjectMapper();
     Reader input =
@@ -160,13 +166,21 @@
         List tasks = (List) jsonE.get("nodes");
         for (Object o : tasks) {
           Map jsonNode = (Map) o;
-          nodeSet.add(rack + "/" + jsonNode.get("node"));
+          Resource nodeResource = Resources.clone(nmDefaultResource);
+          ResourceInformation[] infos = ResourceUtils.getResourceTypesArray();
+          for (ResourceInformation info : infos) {
+            if (jsonNode.get(info.getName()) != null) {
+              nodeResource.setResourceValue(info.getName(),
+                  Long.parseLong(jsonNode.get(info.getName()).toString()));
+            }
+          }
+          nodeResourceMap.put(rack + "/" + jsonNode.get("node"), nodeResource);
         }
       }
     } finally {
       input.close();
     }
-    return nodeSet;
+    return nodeResourceMap;
   }
 
   public static Set<? extends String> generateNodes(int numNodes,
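Note: parseNodesFromNodeFile now resolves a full Resource per host instead of a bare host set. A minimal usage sketch, mirroring the new test below (the paths and values are those of the added test resources):

    Resource defaults = Resources.createResource(1024, 2);
    Map<String, Resource> nodes = SLSUtils.parseNodesFromNodeFile(
        "src/test/resources/nodes-with-resources.json", defaults);
    // node1 declares "memory-mb": 2048 and "vcores": 6, so its cloned
    // Resource carries those overrides; node2 declares nothing and keeps
    // the supplied defaults.
    long mem = nodes.get("/rack1/node1").getMemorySize();    // 2048
    int cores = nodes.get("/rack1/node2").getVirtualCores(); // 2
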
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
index 30964a1..5e586b13 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.yarn.sls.utils;
 
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
 public class TestSLSUtils {
@@ -40,6 +43,28 @@
   }
 
   @Test
+  public void testParseNodesFromNodeFile() throws Exception {
+    String nodeFile = "src/test/resources/nodes.json";
+    Map<String, Resource> nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+        nodeFile, Resources.createResource(1024, 2));
+    Assert.assertEquals(20, nodeResourceMap.size());
+
+    nodeFile = "src/test/resources/nodes-with-resources.json";
+    nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+        nodeFile, Resources.createResource(1024, 2));
+    Assert.assertEquals(4,
+        nodeResourceMap.size());
+    Assert.assertEquals(2048,
+        nodeResourceMap.get("/rack1/node1").getMemorySize());
+    Assert.assertEquals(6,
+        nodeResourceMap.get("/rack1/node1").getVirtualCores());
+    Assert.assertEquals(1024,
+        nodeResourceMap.get("/rack1/node2").getMemorySize());
+    Assert.assertEquals(2,
+        nodeResourceMap.get("/rack1/node2").getVirtualCores());
+  }
+
+  @Test
   public void testGenerateNodes() {
     Set<? extends String> nodes = SLSUtils.generateNodes(3, 3);
     Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json b/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
new file mode 100644
index 0000000..0039181
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
@@ -0,0 +1,19 @@
+{
+  "rack": "rack1",
+  "nodes": [
+    {
+      "node": "node1",
+      "memory-mb" : 2048,
+      "vcores" : 6
+    },
+    {
+      "node": "node2"
+    },
+    {
+      "node": "node3"
+    },
+    {
+      "node": "node4"
+    }
+  ]
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
index 67a1817..57cecb2 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
+++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
@@ -22,7 +22,7 @@
 
 CREATE TABLE applicationsHomeSubCluster(
    applicationId varchar(64) NOT NULL,
-   subClusterId varchar(256) NULL,
+   homeSubCluster varchar(256) NULL,
    CONSTRAINT pk_applicationId PRIMARY KEY (applicationId)
 );
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index 962bbba..711424b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -58,7 +58,8 @@
       ApplicationAttemptId applicationAttemptId, String user, String queue,
       String name, String host, int rpcPort, Token clientToAMToken,
       YarnApplicationState state, String diagnostics, String url,
-      long startTime, long finishTime, FinalApplicationStatus finalStatus,
+      long startTime, long launchTime, long finishTime,
+      FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
       float progress, String applicationType, Token amRmToken) {
     ApplicationReport report = Records.newRecord(ApplicationReport.class);
@@ -74,6 +75,7 @@
     report.setDiagnostics(diagnostics);
     report.setTrackingUrl(url);
     report.setStartTime(startTime);
+    report.setLaunchTime(launchTime);
     report.setFinishTime(finishTime);
     report.setFinalApplicationStatus(finalStatus);
     report.setApplicationResourceUsageReport(appResources);
@@ -84,13 +86,40 @@
     return report;
   }
 
+
+  @Private
+  @Unstable
+  public static ApplicationReport newInstance(ApplicationId applicationId,
+        ApplicationAttemptId applicationAttemptId, String user, String queue,
+        String name, String host, int rpcPort, Token clientToAMToken,
+        YarnApplicationState state, String diagnostics, String url,
+        long startTime, long finishTime,
+        FinalApplicationStatus finalStatus,
+        ApplicationResourceUsageReport appResources, String origTrackingUrl,
+        float progress, String applicationType, Token amRmToken,
+        Set<String> tags, boolean unmanagedApplication, Priority priority,
+        String appNodeLabelExpression, String amNodeLabelExpression) {
+    ApplicationReport report =
+            newInstance(applicationId, applicationAttemptId, user, queue, name,
+                    host, rpcPort, clientToAMToken, state, diagnostics, url,
+                    startTime, 0, finishTime, finalStatus, appResources,
+                    origTrackingUrl, progress, applicationType, amRmToken);
+    report.setApplicationTags(tags);
+    report.setUnmanagedApp(unmanagedApplication);
+    report.setPriority(priority);
+    report.setAppNodeLabelExpression(appNodeLabelExpression);
+    report.setAmNodeLabelExpression(amNodeLabelExpression);
+    return report;
+  }
+
   @Private
   @Unstable
   public static ApplicationReport newInstance(ApplicationId applicationId,
       ApplicationAttemptId applicationAttemptId, String user, String queue,
       String name, String host, int rpcPort, Token clientToAMToken,
       YarnApplicationState state, String diagnostics, String url,
-      long startTime, long finishTime, FinalApplicationStatus finalStatus,
+      long startTime, long launchTime, long finishTime,
+      FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
       float progress, String applicationType, Token amRmToken, Set<String> tags,
       boolean unmanagedApplication, Priority priority,
@@ -98,8 +127,8 @@
     ApplicationReport report =
         newInstance(applicationId, applicationAttemptId, user, queue, name,
           host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
-          finishTime, finalStatus, appResources, origTrackingUrl, progress,
-          applicationType, amRmToken);
+          launchTime, finishTime, finalStatus, appResources,
+          origTrackingUrl, progress, applicationType, amRmToken);
     report.setApplicationTags(tags);
     report.setUnmanagedApp(unmanagedApplication);
     report.setPriority(priority);
@@ -282,6 +311,14 @@
   @Unstable
   public abstract void setStartTime(long startTime);
 
+  @Private
+  @Unstable
+  public abstract void setLaunchTime(long launchTime);
+
+  @Public
+  @Unstable
+  public abstract long getLaunchTime();
+
   /**
    * Get the <em>finish time</em> of the application.
    * @return <em>finish time</em> of the application
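Note: the primary ApplicationReport.newInstance factory gains a launchTime parameter between startTime and finishTime, which is why every call site updated above inserts an extra 0. A hedged sketch of the new argument order (values illustrative, following the test call sites):

    ApplicationReport report = ApplicationReport.newInstance(
        appId, attemptId, "user", "queue", "appname", "host", 124,
        null /* clientToAMToken */, YarnApplicationState.FINISHED,
        "diagnostics", "url",
        startTime, 0L /* launchTime unknown */, finishTime,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.0f,
        YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);

The retained overload without launchTime simply forwards 0, keeping older callers source-compatible.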
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 5e200dc..d6138e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -281,6 +281,7 @@
   optional string appNodeLabelExpression = 24;
   optional string amNodeLabelExpression = 25;
   repeated AppTimeoutsMapProto appTimeouts = 26;
+  optional int64 launchTime = 27;
 }
 
 message AppTimeoutsMapProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
index 225f8bd..f9cfa92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -92,10 +92,12 @@
   private Thread serviceLaucher;
 
   @VisibleForTesting
-  private int skipCounter;
+  private int badFileNameExtensionSkipCounter;
   @VisibleForTesting
   private Map<String, Integer> ignoredUserServices =
       new HashMap<>();
+  @VisibleForTesting
+  private int badDirSkipCounter;
 
   public SystemServiceManagerImpl() {
     super(SystemServiceManagerImpl.class.getName());
@@ -268,6 +270,7 @@
         } else if (launchType.getPath().getName().equals(ASYNC)) {
           scanForUserServiceDefinition(launchType.getPath(), asyncUserServices);
         } else {
+          badDirSkipCounter++;
           LOG.debug("Scanner skips for unknown dir {}.", launchType.getPath());
         }
       }
@@ -308,7 +311,7 @@
         if (!filename.endsWith(YARN_FILE_SUFFIX)) {
           LOG.info("Scanner skips for unknown file extension, filename = {}",
               filename);
-          skipCounter++;
+          badFileNameExtensionSkipCounter++;
           continue;
         }
         Service service = getServiceDefinition(serviceCache.getPath());
@@ -325,9 +328,10 @@
             LOG.warn(
                 "Ignoring service {} for the user {} as it is already present,"
                     + " filename = {}", service.getName(), userName, filename);
+          } else {
+            LOG.info("Added service {} for the user {}, filename = {}",
+                service.getName(), userName, filename);
           }
-          LOG.info("Added service {} for the user {}, filename = {}",
-              service.getName(), userName, filename);
         }
       }
     }
@@ -375,7 +379,13 @@
     return syncUserServices;
   }
 
-  @VisibleForTesting int getSkipCounter() {
-    return skipCounter;
+  @VisibleForTesting
+  int getBadFileNameExtensionSkipCounter() {
+    return badFileNameExtensionSkipCounter;
+  }
+
+  @VisibleForTesting
+  int getBadDirSkipCounter() {
+    return badDirSkipCounter;
   }
 }
\ No newline at end of file
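Note: the scanner now tracks two distinct skip conditions: files without the .yarnfile suffix (badFileNameExtensionSkipCounter) and directories other than sync/ and async/ under the system-services root (badDirSkipCounter). The renamed test resources below exercise both; their layout, for orientation:

    system-services/
      sync/user1/example-app1.yarnfile
      sync/user1/example-app2.yarnfile
      sync/user1/example-app3.json     <- skipped: unknown file extension
      sync/user2/example-app1.yarnfile
      sync/user2/example-app2.yarnfile
      bad/bad.yarnfile                 <- skipped: unknown directory
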
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 45b1bc7..a604af7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -251,6 +251,9 @@
       kerberos_principal:
         description: The Kerberos Principal of the service
         $ref: '#/definitions/KerberosPrincipal'
+      docker_client_config:
+        type: string
+        description: URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json).
   ResourceInformation:
     description:
       ResourceInformation determines unit/value of resource types in addition to memory and vcores. It will be part of Resource object.
@@ -422,14 +425,15 @@
           type: string
         description: A list of quicklink keys defined at the service level, and to be resolved by this component.
   ReadinessCheck:
-    description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases.
+    description: A check to be performed to determine the readiness of a component instance (a container). If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component, service, or system level. The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
     required:
     - type
     properties:
       type:
         type: string
-        description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).
+        description: DEFAULT (AM checks whether the container has an IP and optionally performs a DNS lookup for the container hostname), HTTP (AM performs default checks, plus sends a REST call to the container and expects a response code between 200 and 299), or PORT (AM performs default checks, plus attempts to open a socket connection to the container on a specified port).
         enum:
+          - DEFAULT
           - HTTP
           - PORT
       properties:
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
index 9e28c96..72c6e2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
@@ -19,6 +19,9 @@
 
 import static org.junit.Assert.*;
 
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -27,6 +30,7 @@
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
@@ -90,7 +94,19 @@
   }
 
   @Test
-  public void testGoodCreateService() {
+  public void testGoodCreateService() throws Exception {
+    String json = "{\"auths\": "
+        + "{\"https://index.docker.io/v1/\": "
+        + "{\"auth\": \"foobarbaz\"},"
+        + "\"registry.example.com\": "
+        + "{\"auth\": \"bazbarfoo\"}}}";
+    File dockerTmpDir = new File("target", "docker-tmp");
+    FileUtils.deleteQuietly(dockerTmpDir);
+    dockerTmpDir.mkdirs();
+    String dockerConfig = dockerTmpDir + "/config.json";
+    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
+    bw.write(json);
+    bw.close();
     Service service = new Service();
     service.setName("jenkins");
     service.setVersion("v1");
@@ -116,6 +132,33 @@
   }
 
   @Test
+  public void testBadRequestDockerClientConfigMissingCreateService() {
+    Service service = new Service();
+    service.setName("jenkins");
+    service.setVersion("v1");
+    service.setDockerClientConfig("/does/not/exist/config.json");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.createService(request, service);
+    assertEquals("Create service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
+  @Test
   public void testBadGetService() {
     final Response actual = apiServer.getService(request, "no-jenkins");
     assertEquals("Get service is ",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
similarity index 93%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceImpl.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
index 27632f9..d39083d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
@@ -41,13 +41,13 @@
 /**
  * Test class for system service manager.
  */
-public class TestSystemServiceImpl {
+public class TestSystemServiceManagerImpl {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(TestSystemServiceImpl.class);
+      LoggerFactory.getLogger(TestSystemServiceManagerImpl.class);
   private SystemServiceManagerImpl systemService;
   private Configuration conf;
-  private String resourcePath = "users";
+  private String resourcePath = "system-services";
 
   private String[] users = new String[] {"user1", "user2"};
   private static Map<String, Set<String>> loadedServices = new HashMap<>();
@@ -88,7 +88,9 @@
         ignoredUserServices.containsKey(users[0]));
     int count = ignoredUserServices.get(users[0]);
     Assert.assertEquals(1, count);
-    Assert.assertEquals(1, systemService.getSkipCounter());
+    Assert.assertEquals(1,
+        systemService.getBadFileNameExtensionSkipCounter());
+    Assert.assertEquals(1, systemService.getBadDirSkipCounter());
 
     Map<String, Set<Service>> userServices =
         systemService.getSyncUserServices();
@@ -112,7 +114,7 @@
       while (iterator.hasNext()) {
         Service next = iterator.next();
         Assert.assertTrue(
-            "Service name doesn't exist in expected " + "userService "
+            "Service name doesn't exist in expected userService "
                 + serviceNames, serviceNames.contains(next.getName()));
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
new file mode 100644
index 0000000..1d514d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
@@ -0,0 +1,16 @@
+{
+  "name": "bad",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app1.yarnfile
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app2.yarnfile
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app3.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app3.json
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app1.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app1.yarnfile
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app2.yarnfile b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
similarity index 100%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app2.yarnfile
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java
index 4743f28..5982728 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service;
 
 import org.apache.hadoop.yarn.service.component.Component;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,6 +30,7 @@
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_NODE_BLACKLIST_THRESHOLD;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.NODE_BLACKLIST_THRESHOLD;
 
 /**
@@ -51,8 +53,9 @@
   public ContainerFailureTracker(ServiceContext context, Component component) {
     this.context = context;
     this.component = component;
-    maxFailurePerNode = component.getComponentSpec().getConfiguration()
-        .getPropertyInt(NODE_BLACKLIST_THRESHOLD, 3);
+    maxFailurePerNode = YarnServiceConf.getInt(NODE_BLACKLIST_THRESHOLD,
+        DEFAULT_NODE_BLACKLIST_THRESHOLD, component.getComponentSpec()
+        .getConfiguration(), context.scheduler.getConfig());
   }
 
 
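Note: this commit consistently replaces single-source property reads with YarnServiceConf.getInt(key, default, specConf, clusterConf). A rough sketch of the resolution order these call sites rely on (the real implementation lives in YarnServiceConf; this is an assumption-labeled paraphrase, not the actual code):

    // A property set in the service/component spec wins over the cluster
    // Configuration, which wins over the compiled-in default.
    static int getIntSketch(String name, int defaultValue,
        org.apache.hadoop.yarn.service.api.records.Configuration spec,
        org.apache.hadoop.conf.Configuration cluster) {
      return spec.getPropertyInt(name, cluster.getInt(name, defaultValue));
    }
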
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 75cc9c5..0383a65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.service;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Options;
 import org.apache.commons.lang3.StringUtils;
@@ -89,8 +90,8 @@
     fs.setAppDir(appDir);
     loadApplicationJson(context, fs);
 
+    context.tokens = recordTokensForContainers();
     if (UserGroupInformation.isSecurityEnabled()) {
-      context.tokens = recordTokensForContainers();
       doSecureLogin();
     }
     // Take yarn config from YarnFile and merge them into YarnConfiguration
@@ -128,15 +129,10 @@
 
   // Record the tokens and use them for launching containers.
   // e.g. localization requires the hdfs delegation tokens
-  private ByteBuffer recordTokensForContainers() throws IOException {
+  @VisibleForTesting
+  protected ByteBuffer recordTokensForContainers() throws IOException {
     Credentials copy = new Credentials(UserGroupInformation.getCurrentUser()
         .getCredentials());
-    DataOutputBuffer dob = new DataOutputBuffer();
-    try {
-      copy.writeTokenStorageToStream(dob);
-    } finally {
-      dob.close();
-    }
     // Now remove the AM->RM token so that task containers cannot access it.
     Iterator<Token<?>> iter = copy.getAllTokens().iterator();
     while (iter.hasNext()) {
@@ -146,6 +142,12 @@
         iter.remove();
       }
     }
+    DataOutputBuffer dob = new DataOutputBuffer();
+    try {
+      copy.writeTokenStorageToStream(dob);
+    } finally {
+      dob.close();
+    }
     return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
   }
 
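Note: the reorder above is the substantive fix: previously the Credentials were serialized into the DataOutputBuffer before the AM->RM token was stripped, so task containers still received it. A hedged check (the master variable and test framing are illustrative) that the serialized buffer no longer carries the token:

    ByteBuffer tokens = master.recordTokensForContainers();
    Credentials readBack = new Credentials();
    readBack.readTokenStorageStream(new DataInputStream(
        new ByteArrayInputStream(tokens.array(), 0, tokens.remaining())));
    for (Token<?> t : readBack.getAllTokens()) {
      assert !AMRMTokenIdentifier.KIND_NAME.equals(t.getKind());
    }
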
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 7eddef9..8d01410 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -234,9 +234,10 @@
     createConfigFileCache(context.fs.getFileSystem());
 
     createAllComponents();
-    containerRecoveryTimeout = getConfig().getInt(
+    containerRecoveryTimeout = YarnServiceConf.getInt(
         YarnServiceConf.CONTAINER_RECOVERY_TIMEOUT_MS,
-        YarnServiceConf.DEFAULT_CONTAINER_RECOVERY_TIMEOUT_MS);
+        YarnServiceConf.DEFAULT_CONTAINER_RECOVERY_TIMEOUT_MS,
+        app.getConfiguration(), getConfig());
   }
 
   protected YarnRegistryViewForProviders createYarnRegistryOperations(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java
index af7c542..0665cb5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java
@@ -60,6 +60,7 @@
   @XmlType(name = "type")
   @XmlEnum
   public enum TypeEnum {
+    DEFAULT("DEFAULT"),
     HTTP("HTTP"),
     PORT("PORT");
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
index 9475bf6..22beff4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
@@ -73,6 +73,7 @@
   private KerberosPrincipal kerberosPrincipal = new KerberosPrincipal();
   private String version = null;
   private String description = null;
+  private String dockerClientConfig = null;
 
   /**
    * A unique service name.
@@ -370,6 +371,27 @@
     this.kerberosPrincipal = kerberosPrincipal;
   }
 
+  @JsonProperty("docker_client_config")
+  @XmlElement(name = "docker_client_config")
+  @SuppressWarnings("checkstyle:hiddenfield")
+  public Service dockerClientConfig(String dockerClientConfig) {
+    this.dockerClientConfig = dockerClientConfig;
+    return this;
+  }
+
+  /**
+   * The Docker client config for the service.
+   * @return dockerClientConfig
+   */
+  @ApiModelProperty(value = "The Docker client config for the service")
+  public String getDockerClientConfig() {
+    return dockerClientConfig;
+  }
+
+  public void setDockerClientConfig(String dockerClientConfig) {
+    this.dockerClientConfig = dockerClientConfig;
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -414,6 +436,8 @@
     sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
     sb.append("    kerberosPrincipal: ")
         .append(toIndentedString(kerberosPrincipal)).append("\n");
+    sb.append("    dockerClientConfig: ")
+        .append(toIndentedString(dockerClientConfig)).append("\n");
     sb.append("}");
     return sb.toString();
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java
index e17c0c4..e8e7de4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java
@@ -48,8 +48,9 @@
     } else {
       retryPolicy =
           createRetryPolicy(conf, YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS,
-              15 * 60 * 1000, YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS,
-              2 * 1000);
+              YarnServiceConf.DEFAULT_CLIENT_AM_RETRY_MAX_WAIT_MS,
+              YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS,
+              YarnServiceConf.DEFAULT_CLIENT_AM_RETRY_MAX_INTERVAL_MS);
     }
     return createRetriableProxy(confClone, protocol, ugi, rpc, serverAddress,
         retryPolicy);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 21fb075..453619b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -79,6 +79,7 @@
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.utils.ZookeeperUtils;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
 import org.slf4j.Logger;
@@ -668,8 +669,8 @@
       submissionContext.setApplicationTimeouts(appTimeout);
     }
     submissionContext.setMaxAppAttempts(YarnServiceConf
-        .getInt(YarnServiceConf.AM_RESTART_MAX, 20, app.getConfiguration(),
-            conf));
+        .getInt(YarnServiceConf.AM_RESTART_MAX, DEFAULT_AM_RESTART_MAX, app
+            .getConfiguration(), conf));
 
     setLogAggregationContext(app, conf, submissionContext);
 
@@ -695,7 +696,7 @@
             conf), 1));
     String queue = app.getQueue();
     if (StringUtils.isEmpty(queue)) {
-      queue = conf.get(YARN_QUEUE, "default");
+      queue = conf.get(YARN_QUEUE, DEFAULT_YARN_QUEUE);
     }
     submissionContext.setQueue(queue);
     submissionContext.setApplicationName(serviceName);
@@ -710,7 +711,7 @@
     amLaunchContext.setCommands(Collections.singletonList(cmdStr));
     amLaunchContext.setEnvironment(env);
     amLaunchContext.setLocalResources(localResources);
-    addHdfsDelegationTokenIfSecure(amLaunchContext);
+    addCredentials(amLaunchContext, app);
     submissionContext.setAMContainerSpec(amLaunchContext);
     yarnClient.submitApplication(submissionContext);
     return submissionContext.getApplicationId();
@@ -933,28 +934,37 @@
     return appDir;
   }
 
-  private void addHdfsDelegationTokenIfSecure(ContainerLaunchContext amContext)
+  private void addCredentials(ContainerLaunchContext amContext, Service app)
       throws IOException {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return;
-    }
-    Credentials credentials = new Credentials();
-    String tokenRenewer = YarnClientUtils.getRmPrincipal(getConfig());
-    if (StringUtils.isEmpty(tokenRenewer)) {
-      throw new IOException(
-          "Can't get Master Kerberos principal for the RM to use as renewer");
-    }
-    // Get hdfs dt
-    final org.apache.hadoop.security.token.Token<?>[] tokens =
-        fs.getFileSystem().addDelegationTokens(tokenRenewer, credentials);
-    if (tokens != null && tokens.length != 0) {
-      for (Token<?> token : tokens) {
-        LOG.debug("Got DT: " + token);
+    Credentials allCreds = new Credentials();
+    // HDFS DT
+    if (UserGroupInformation.isSecurityEnabled()) {
+      String tokenRenewer = YarnClientUtils.getRmPrincipal(getConfig());
+      if (StringUtils.isEmpty(tokenRenewer)) {
+        throw new IOException(
+            "Can't get Master Kerberos principal for the RM to use as renewer");
       }
+      final org.apache.hadoop.security.token.Token<?>[] tokens =
+          fs.getFileSystem().addDelegationTokens(tokenRenewer, allCreds);
+      if (LOG.isDebugEnabled()) {
+        if (tokens != null && tokens.length != 0) {
+          for (Token<?> token : tokens) {
+            LOG.debug("Got DT: " + token);
+          }
+        }
+      }
+    }
+
+    if (!StringUtils.isEmpty(app.getDockerClientConfig())) {
+      allCreds.addAll(DockerClientConfigHandler.readCredentialsFromConfigFile(
+          new Path(app.getDockerClientConfig()), getConfig(), app.getName()));
+    }
+
+    if (allCreds.numberOfTokens() > 0) {
       DataOutputBuffer dob = new DataOutputBuffer();
-      credentials.writeTokenStorageToStream(dob);
-      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-      amContext.setTokens(fsTokens);
+      allCreds.writeTokenStorageToStream(dob);
+      ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+      amContext.setTokens(tokens);
     }
   }
 
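Note: addCredentials generalizes the old HDFS-only token path: delegation tokens are still collected when security is enabled, and registry tokens from a service-level docker client config are merged in regardless of security mode. A minimal client-side sketch (the HDFS URI is the example from the YAML definition above):

    Service service = new Service();
    service.setName("jenkins");
    service.setVersion("v1");
    service.setDockerClientConfig("hdfs:///tmp/config.json");
    // ... define components as usual; on submit, addCredentials() reads
    // the registry auths via DockerClientConfigHandler and ships them in
    // the AM launch context alongside any HDFS delegation tokens.
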
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 39897f62..3a08eaa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
 import org.apache.hadoop.yarn.service.monitor.probe.Probe;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
@@ -79,6 +80,9 @@
 import static org.apache.hadoop.yarn.service.component.ComponentState.*;
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURE_THRESHOLD;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED_DEFAULT;
 
 public class Component implements EventHandler<ComponentEvent> {
   private static final Logger LOG = LoggerFactory.getLogger(Component.class);
@@ -175,9 +179,15 @@
     dispatcher = scheduler.getDispatcher();
     failureTracker =
         new ContainerFailureTracker(context, this);
-    probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
-    maxContainerFailurePerComp = componentSpec.getConfiguration()
-        .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
+    if (componentSpec.getReadinessCheck() != null ||
+        YarnServiceConf.getBoolean(DEFAULT_READINESS_CHECK_ENABLED,
+            DEFAULT_READINESS_CHECK_ENABLED_DEFAULT,
+            componentSpec.getConfiguration(), scheduler.getConfig())) {
+      probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
+    }
+    maxContainerFailurePerComp = YarnServiceConf.getInt(
+        CONTAINER_FAILURE_THRESHOLD, DEFAULT_CONTAINER_FAILURE_THRESHOLD,
+        componentSpec.getConfiguration(), scheduler.getConfig());
     createNumCompInstances(component.getNumberOfContainers());
     setDesiredContainers(component.getNumberOfContainers().intValue());
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 0e3e11b..c57d888 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -20,7 +20,9 @@
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
 import org.apache.hadoop.util.ExitUtil;
@@ -520,6 +522,24 @@
     }
   }
 
+  public String getHostname() {
+    String domain = getComponent().getScheduler().getConfig()
+        .get(RegistryConstants.KEY_DNS_DOMAIN);
+    String hostname;
+    if (domain == null || domain.isEmpty()) {
+      hostname = MessageFormat
+          .format("{0}.{1}.{2}", getCompInstanceName(),
+              getComponent().getContext().service.getName(),
+              RegistryUtils.currentUser());
+    } else {
+      hostname = MessageFormat
+          .format("{0}.{1}.{2}.{3}", getCompInstanceName(),
+              getComponent().getContext().service.getName(),
+              RegistryUtils.currentUser(), domain);
+    }
+    return hostname;
+  }
+
   @Override
   public int compareTo(ComponentInstance to) {
     long delta = containerStartedTime - to.containerStartedTime;
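
The new getHostname() composes the registry DNS name as instance.service.user, appending the configured domain when one is present. A self-contained sketch of the same composition; the sample component, service, and user names are made up:

    import java.text.MessageFormat;

    // Sketch of the hostname scheme used by getHostname(); sample values
    // below are illustrative only.
    public class HostnameSketch {
      static String hostname(String instance, String service, String user,
          String domain) {
        if (domain == null || domain.isEmpty()) {
          return MessageFormat.format("{0}.{1}.{2}", instance, service, user);
        }
        return MessageFormat.format("{0}.{1}.{2}.{3}", instance, service,
            user, domain);
      }

      public static void main(String[] args) {
        // prints comp-0.my-service.hadoop
        System.out.println(hostname("comp-0", "my-service", "hadoop", null));
        // prints comp-0.my-service.hadoop.example.com
        System.out.println(
            hostname("comp-0", "my-service", "hadoop", "example.com"));
      }
    }
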
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 3dd5a7e..eda280f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -20,27 +20,34 @@
 
 import org.apache.hadoop.yarn.service.api.records.Configuration;
 
+// ALL SERVICE AM PROPERTIES ADDED TO THIS FILE MUST BE DOCUMENTED
+// in the YARN site documentation file yarn-service/Configurations.md.
 public class YarnServiceConf {
 
   private static final String YARN_SERVICE_PREFIX = "yarn.service.";
 
   // Retry settings for the ServiceClient to talk to Service AppMaster
   public static final String CLIENT_AM_RETRY_MAX_WAIT_MS = "yarn.service.client-am.retry.max-wait-ms";
+  public static final long DEFAULT_CLIENT_AM_RETRY_MAX_WAIT_MS = 15 * 60 * 1000;
   public static final String CLIENT_AM_RETRY_MAX_INTERVAL_MS = "yarn.service.client-am.retry-interval-ms";
+  public static final long DEFAULT_CLIENT_AM_RETRY_MAX_INTERVAL_MS = 2 * 1000;
 
   // Retry settings for container failures
   public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max";
+  public static final int DEFAULT_CONTAINER_RETRY_MAX = -1;
   public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval-ms";
+  public static final int DEFAULT_CONTAINER_RETRY_INTERVAL = 30000;
   public static final String CONTAINER_FAILURES_VALIDITY_INTERVAL =
       "yarn.service.container-failure.validity-interval-ms";
+  public static final long DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL = -1;
 
   public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts";
+  public static final int DEFAULT_AM_RESTART_MAX = 20;
   public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory";
   public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
 
-  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
-
   public static final String YARN_QUEUE = "yarn.service.queue";
+  public static final String DEFAULT_YARN_QUEUE = "default";
 
   public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address";
   public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:";
@@ -67,11 +74,14 @@
    */
   public static final String CONTAINER_FAILURE_THRESHOLD =
       "yarn.service.container-failure-per-component.threshold";
+  public static final int DEFAULT_CONTAINER_FAILURE_THRESHOLD = 10;
+
   /**
    * Maximum number of container failures on a node before the node is blacklisted
    */
   public static final String NODE_BLACKLIST_THRESHOLD =
       "yarn.service.node-blacklist.threshold";
+  public static final int DEFAULT_NODE_BLACKLIST_THRESHOLD = 3;
 
   /**
    * The failure count for CONTAINER_FAILURE_THRESHOLD and NODE_BLACKLIST_THRESHOLD
@@ -79,6 +89,7 @@
    */
   public static final String CONTAINER_FAILURE_WINDOW =
       "yarn.service.failure-count-reset.window";
+  public static final long DEFAULT_CONTAINER_FAILURE_WINDOW = 21600;
 
   /**
    * interval between readiness checks.
@@ -87,9 +98,17 @@
   public static final int DEFAULT_READINESS_CHECK_INTERVAL = 30; // seconds
 
   /**
+   * Whether the default readiness check is enabled.
+   */
+  public static final String DEFAULT_READINESS_CHECK_ENABLED =
+      "yarn.service.default-readiness-check.enabled";
+  public static final boolean DEFAULT_READINESS_CHECK_ENABLED_DEFAULT = true;
+
+  /**
    * JVM opts.
    */
   public static final String JVM_OPTS = "yarn.service.am.java.opts";
+  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
 
   /**
    * How long to wait until a container is considered dead.
@@ -126,6 +145,12 @@
     return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue));
   }
 
+  public static boolean getBoolean(String name, boolean defaultValue,
+      Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
+    return userConf.getPropertyBool(name, systemConf.getBoolean(name,
+        defaultValue));
+  }
+
   public static String get(String name, String defaultVal,
       Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
     return userConf.getProperty(name, systemConf.get(name, defaultVal));
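
These helpers resolve each property in two levels: a value in the service's own configuration overrides the cluster (system) configuration, which in turn overrides the hard-coded default. A minimal model of that lookup, with maps standing in for the real Configuration classes:

    import java.util.HashMap;
    import java.util.Map;

    // Model of YarnServiceConf's two-level lookup; Maps stand in for the
    // service-level and cluster-level Configuration objects.
    public class ConfPrecedenceSketch {
      static boolean getBoolean(String name, boolean defaultValue,
          Map<String, String> userConf, Map<String, String> systemConf) {
        boolean systemValue = systemConf.containsKey(name)
            ? Boolean.parseBoolean(systemConf.get(name)) : defaultValue;
        return userConf.containsKey(name)
            ? Boolean.parseBoolean(userConf.get(name)) : systemValue;
      }

      public static void main(String[] args) {
        String key = "yarn.service.default-readiness-check.enabled";
        Map<String, String> user = new HashMap<>();
        Map<String, String> system = new HashMap<>();
        system.put(key, "false");                                 // cluster-wide
        System.out.println(getBoolean(key, true, user, system));  // false
        user.put(key, "true");                                    // per-service wins
        System.out.println(getBoolean(key, true, user, system));  // true
      }
    }
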
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
index 982448a..033569c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
@@ -43,6 +43,7 @@
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY;
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURE_WINDOW;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL;
 
@@ -81,7 +82,7 @@
 
     // Default 6 hours.
     long failureResetInterval = YarnServiceConf
-        .getLong(CONTAINER_FAILURE_WINDOW, 21600,
+        .getLong(CONTAINER_FAILURE_WINDOW, DEFAULT_CONTAINER_FAILURE_WINDOW,
             context.service.getConfiguration(), conf);
 
     executorService
@@ -109,11 +110,15 @@
         ProbeStatus status = instance.ping();
         if (status.isSuccess()) {
           if (instance.getState() == STARTED) {
+            LOG.info("Readiness check succeeded for {}: {}", instance
+                .getCompInstanceName(), status);
             // synchronously update the state.
             instance.handle(
                 new ComponentInstanceEvent(entry.getKey(), BECOME_READY));
           }
         } else {
+          LOG.info("Readiness check failed for {}: {}", instance
+              .getCompInstanceName(), status);
           if (instance.getState() == READY) {
             instance.handle(
                 new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY));
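
The loop above drives a simple two-way transition: a successful ping promotes a STARTED instance to READY, and a failed ping demotes a READY instance; every other combination produces no event. A compact sketch of that rule:

    // Sketch of the readiness transition rule in ServiceMonitor's probe loop.
    public class ReadinessTransitionSketch {
      enum InstanceState { STARTED, READY }

      static String transition(InstanceState state, boolean pingSucceeded) {
        if (pingSucceeded && state == InstanceState.STARTED) {
          return "BECOME_READY";      // promoted after a successful check
        }
        if (!pingSucceeded && state == InstanceState.READY) {
          return "BECOME_NOT_READY";  // demoted after a failed check
        }
        return "NO_EVENT";            // all other combinations are left alone
      }

      public static void main(String[] args) {
        System.out.println(transition(InstanceState.STARTED, true));  // BECOME_READY
        System.out.println(transition(InstanceState.READY, false));   // BECOME_NOT_READY
        System.out.println(transition(InstanceState.READY, true));    // NO_EVENT
      }
    }
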
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/DefaultProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/DefaultProbe.java
new file mode 100644
index 0000000..4077013
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/DefaultProbe.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * A probe that checks whether the AM has retrieved an IP for a container.
+ * Optional parameters enable a subsequent check for whether a DNS lookup can
+ * be performed for the container's hostname. Configurable properties include:
+ *
+ *   dns.check.enabled - true if DNS check should be performed (default false)
+ *   dns.address - optional IP:port address of DNS server to use for DNS check
+ */
+public class DefaultProbe extends Probe {
+  private final boolean dnsCheckEnabled;
+  private final String dnsAddress;
+
+  public DefaultProbe(Map<String, String> props) {
+    this("Default probe: IP presence", props);
+  }
+
+  protected DefaultProbe(String name, Map<String, String> props) {
+    this.dnsCheckEnabled = getPropertyBool(props,
+        DEFAULT_PROBE_DNS_CHECK_ENABLED,
+        DEFAULT_PROBE_DNS_CHECK_ENABLED_DEFAULT);
+    this.dnsAddress = props.get(DEFAULT_PROBE_DNS_ADDRESS);
+    String additionalName = "";
+    if (dnsCheckEnabled) {
+      if (dnsAddress == null) {
+        additionalName = " with DNS checking";
+      } else {
+        additionalName = " with DNS checking and DNS server address " +
+            dnsAddress;
+      }
+    }
+    setName(name + additionalName);
+  }
+
+  public static DefaultProbe create() throws IOException {
+    return new DefaultProbe(Collections.emptyMap());
+  }
+
+  public static DefaultProbe create(Map<String, String> props)
+      throws IOException {
+    return new DefaultProbe(props);
+  }
+
+  @Override
+  public ProbeStatus ping(ComponentInstance instance) {
+    ProbeStatus status = new ProbeStatus();
+
+    ContainerStatus containerStatus = instance.getContainerStatus();
+    if (containerStatus == null
+        || ServiceUtils.isEmpty(containerStatus.getIPs())) {
+      status.fail(this, new IOException(
+          instance.getCompInstanceName() + ": IP is not available yet"));
+      return status;
+    }
+
+    String hostname = instance.getHostname();
+    if (dnsCheckEnabled && !ServiceRegistryUtils.registryDNSLookupExists(
+        dnsAddress, hostname)) {
+      status.fail(this, new IOException(
+          instance.getCompInstanceName() + ": DNS checking is enabled, but " +
+              "lookup for " + hostname + " is not available yet"));
+      return status;
+    }
+
+    status.succeed(this);
+    return status;
+  }
+
+  protected boolean isDnsCheckEnabled() {
+    return dnsCheckEnabled;
+  }
+}
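
A hypothetical construction of the new probe with DNS checking turned on; the property names are the MonitorKeys constants added in this patch, while the DNS server address is a placeholder:

    import org.apache.hadoop.yarn.service.monitor.probe.DefaultProbe;

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class DefaultProbeUsage {
      public static void main(String[] args) throws IOException {
        Map<String, String> props = new HashMap<>();
        props.put("dns.check.enabled", "true");   // DEFAULT_PROBE_DNS_CHECK_ENABLED
        props.put("dns.address", "10.0.0.1:53");  // DEFAULT_PROBE_DNS_ADDRESS
        DefaultProbe probe = DefaultProbe.create(props);
        // ping(instance) will now fail until the instance reports an IP and
        // its hostname resolves against 10.0.0.1:53.
      }
    }
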
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
index 1ed13a9..492a11b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
@@ -17,11 +17,7 @@
 
 package org.apache.hadoop.yarn.service.monitor.probe;
 
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
-import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +26,20 @@
 import java.net.URL;
 import java.util.Map;
 
-public class HttpProbe extends Probe {
+/**
+ * A probe that checks whether a successful HTTP response code can be obtained
+ * from a container. A well-formed URL must be provided. The URL is intended
+ * to contain a token ${THIS_HOST} that will be replaced by the IP of the
+ * container. This probe also performs the checks of the {@link DefaultProbe}.
+ * Additional configurable properties include:
+ *
+ *   url - required URL for HTTP connection, e.g. http://${THIS_HOST}:8080
+ *   timeout - connection timeout (default 1000)
+ *   min.success - minimum response code considered successful (default 200)
+ *   max.success - maximum response code considered successful (default 299)
+ *
+ */
+public class HttpProbe extends DefaultProbe {
   protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class);
 
   private static final String HOST_TOKEN = "${THIS_HOST}";
@@ -40,9 +49,9 @@
   private final int min, max;
 
 
-  public HttpProbe(String url, int timeout, int min, int max, Configuration
-      conf) {
-    super("Http probe of " + url + " [" + min + "-" + max + "]", conf);
+  public HttpProbe(String url, int timeout, int min, int max,
+      Map<String, String> props) {
+    super("Http probe of " + url + " [" + min + "-" + max + "]", props);
     this.urlString = url;
     this.timeout = timeout;
     this.min = min;
@@ -59,7 +68,7 @@
         WEB_PROBE_MIN_SUCCESS_DEFAULT);
     int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS,
         WEB_PROBE_MAX_SUCCESS_DEFAULT);
-    return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null);
+    return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, props);
   }
 
 
@@ -73,15 +82,11 @@
 
   @Override
   public ProbeStatus ping(ComponentInstance instance) {
-    ProbeStatus status = new ProbeStatus();
-    ContainerStatus containerStatus = instance.getContainerStatus();
-    if (containerStatus == null || ServiceUtils.isEmpty(containerStatus.getIPs())
-        || StringUtils.isEmpty(containerStatus.getHost())) {
-      status.fail(this, new IOException("IP is not available yet"));
+    ProbeStatus status = super.ping(instance);
+    if (!status.isSuccess()) {
       return status;
     }
-
-    String ip = containerStatus.getIPs().get(0);
+    String ip = instance.getContainerStatus().getIPs().get(0);
     HttpURLConnection connection = null;
     try {
       URL url = new URL(urlString.replace(HOST_TOKEN, ip));
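
An illustrative property map for an HTTP readiness check built on this class; the ${THIS_HOST} token is substituted with the container's IP before each ping, and the /health path and timeout value here are example choices:

    import org.apache.hadoop.yarn.service.monitor.probe.HttpProbe;

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class HttpProbeUsage {
      public static void main(String[] args) throws IOException {
        Map<String, String> props = new HashMap<>();
        props.put("url", "http://${THIS_HOST}:8080/health");
        props.put("timeout", "2000");      // connection timeout in ms
        props.put("min.success", "200");   // accept any 2xx response
        props.put("max.success", "299");
        HttpProbe probe = HttpProbe.create(props);
      }
    }
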
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
index 55b55f6..97770d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
@@ -23,6 +23,18 @@
 public interface MonitorKeys {
 
   /**
+   * Default probing key : DNS check enabled {@value}.
+   */
+  String DEFAULT_PROBE_DNS_CHECK_ENABLED = "dns.check.enabled";
+  /**
+   * Default probing default : DNS check enabled {@value}.
+   */
+  boolean DEFAULT_PROBE_DNS_CHECK_ENABLED_DEFAULT = false;
+  /**
+   * Default probing key : DNS checking address IP:port {@value}.
+   */
+  String DEFAULT_PROBE_DNS_ADDRESS = "dns.address";
+  /**
    * Port probing key : port to attempt to create a TCP connection to {@value}.
    */
   String PORT_PROBE_PORT = "port";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
index c4f63ae..0b57e6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
@@ -61,20 +61,20 @@
   }
 
   public static Probe getProbe(ReadinessCheck readinessCheck) {
-    if (readinessCheck == null) {
-      return null;
-    }
-    if (readinessCheck.getType() == null) {
-      return null;
-    }
     try {
+      if (readinessCheck == null) {
+        return DefaultProbe.create();
+      }
+      if (readinessCheck.getType() == null) {
+        return DefaultProbe.create(readinessCheck.getProperties());
+      }
       switch (readinessCheck.getType()) {
       case HTTP:
         return HttpProbe.create(readinessCheck.getProperties());
       case PORT:
         return PortProbe.create(readinessCheck.getProperties());
       default:
-        return null;
+        return DefaultProbe.create(readinessCheck.getProperties());
       }
     } catch (Throwable t) {
       throw new IllegalArgumentException("Error creating readiness check " +
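
With this change getProbe never returns null: a missing readiness check, a missing type, or an unrecognized type all fall back to the new DefaultProbe instead of disabling monitoring. A short illustration, mirroring the ReadinessCheck builder style used in TestDefaultProbe further down (the url value is illustrative):

    import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;
    import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
    import org.apache.hadoop.yarn.service.monitor.probe.Probe;

    import java.util.Collections;

    public class ProbeSelectionSketch {
      public static void main(String[] args) {
        // no readiness check declared: DefaultProbe with no properties
        Probe p1 = MonitorUtils.getProbe(null);
        // HTTP readiness check: HttpProbe
        Probe p2 = MonitorUtils.getProbe(new ReadinessCheck()
            .type(ReadinessCheck.TypeEnum.HTTP)
            .properties(Collections.singletonMap("url",
                "http://${THIS_HOST}:8080")));
        System.out.println(p1.getClass() + " / " + p2.getClass());
      }
    }
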
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
index 85569f8..e62048a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
-import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,15 +28,20 @@
 import java.util.Map;
 
 /**
- * Probe for a port being open.
+ * A probe that checks whether a container has a specified port open. This
+ * probe also performs the checks of the {@link DefaultProbe}. Additional
+ * configurable properties include:
+ *
+ *   port - required port for socket connection
+ *   timeout - connection timeout (default 1000)
  */
-public class PortProbe extends Probe {
+public class PortProbe extends DefaultProbe {
   protected static final Logger log = LoggerFactory.getLogger(PortProbe.class);
   private final int port;
   private final int timeout;
 
-  public PortProbe(int port, int timeout) {
-    super("Port probe of " + port + " for " + timeout + "ms", null);
+  public PortProbe(int port, int timeout, Map<String, String> props) {
+    super("Port probe of " + port + " for " + timeout + "ms", props);
     this.port = port;
     this.timeout = timeout;
   }
@@ -54,7 +58,7 @@
     int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT,
         PORT_PROBE_CONNECT_TIMEOUT_DEFAULT);
 
-    return new PortProbe(port, timeout);
+    return new PortProbe(port, timeout, props);
   }
 
   /**
@@ -65,12 +69,8 @@
    */
   @Override
   public ProbeStatus ping(ComponentInstance instance) {
-    ProbeStatus status = new ProbeStatus();
-
-    if (instance.getContainerStatus() == null || ServiceUtils
-        .isEmpty(instance.getContainerStatus().getIPs())) {
-      status.fail(this, new IOException(
-          instance.getCompInstanceName() + ": IP is not available yet"));
+    ProbeStatus status = super.ping(instance);
+    if (!status.isSuccess()) {
       return status;
     }
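
An illustrative property map for a PORT readiness check built on this class; success means a TCP connection to the container's first IP on the given port opens within the timeout (both values below are examples):

    import org.apache.hadoop.yarn.service.monitor.probe.PortProbe;

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class PortProbeUsage {
      public static void main(String[] args) throws IOException {
        Map<String, String> props = new HashMap<>();
        props.put("port", "8080");
        props.put("timeout", "1000"); // connection timeout in ms
        PortProbe probe = PortProbe.create(props);
      }
    }
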
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
index 3237a2b..341a0c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.yarn.service.monitor.probe;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 
 import java.io.IOException;
@@ -29,18 +28,18 @@
  */
 public abstract class Probe implements MonitorKeys {
 
-  protected final Configuration conf;
   private String name;
 
+  protected Probe() {
+  }
+
   /**
    * Create a probe of a specific name
    *
    * @param name probe name
-   * @param conf configuration being stored.
    */
-  public Probe(String name, Configuration conf) {
+  public Probe(String name) {
     this.name = name;
-    this.conf = conf;
   }
 
 
@@ -82,6 +81,15 @@
     return Integer.parseInt(value);
   }
 
+  public static boolean getPropertyBool(Map<String, String> props, String name,
+      boolean defaultValue) {
+    String value = props.get(name);
+    if (StringUtils.isEmpty(value)) {
+      return defaultValue;
+    }
+    return Boolean.parseBoolean(value);
+  }
+
   /**
    * perform any prelaunch initialization
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
index 2f840b1..ee27686 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -42,6 +42,9 @@
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURES_VALIDITY_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_RETRY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_RETRY_MAX;
 import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$;
 
 public abstract class AbstractProviderService implements ProviderService,
@@ -106,12 +109,14 @@
     }
 
     // By default retry forever every 30 seconds
-    launcher.setRetryContext(YarnServiceConf
-        .getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(),
-            yarnConf), YarnServiceConf
-        .getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(),
+    launcher.setRetryContext(
+        YarnServiceConf.getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX,
+            component.getConfiguration(), yarnConf),
+        YarnServiceConf.getInt(CONTAINER_RETRY_INTERVAL,
+            DEFAULT_CONTAINER_RETRY_INTERVAL, component.getConfiguration(),
             yarnConf),
-        YarnServiceConf.getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL, -1,
-            service.getConfiguration(), yarnConf));
+        YarnServiceConf.getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL,
+            DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
+            component.getConfiguration(), yarnConf));
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
index 6ac8de1..c3e2619 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.yarn.service.provider.docker;
 
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -26,7 +24,6 @@
 import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
 
 import java.io.IOException;
-import java.text.MessageFormat;
 
 public class DockerProviderService extends AbstractProviderService
     implements DockerKeys {
@@ -38,19 +35,7 @@
     launcher.setDockerImage(compInstance.getCompSpec().getArtifact().getId());
     launcher.setDockerNetwork(compInstance.getCompSpec().getConfiguration()
         .getProperty(DOCKER_NETWORK));
-    String domain = compInstance.getComponent().getScheduler().getConfig()
-        .get(RegistryConstants.KEY_DNS_DOMAIN);
-    String hostname;
-    if (domain == null || domain.isEmpty()) {
-      hostname = MessageFormat
-          .format("{0}.{1}.{2}", compInstance.getCompInstanceName(),
-              service.getName(), RegistryUtils.currentUser());
-    } else {
-      hostname = MessageFormat
-          .format("{0}.{1}.{2}.{3}", compInstance.getCompInstanceName(),
-              service.getName(), RegistryUtils.currentUser(), domain);
-    }
-    launcher.setDockerHostname(hostname);
+    launcher.setDockerHostname(compInstance.getHostname());
     launcher.setRunPrivilegedContainer(
         compInstance.getCompSpec().getRunPrivilegedContainer());
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index fc1b45b..194ae83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -116,6 +116,13 @@
       }
     }
 
+    // Validate the Docker client config.
+    try {
+      validateDockerClientConfiguration(service, conf);
+    } catch (IOException e) {
+      throw new IllegalArgumentException(e);
+    }
+
     // Validate there are no component name collisions (collisions are not
     // currently supported) and add any components from external services
     Configuration globalConf = service.getConfiguration();
@@ -214,6 +221,20 @@
     }
   }
 
+  private static void validateDockerClientConfiguration(Service service,
+      org.apache.hadoop.conf.Configuration conf) throws IOException {
+    String dockerClientConfig = service.getDockerClientConfig();
+    if (!StringUtils.isEmpty(dockerClientConfig)) {
+      Path dockerClientConfigPath = new Path(dockerClientConfig);
+      FileSystem fs = dockerClientConfigPath.getFileSystem(conf);
+      if (!fs.exists(dockerClientConfigPath)) {
+        throw new IOException(
+            "The supplied Docker client config does not exist: "
+                + dockerClientConfig);
+      }
+    }
+  }
+
   private static void validateComponent(Component comp, FileSystem fs,
       org.apache.hadoop.conf.Configuration conf)
       throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
index dfc30f7..30ba503 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
@@ -20,9 +20,23 @@
 
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.naming.Context;
+import javax.naming.NameNotFoundException;
+import javax.naming.NamingException;
+import javax.naming.directory.Attributes;
+import javax.naming.directory.DirContext;
+import javax.naming.directory.InitialDirContext;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Hashtable;
 
 
 public class ServiceRegistryUtils {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ServiceRegistryUtils.class);
 
   public static final String SVC_USERS = "/services/yarn/users";
 
@@ -53,4 +67,50 @@
   public static String mkUserHomePath(String username) {
     return SVC_USERS + "/" + username;
   }
+
+  /**
+   * Determine whether a DNS lookup succeeds for a given name. If a DNS
+   * server address is provided, the lookup is performed against that server.
+   * This option exists because it can be desirable to query Registry DNS
+   * directly, avoiding the negative-response caching that other DNS servers
+   * may perform and allowing the lookup to succeed sooner.
+   *
+   * @param addr host:port dns address, or null
+   * @param name name to look up
+   * @return true if a lookup succeeds for the specified name
+   */
+  public static boolean registryDNSLookupExists(String addr, String name) {
+    if (addr == null) {
+      try {
+        InetAddress.getByName(name);
+        return true;
+      } catch (UnknownHostException e) {
+        return false;
+      }
+    }
+
+    String dnsURI = String.format("dns://%s", addr);
+    Hashtable<String, Object> env = new Hashtable<>();
+    env.put(Context.INITIAL_CONTEXT_FACTORY,
+        "com.sun.jndi.dns.DnsContextFactory");
+    env.put(Context.PROVIDER_URL, dnsURI);
+
+    try {
+      DirContext ictx = new InitialDirContext(env);
+      Attributes attrs = ictx.getAttributes(name, new String[]{"A"});
+
+      if (attrs.size() > 0) {
+        return true;
+      }
+    } catch (NameNotFoundException e) {
+      // expected when no DNS entry exists for the name yet; not worth logging
+    } catch (NamingException e) {
+      LOG.error("Got exception when performing DNS lookup", e);
+    }
+
+    return false;
+  }
+
 }
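
Hypothetical calls to the new helper: with a null address the JVM's default resolver is used, while a host:port address sends the query straight to that server (for instance Registry DNS), sidestepping negative-response caching in intermediate resolvers. The addresses and names below are illustrative:

    import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;

    public class DnsLookupUsage {
      public static void main(String[] args) {
        // resolve via the system resolver
        boolean viaSystem =
            ServiceRegistryUtils.registryDNSLookupExists(null, "example.com");
        // query a specific DNS server directly
        boolean viaRegistryDNS = ServiceRegistryUtils.registryDNSLookupExists(
            "10.0.0.1:53", "comp-0.my-service.hadoop.example.com");
        System.out.println(viaSystem + " / " + viaRegistryDNS);
      }
    }
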
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 4373893..04b0347 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -21,11 +21,13 @@
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
 import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
@@ -60,6 +62,7 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -96,11 +99,19 @@
   private Map<ContainerId, ContainerStatus> containerStatuses =
       new ConcurrentHashMap<>();
 
+  private Credentials amCreds;
+
   public MockServiceAM(Service service) {
     super(service.getName());
     this.service = service;
   }
 
+  public MockServiceAM(Service service, Credentials amCreds) {
+    super(service.getName());
+    this.service = service;
+    this.amCreds = amCreds;
+  }
+
   @Override
   protected ContainerId getAMContainerId()
       throws BadClusterStateException {
@@ -385,4 +396,18 @@
     containerStatuses.put(container.getId(), status);
   }
 
+  @Override
+  protected ByteBuffer recordTokensForContainers()
+      throws IOException {
+    DataOutputBuffer dob = new DataOutputBuffer();
+    if (amCreds == null) {
+      return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+    }
+    try {
+      amCreds.writeTokenStorageToStream(dob);
+    } finally {
+      dob.close();
+    }
+    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+  }
 }
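
The override above uses the standard Hadoop pattern for shipping tokens to containers: write the Credentials to a DataOutputBuffer and wrap the bytes in a ByteBuffer. The same round trip in isolation (the helper name is made up):

    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.security.Credentials;

    import java.io.IOException;
    import java.nio.ByteBuffer;

    public class TokenBufferSketch {
      // hypothetical helper mirroring recordTokensForContainers()
      static ByteBuffer toBuffer(Credentials creds) throws IOException {
        DataOutputBuffer dob = new DataOutputBuffer();
        try {
          if (creds != null) {
            creds.writeTokenStorageToStream(dob);
          }
        } finally {
          dob.close();
        }
        return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      }
    }
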
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 8db98bd..57cf367 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -21,6 +21,10 @@
 import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -29,6 +33,7 @@
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -36,6 +41,7 @@
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -44,14 +50,18 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedWriter;
 import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
+import static org.junit.Assert.assertEquals;
 
 public class TestServiceAM extends ServiceTestUtils{
 
@@ -294,4 +304,44 @@
 
     am.stop();
   }
+
+  @Test
+  public void testRecordTokensForContainers() throws Exception {
+    ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setName("testContainerCompleted");
+    exampleApp.addComponent(createComponent("compa", 1, "pwd"));
+
+    String json = "{\"auths\": "
+        + "{\"https://index.docker.io/v1/\": "
+        + "{\"auth\": \"foobarbaz\"},"
+        + "\"registry.example.com\": "
+        + "{\"auth\": \"bazbarfoo\"}}}";
+    File dockerTmpDir = new File("target", "docker-tmp");
+    FileUtils.deleteQuietly(dockerTmpDir);
+    dockerTmpDir.mkdirs();
+    String dockerConfig = dockerTmpDir + "/config.json";
+    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
+    bw.write(json);
+    bw.close();
+    Credentials dockerCred =
+        DockerClientConfigHandler.readCredentialsFromConfigFile(
+            new Path(dockerConfig), conf, applicationId.toString());
+
+    MockServiceAM am = new MockServiceAM(exampleApp, dockerCred);
+    ByteBuffer amCredBuffer = am.recordTokensForContainers();
+    Credentials amCreds =
+        DockerClientConfigHandler.getCredentialsFromTokensByteBuffer(
+            amCredBuffer);
+
+    assertEquals(2, amCreds.numberOfTokens());
+    for (Token<? extends TokenIdentifier> tk : amCreds.getAllTokens()) {
+      assertEquals(DockerCredentialTokenIdentifier.KIND, tk.getKind());
+    }
+
+    am.stop();
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestSystemServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestSystemServiceManager.java
deleted file mode 100644
index dbff02f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestSystemServiceManager.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.registry.client.api.RegistryOperations;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.service.api.records.Artifact;
-import org.apache.hadoop.yarn.service.api.records.ComponentState;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.exceptions.SliderException;
-import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
-import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Map;
-
-import static org.mockito.Mockito.mock;
-
-/**
- * Tests for {@link ServiceManager}.
- */
-public class TestSystemServiceManager {
-
-  @Rule
-  public ServiceTestUtils.ServiceFSWatcher rule =
-      new ServiceTestUtils.ServiceFSWatcher();
-
-  @Test
-  public void testUpgrade() throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testUpgrade");
-    upgrade(serviceManager, "v2", false);
-    Assert.assertEquals("service not upgraded", ServiceState.UPGRADING,
-        serviceManager.getServiceSpec().getState());
-  }
-
-  @Test
-  public void testRestartNothingToUpgrade()
-      throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testRestart");
-    upgrade(serviceManager, "v2", false);
-
-    //make components stable
-    serviceManager.getServiceSpec().getComponents().forEach(comp -> {
-      comp.setState(ComponentState.STABLE);
-    });
-    serviceManager.handle(new ServiceEvent(ServiceEventType.START));
-    Assert.assertEquals("service not re-started", ServiceState.STABLE,
-        serviceManager.getServiceSpec().getState());
-  }
-
-  @Test
-  public void testRestartWithPendingUpgrade()
-      throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testRestart");
-    upgrade(serviceManager, "v2", true);
-    serviceManager.handle(new ServiceEvent(ServiceEventType.START));
-    Assert.assertEquals("service should still be upgrading",
-        ServiceState.UPGRADING, serviceManager.getServiceSpec().getState());
-  }
-
-
-  private void upgrade(ServiceManager service, String version,
-      boolean upgradeArtifact)
-      throws IOException, SliderException {
-    Service upgradedDef = ServiceTestUtils.createExampleApplication();
-    upgradedDef.setName(service.getName());
-    upgradedDef.setVersion(version);
-    if (upgradeArtifact) {
-      Artifact upgradedArtifact = createTestArtifact("2");
-      upgradedDef.getComponents().forEach(component -> {
-        component.setArtifact(upgradedArtifact);
-      });
-    }
-    writeUpgradedDef(upgradedDef);
-    ServiceEvent upgradeEvent = new ServiceEvent(ServiceEventType.UPGRADE);
-    upgradeEvent.setVersion("v2");
-    service.handle(upgradeEvent);
-  }
-
-  private ServiceManager createTestServiceManager(String name)
-      throws IOException {
-    ServiceContext context = new ServiceContext();
-    context.service = createBaseDef(name);
-    context.fs = rule.getFs();
-
-    context.scheduler = new ServiceScheduler(context) {
-      @Override
-      protected YarnRegistryViewForProviders createYarnRegistryOperations(
-          ServiceContext context, RegistryOperations registryClient) {
-        return mock(YarnRegistryViewForProviders.class);
-      }
-    };
-
-    context.scheduler.init(rule.getConf());
-
-    Map<String, org.apache.hadoop.yarn.service.component.Component>
-        componentState = context.scheduler.getAllComponents();
-    context.service.getComponents().forEach(component -> {
-      componentState.put(component.getName(),
-          new org.apache.hadoop.yarn.service.component.Component(component,
-              1L, context));
-    });
-    return new ServiceManager(context);
-  }
-
-  static Service createBaseDef(String name) {
-    ApplicationId applicationId = ApplicationId.newInstance(
-        System.currentTimeMillis(), 1);
-    Service serviceDef = ServiceTestUtils.createExampleApplication();
-    serviceDef.setId(applicationId.toString());
-    serviceDef.setName(name);
-    serviceDef.setState(ServiceState.STARTED);
-    Artifact artifact = createTestArtifact("1");
-
-    serviceDef.getComponents().forEach(component ->
-        component.setArtifact(artifact));
-    return serviceDef;
-  }
-
-  static Artifact createTestArtifact(String artifactId) {
-    Artifact artifact = new Artifact();
-    artifact.setId(artifactId);
-    artifact.setType(Artifact.TypeEnum.TARBALL);
-    return artifact;
-  }
-
-  private void writeUpgradedDef(Service upgradedDef)
-      throws IOException, SliderException {
-    Path upgradePath = rule.getFs().buildClusterUpgradeDirPath(
-        upgradedDef.getName(), upgradedDef.getVersion());
-    ServiceApiUtil.createDirAndPersistApp(rule.getFs(), upgradePath,
-        upgradedDef);
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java
new file mode 100644
index 0000000..8169e67
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for default probe.
+ */
+@RunWith(Parameterized.class)
+public class TestDefaultProbe {
+  private final DefaultProbe probe;
+
+  public TestDefaultProbe(Probe probe) {
+    this.probe = (DefaultProbe) probe;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    // test run 1: Default probe checks that container has an IP
+    Probe p1 = MonitorUtils.getProbe(null);
+
+    // test run 2: Default probe with DNS check for component instance hostname
+    ReadinessCheck rc2 = new ReadinessCheck()
+        .type(ReadinessCheck.TypeEnum.DEFAULT)
+        .properties(Collections.singletonMap(
+            MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true"));
+    Probe p2 = MonitorUtils.getProbe(rc2);
+
+    // test run 3: Default probe with DNS check using specific DNS server
+    Map<String, String> props = new HashMap<>();
+    props.put(MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true");
+    props.put(MonitorKeys.DEFAULT_PROBE_DNS_ADDRESS, "8.8.8.8");
+    ReadinessCheck rc3 = new ReadinessCheck()
+        .type(ReadinessCheck.TypeEnum.DEFAULT).properties(props);
+    Probe p3 = MonitorUtils.getProbe(rc3);
+
+    return Arrays.asList(new Object[][] {{p1}, {p2}, {p3}});
+  }
+
+  @Test
+  public void testDefaultProbe() {
+    // component instance has a good hostname, so probe will eventually succeed
+    // whether or not DNS checking is enabled
+    ComponentInstance componentInstance =
+        createMockComponentInstance("example.com");
+    checkPingResults(probe, componentInstance, false);
+
+    // component instance has a bad hostname, so probe will fail when DNS
+    // checking is enabled
+    componentInstance = createMockComponentInstance("bad.dns.test");
+    checkPingResults(probe, componentInstance, probe.isDnsCheckEnabled());
+  }
+
+  private static void checkPingResults(Probe probe,
+      ComponentInstance componentInstance, boolean expectDNSCheckFailure) {
+    // on the first ping, null container status results in failure
+    ProbeStatus probeStatus = probe.ping(componentInstance);
+    assertFalse("Expected failure for " + probeStatus.toString(),
+        probeStatus.isSuccess());
+    assertTrue("Expected IP failure for " + probeStatus.toString(),
+        probeStatus.toString().contains(
+        componentInstance.getCompInstanceName() + ": IP is not available yet"));
+
+    // on the second ping, container status is retrieved but there are no
+    // IPs, resulting in failure
+    probeStatus = probe.ping(componentInstance);
+    assertFalse("Expected failure for " + probeStatus.toString(),
+        probeStatus.isSuccess());
+    assertTrue("Expected IP failure for " + probeStatus.toString(),
+        probeStatus.toString().contains(componentInstance
+            .getCompInstanceName() + ": IP is not available yet"));
+
+    // on the third ping, IPs are retrieved and success depends on whether or
+    // not a DNS lookup can be performed for the component instance hostname
+    probeStatus = probe.ping(componentInstance);
+    if (expectDNSCheckFailure) {
+      assertFalse("Expected failure for " + probeStatus.toString(),
+          probeStatus.isSuccess());
+      assertTrue("Expected DNS failure for " + probeStatus.toString(),
+          probeStatus.toString().contains(componentInstance
+              .getCompInstanceName() + ": DNS checking is enabled, but lookup" +
+              " for " + componentInstance.getHostname() + " is not available " +
+              "yet"));
+    } else {
+      assertTrue("Expected success for " + probeStatus.toString(),
+          probeStatus.isSuccess());
+    }
+  }
+
+  private static ComponentInstance createMockComponentInstance(
+      String hostname) {
+    ComponentInstance componentInstance = mock(ComponentInstance.class);
+    when(componentInstance.getHostname()).thenReturn(hostname);
+    when(componentInstance.getCompInstanceName()).thenReturn("comp-0");
+    when(componentInstance.getContainerStatus())
+        .thenAnswer(new Answer<ContainerStatus>() {
+          private int count = 0;
+
+          @Override
+          public ContainerStatus answer(InvocationOnMock invocationOnMock) {
+            count++;
+            if (count == 1) {
+              // first call to getContainerStatus returns null
+              return null;
+            } else if (count == 2) {
+              // second call returns a ContainerStatus with no IPs
+              ContainerStatus containerStatus = mock(ContainerStatus.class);
+              when(containerStatus.getIPs()).thenReturn(null);
+              return containerStatus;
+            } else {
+              // third call returns a ContainerStatus with one IP
+              ContainerStatus containerStatus = mock(ContainerStatus.class);
+              when(containerStatus.getIPs())
+                  .thenReturn(Collections.singletonList("1.2.3.4"));
+              return containerStatus;
+            }
+          }
+        });
+    return componentInstance;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 7937b15..474ae78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -626,7 +626,7 @@
           ApplicationReport.newInstance(appId, attemptId, "fakeUser",
               "fakeQueue", "fakeApplicationName", "localhost", 0, null,
               YarnApplicationState.FINISHED, "fake an application report", "",
-              1000L, 1200L, FinalApplicationStatus.FAILED, null, "", 50f,
+              1000L, 1000L, 1200L, FinalApplicationStatus.FAILED, null, "", 50f,
               "fakeApplicationType", null);
       return report;
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
index c3e3c41..f0e3ca2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
@@ -332,7 +332,7 @@
           ApplicationReport.newInstance(applicationId,
             ApplicationAttemptId.newInstance(applicationId, 1), "user",
             "queue", "appname", "host", 124, null,
-            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN",
             null);
       List<ApplicationReport> applicationReports =
@@ -389,7 +389,7 @@
           ApplicationReport.newInstance(applicationId2,
             ApplicationAttemptId.newInstance(applicationId2, 2), "user2",
             "queue2", "appname2", "host2", 125, null,
-            YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+            YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f,
             "NON-YARN", null);
       applicationReports.add(newApplicationReport2);
@@ -399,7 +399,7 @@
           ApplicationReport.newInstance(applicationId3,
             ApplicationAttemptId.newInstance(applicationId3, 3), "user3",
             "queue3", "appname3", "host3", 126, null,
-            YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+            YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f,
             "MAPREDUCE", null);
       applicationReports.add(newApplicationReport3);
@@ -409,7 +409,7 @@
           ApplicationReport.newInstance(applicationId4,
             ApplicationAttemptId.newInstance(applicationId4, 4), "user4",
             "queue4", "appname4", "host4", 127, null,
-            YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+            YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
             "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index f6e305f..b84b49c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -737,7 +737,7 @@
       ApplicationReport newApplicationReport = ApplicationReport.newInstance(
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,
-          YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+          YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
       List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
       applicationReports.add(newApplicationReport);
@@ -812,7 +812,7 @@
       ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
           applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2),
           "user2", "queue2", "appname2", "host2", 125, null,
-          YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+          YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
         null);
       applicationReports.add(newApplicationReport2);
@@ -821,7 +821,7 @@
       ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
           applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
           "user3", "queue3", "appname3", "host3", 126, null,
-          YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+          YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
         null);
       applicationReports.add(newApplicationReport3);
@@ -832,7 +832,7 @@
               applicationId4,
               ApplicationAttemptId.newInstance(applicationId4, 4),
               "user4", "queue4", "appname4", "host4", 127, null,
-              YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+              YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
               FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
               "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 1f6488d..82a20eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -136,7 +136,7 @@
       ApplicationReport newApplicationReport = ApplicationReport.newInstance(
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,
-          YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+          YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
           FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN",
           null, null, false, Priority.newInstance(0), "high-mem", "high-mem");
       newApplicationReport.setLogAggregationStatus(LogAggregationStatus.SUCCEEDED);
@@ -383,7 +383,7 @@
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
         Sets.newHashSet("tag1", "tag3"), false, Priority.UNDEFINED, "", "");
     List<ApplicationReport> applicationReports =
@@ -394,7 +394,7 @@
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2),
         "user2", "queue2", "appname2", "host2", 125, null,
-        YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+        YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
         null, Sets.newHashSet("tag2", "tag3"), false, Priority.UNDEFINED,
         "", "");
@@ -404,7 +404,7 @@
     ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
         applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
         "user3", "queue3", "appname3", "host3", 126, null,
-        YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+        YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", 
         null, Sets.newHashSet("tag1", "tag4"), false, Priority.UNDEFINED,
         "", "");
@@ -414,7 +414,7 @@
     ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
         applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4),
         "user4", "queue4", "appname4", "host4", 127, null,
-        YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+        YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
         "NON-MAPREDUCE", null, Sets.newHashSet("tag1"), false,
         Priority.UNDEFINED, "", "");
@@ -424,7 +424,7 @@
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId5, ApplicationAttemptId.newInstance(applicationId5, 5),
         "user5", "queue5", "appname5", "host5", 128, null,
-        YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5,
+        YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5, 5,
         FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE", null,
         Sets.newHashSet("tag2", "tag4"), false, Priority.UNDEFINED, "", "");
     applicationReports.add(newApplicationReport5);
@@ -433,7 +433,7 @@
     ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(
         applicationId6, ApplicationAttemptId.newInstance(applicationId6, 6),
         "user6", "queue6", "appname6", "host6", 129, null,
-        YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6,
+        YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6, 6,
         FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG",
         null, new HashSet<String>(), false, Priority.UNDEFINED, "", "");
     applicationReports.add(newApplicationReport6);
@@ -1007,7 +1007,7 @@
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
@@ -1020,7 +1020,7 @@
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
@@ -1059,12 +1059,12 @@
     ApplicationReport newApplicationReport1 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.34344f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport1);
@@ -1084,12 +1084,12 @@
     ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport3);
@@ -1127,7 +1127,7 @@
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport5);
@@ -1154,12 +1154,12 @@
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport5);
@@ -1182,7 +1182,7 @@
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
@@ -1197,7 +1197,7 @@
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
@@ -1232,7 +1232,7 @@
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class)))
         .thenReturn(newApplicationReport2);
@@ -1247,7 +1247,7 @@
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class)))
         .thenReturn(newApplicationReport);
@@ -2015,7 +2015,7 @@
         ApplicationReport.newInstance(applicationId,
             ApplicationAttemptId.newInstance(applicationId, 1), "user",
             "queue", "appname", "host", 124, null,
-            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
             FinalApplicationStatus.UNDEFINED, null, "N/A", 0.53789f, "YARN",
             null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
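
All of the test churn above is one mechanical signature change: ApplicationReport.newInstance gained a launchTime argument between startTime and finishTime, which is why every "0, 0" pair becomes "0, 0, 0". A minimal sketch of a call site against the patched API, with illustrative values:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;

    public class LaunchTimeReportSketch {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1234L, 5);
        ApplicationAttemptId attemptId =
            ApplicationAttemptId.newInstance(appId, 1);
        // The new launchTime sits between startTime and finishTime;
        // 0 means the AM has not been launched (yet).
        ApplicationReport report = ApplicationReport.newInstance(
            appId, attemptId, "user", "queue", "appname", "host", 124, null,
            YarnApplicationState.FINISHED, "diagnostics", "url",
            1000L /* startTime */, 1005L /* launchTime */,
            1200L /* finishTime */,
            FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.5f, "YARN", null);
        System.out.println("launchTime=" + report.getLaunchTime());
      }
    }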
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
index 036fa90..ddd9acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
@@ -221,6 +221,18 @@
   }
 
   @Override
+  public long getLaunchTime() {
+    ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getLaunchTime();
+  }
+
+  @Override
+  public void setLaunchTime(long launchTime) {
+    maybeInitBuilder();
+    builder.setLaunchTime(launchTime);
+  }
+
+  @Override
   public long getFinishTime() {
     ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
     return p.getFinishTime();
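
The new accessor pair follows the usual YARN PBImpl idiom: reads consult either the immutable proto or the in-progress builder depending on viaProto, and writes always go through maybeInitBuilder(). A self-contained model of that idiom, where Proto and Builder are simplified stand-ins for the generated protobuf classes rather than the real generated API:

    public class PBIdiomSketch {
      static class Proto {
        private final long launchTime;
        Proto(long launchTime) { this.launchTime = launchTime; }
        long getLaunchTime() { return launchTime; }
      }

      static class Builder {
        private long launchTime;
        long getLaunchTime() { return launchTime; }
        void setLaunchTime(long t) { launchTime = t; }
      }

      private Proto proto = new Proto(0);
      private Builder builder;
      private boolean viaProto = true;

      private void maybeInitBuilder() {
        if (viaProto || builder == null) {
          builder = new Builder();
          builder.setLaunchTime(proto.getLaunchTime()); // carry over state
        }
        viaProto = false;
      }

      public long getLaunchTime() {
        // reads consult whichever side currently owns the data
        return viaProto ? proto.getLaunchTime() : builder.getLaunchTime();
      }

      public void setLaunchTime(long launchTime) {
        maybeInitBuilder(); // writes always go through the mutable builder
        builder.setLaunchTime(launchTime);
      }

      public static void main(String[] args) {
        PBIdiomSketch record = new PBIdiomSketch();
        record.setLaunchTime(1005L);
        System.out.println(record.getLaunchTime()); // prints 1005
      }
    }

The real implementations additionally rebuild the proto from the builder when the record is serialized; the sketch only models the read/write split.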
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
index 13c576b2..98be93b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -119,7 +119,8 @@
         credentials.addToken(
             new Text(registryUrl + "-" + applicationId), token);
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Added token: " + token.toString());
+          LOG.debug("Token read from Docker client configuration file: "
+              + token.toString());
         }
       }
     }
@@ -142,7 +143,7 @@
     tokens.rewind();
     if (LOG.isDebugEnabled()) {
       for (Token token : credentials.getAllTokens()) {
-        LOG.debug("Added token: " + token.toString());
+        LOG.debug("Token read from token storage: " + token.toString());
       }
     }
     return credentials;
@@ -161,9 +162,11 @@
     ObjectMapper mapper = new ObjectMapper();
     ObjectNode rootNode = mapper.createObjectNode();
     ObjectNode registryUrlNode = mapper.createObjectNode();
+    boolean foundDockerCred = false;
     if (credentials.numberOfTokens() > 0) {
       for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
         if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
+          foundDockerCred = true;
           DockerCredentialTokenIdentifier ti =
               (DockerCredentialTokenIdentifier) tk.decodeIdentifier();
           ObjectNode registryCredNode = mapper.createObjectNode();
@@ -176,9 +179,11 @@
         }
       }
     }
-    rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
-    String json =
-        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
-    FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
+    if (foundDockerCred) {
+      rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
+      String json =
+          mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
+      FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
+    }
   }
 }
\ No newline at end of file
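
The foundDockerCred flag means the handler no longer writes an empty {"auths": {}} config file when the credentials contain no Docker credential tokens. A runnable sketch of the guarded-write pattern, using Jackson and commons-io as the handler does ("auths" mirrors CONFIG_AUTHS_KEY; the file name is illustrative):

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.io.FileUtils;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public class GuardedConfigWrite {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode rootNode = mapper.createObjectNode();
        ObjectNode registryUrlNode = mapper.createObjectNode();
        boolean foundDockerCred = false;

        // ... in the handler, one registry entry is added to
        // registryUrlNode per Docker credential token, and
        // foundDockerCred flips to true ...

        if (foundDockerCred) {
          // only materialize the file when there is something to write
          rootNode.set("auths", registryUrlNode);
          String json = mapper.writerWithDefaultPrettyPrinter()
              .writeValueAsString(rootNode);
          FileUtils.writeStringToFile(
              new File("config.json"), json, StandardCharsets.UTF_8);
        }
      }
    }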
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
index 46fc4d5..ea39a4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
@@ -58,7 +58,7 @@
     ApplicationReport appReport =
         ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
           "appname", "host", 124, null, YarnApplicationState.FINISHED,
-          "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
+          "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
           "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
           null, false, Priority.newInstance(0),"","");
     return appReport;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
index b8931d8..3734e39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
@@ -137,7 +137,7 @@
       currentApplicationAttemptId, appHistory.getUser(), appHistory.getQueue(),
       appHistory.getApplicationName(), host, rpcPort, null,
       appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(),
-      trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(),
+      trackingUrl, appHistory.getStartTime(), 0, appHistory.getFinishTime(),
       appHistory.getFinalApplicationStatus(), null, "", 100,
       appHistory.getApplicationType(), null);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index 4ba1bdf..0de834c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -391,7 +391,7 @@
       ApplicationId applicationId, ApplicationAttemptId applicationAttemptId,
       String user, String queue, String name, String host, int rpcPort,
       Token clientToAMToken, YarnApplicationState state, String diagnostics,
-      String url, long startTime, long finishTime,
+      String url, long startTime, long launchTime, long finishTime,
       FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
       float progress, String appType, Token amRmToken, Set<String> tags,
@@ -410,6 +410,7 @@
     report.setDiagnostics(diagnostics);
     report.setTrackingUrl(url);
     report.setStartTime(startTime);
+    report.setLaunchTime(launchTime);
     report.setFinishTime(finishTime);
     report.setFinalApplicationStatus(finalStatus);
     report.setApplicationResourceUsageReport(appResources);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 8600b72..9dd8aa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -242,10 +242,9 @@
         .__("FinalStatus Reported by AM:",
             clairfyAppFinalStatus(app.getFinalAppStatus()))
         .__("Started:", Times.format(app.getStartedTime()))
-        .__(
-            "Elapsed:",
-            StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
-                app.getFinishedTime())))
+        .__("Launched:", Times.format(app.getLaunchTime()))
+        .__("Finished:", Times.format(app.getFinishedTime()))
+        .__("Elapsed:", StringUtils.formatTime(app.getElapsedTime()))
         .__(
             "Tracking URL:",
             app.getTrackingUrl() == null
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
index b2dd4de..cb15449 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
@@ -150,7 +150,9 @@
         html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User")
           .th(".name", "Name").th(".type", "Application Type")
           .th(".queue", "Queue").th(".priority", "Application Priority")
-          .th(".starttime", "StartTime").th(".finishtime", "FinishTime")
+          .th(".starttime", "StartTime")
+          .th(".launchtime", "LaunchTime")
+          .th(".finishtime", "FinishTime")
           .th(".state", "State").th(".finalstatus", "FinalStatus")
           .th(".progress", "Progress").th(".ui", "Tracking UI").__().__().tbody();
 
@@ -188,6 +190,7 @@
             .getQueue()))).append("\",\"").append(String
                 .valueOf(app.getPriority()))
         .append("\",\"").append(app.getStartedTime())
+        .append("\",\"").append(app.getLaunchTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"")
         .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index 29f1e63..b2f65a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -51,7 +51,7 @@
     sb.append("[\n")
       .append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
-      .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
+      .append("\n, {'sType':'numeric', 'aTargets': [6, 7, 8]")
       .append(", 'mRender': renderHadoopDate }")
       .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
     if (isFairSchedulerPage) {
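
The extra index in aTargets is needed because the apps table now has a LaunchTime column between StartTime and FinishTime, so columns 6, 7 and 8 all hold timestamps that DataTables should render through renderHadoopDate. A small sketch of the date-rendering fragment this builder now emits:

    public class ColumnDefsSketch {
      public static void main(String[] args) {
        // Columns: 6 = StartTime, 7 = LaunchTime, 8 = FinishTime.
        StringBuilder sb = new StringBuilder();
        sb.append("[\n")
          .append("{'sType':'natural', 'aTargets': [0]")
          .append(", 'mRender': parseHadoopID }")
          .append("\n, {'sType':'numeric', 'aTargets': [6, 7, 8]")
          .append(", 'mRender': renderHadoopDate }");
        System.out.println(sb);
      }
    }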
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index ac2f8da..b4687a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -55,6 +55,7 @@
   protected FinalApplicationStatus finalAppStatus;
   protected long submittedTime;
   protected long startedTime;
+  private long launchTime;
   protected long finishedTime;
   protected long elapsedTime;
   protected String applicationTags;
@@ -88,6 +89,7 @@
     originalTrackingUrl = app.getOriginalTrackingUrl();
     submittedTime = app.getStartTime();
     startedTime = app.getStartTime();
+    launchTime = app.getLaunchTime();
     finishedTime = app.getFinishTime();
     elapsedTime = Times.elapsed(startedTime, finishedTime);
     finalAppStatus = app.getFinalApplicationStatus();
@@ -198,6 +200,10 @@
     return submittedTime;
   }
 
+  public long getLaunchTime() {
+    return launchTime;
+  }
+
   public long getStartedTime() {
     return startedTime;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index 7a840b1..7debbdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -36,8 +36,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileContext;
@@ -490,9 +488,6 @@
             new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
           continue;
         }
-
-        // create a random dir to make sure fs isn't in read-only mode
-        verifyDirUsingMkdir(testDir);
       } catch (IOException ie) {
         ret.put(dir,
           new DiskErrorInformation(DiskErrorCause.OTHER, ie.getMessage()));
@@ -501,31 +496,6 @@
     return ret;
   }
 
-  /**
-   * Function to test whether a dir is working correctly by actually creating a
-   * random directory.
-   *
-   * @param dir
-   *          the dir to test
-   */
-  private void verifyDirUsingMkdir(File dir) throws IOException {
-
-    String randomDirName = RandomStringUtils.randomAlphanumeric(5);
-    File target = new File(dir, randomDirName);
-    int i = 0;
-    while (target.exists()) {
-
-      randomDirName = RandomStringUtils.randomAlphanumeric(5) + i;
-      target = new File(dir, randomDirName);
-      i++;
-    }
-    try {
-      diskValidator.checkStatus(target);
-    } finally {
-      FileUtils.deleteQuietly(target);
-    }
-  }
-
   private boolean isDiskUsageOverPercentageLimit(File dir,
       float diskUtilizationPercentageCutoff) {
     float freePercentage =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
old mode 100755
new mode 100644
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
old mode 100755
new mode 100644
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index f5d84a3..a7d2fe8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -254,9 +253,6 @@
     // load core-site.xml
     loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
-    // Refresh user to group mappings during init.
-    refreshUserToGroupMappingsWithConf();
-
     // Do refreshSuperUserGroupsConfiguration with loaded core-site.xml
     // Or use RM specific configurations to overwrite the common ones first
     // if they exist
@@ -340,21 +336,6 @@
     super.serviceInit(this.conf);
   }
 
-  private void refreshUserToGroupMappingsWithConf()
-      throws YarnException, IOException {
-    Configuration newConf = new Configuration(false);
-    InputStream confFileInputStream =
-        configurationProvider
-        .getConfigurationInputStream(newConf, YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-    if (confFileInputStream != null) {
-      newConf.addResource(confFileInputStream);
-    }
-
-    // Do refreshUserToGroupsMappings with loaded core-site.xml
-    Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(newConf)
-        .refresh();
-  }
-
   private void loadConfigurationXml(String configurationFile)
       throws YarnException, IOException {
     InputStream configurationInputStream =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index 7051f8c..5896808 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -303,7 +303,7 @@
         LOG.info("Launching master" + application.getAppAttemptId());
         launch();
         handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
-            RMAppAttemptEventType.LAUNCHED));
+            RMAppAttemptEventType.LAUNCHED, System.currentTimeMillis()));
       } catch(Exception ie) {
         String message = "Error launching " + application.getAppAttemptId()
             + ". Got exception: " + StringUtils.stringifyException(ie);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java
index 79a5de2..2b0bd2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationStateData.java
@@ -47,7 +47,8 @@
   public static ApplicationStateData newInstance(long submitTime,
       long startTime, String user,
       ApplicationSubmissionContext submissionContext, RMAppState state,
-      String diagnostics, long finishTime, CallerContext callerContext) {
+      String diagnostics, long launchTime, long finishTime,
+      CallerContext callerContext) {
     ApplicationStateData appState = Records.newRecord(ApplicationStateData.class);
     appState.setSubmitTime(submitTime);
     appState.setStartTime(startTime);
@@ -55,6 +56,7 @@
     appState.setApplicationSubmissionContext(submissionContext);
     appState.setState(state);
     appState.setDiagnostics(diagnostics);
+    appState.setLaunchTime(launchTime);
     appState.setFinishTime(finishTime);
     appState.setCallerContext(callerContext);
     return appState;
@@ -63,7 +65,8 @@
   public static ApplicationStateData newInstance(long submitTime,
       long startTime, String user,
       ApplicationSubmissionContext submissionContext, RMAppState state,
-      String diagnostics, long finishTime, CallerContext callerContext,
+      String diagnostics, long launchTime, long finishTime,
+      CallerContext callerContext,
       Map<ApplicationTimeoutType, Long> applicationTimeouts) {
     ApplicationStateData appState =
         Records.newRecord(ApplicationStateData.class);
@@ -73,6 +76,7 @@
     appState.setApplicationSubmissionContext(submissionContext);
     appState.setState(state);
     appState.setDiagnostics(diagnostics);
+    appState.setLaunchTime(launchTime);
     appState.setFinishTime(finishTime);
     appState.setCallerContext(callerContext);
     appState.setApplicationTimeouts(applicationTimeouts);
@@ -82,7 +86,7 @@
   public static ApplicationStateData newInstance(long submitTime,
       long startTime, ApplicationSubmissionContext context, String user,
       CallerContext callerContext) {
-    return newInstance(submitTime, startTime, user, context, null, "", 0,
+    return newInstance(submitTime, startTime, user, context, null, "", 0, 0,
         callerContext);
   }
   
@@ -136,6 +140,18 @@
   @Unstable
   public abstract void setStartTime(long startTime);
 
+  /**
+   * Get the <em>launch time</em> of the application.
+   * @return <em>launch time</em> of the application
+   */
+  @Public
+  @Stable
+  public abstract long getLaunchTime();
+
+  @Private
+  @Unstable
+  public abstract void setLaunchTime(long launchTime);
+
   /**
    * The application submitter
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java
index d037e68..f5cd107 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationStateDataPBImpl.java
@@ -120,6 +120,18 @@
     builder.setStartTime(startTime);
   }
 
+  @Override
+  public long getLaunchTime() {
+    ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getLaunchTime();
+  }
+
+  @Override
+  public void setLaunchTime(long launchTime) {
+    maybeInitBuilder();
+    builder.setLaunchTime(launchTime);
+  }
+
   @Override
   public String getUser() {
     ApplicationStateDataProtoOrBuilder p = viaProto ? proto : builder;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
index 9b6a0b0..11811f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
@@ -263,7 +263,10 @@
         if (shouldMove) {
           moveAppsInQueueSync(expiredReservation, defReservationQueue);
         }
-        if (scheduler.getAppsInQueue(expiredReservation).size() > 0) {
+        List<ApplicationAttemptId> appsInQueue =
+            scheduler.getAppsInQueue(expiredReservation);
+        int size = (appsInQueue == null) ? 0 : appsInQueue.size();
+        if (size > 0) {
           scheduler.killAllAppsInQueue(expiredReservation);
           LOG.info("Killing applications in queue: {}", expiredReservation);
         } else {
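
The intermediate variable matters because getAppsInQueue can return null when the scheduler does not recognize the queue (for example, when the expired reservation queue is already gone), so calling size() directly on the result could throw a NullPointerException. The guard in isolation:

    import java.util.List;

    public class NullSafeQueueSize {
      // Hypothetical stand-in for scheduler.getAppsInQueue(queueName),
      // which can return null for a queue the scheduler no longer knows.
      static List<String> getAppsInQueue(String queueName) {
        return null;
      }

      public static void main(String[] args) {
        List<String> appsInQueue = getAppsInQueue("expired-reservation");
        int size = (appsInQueue == null) ? 0 : appsInQueue.size();
        System.out.println("apps still in queue: " + size); // 0, no NPE
      }
    }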
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index e286834..99cce87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -180,7 +180,15 @@
    * @return the submit time of the application.
    */
   long getSubmitTime();
-  
+
+  /**
+   * The launch time of the application.
+   * Since getStartTime() effectively returns the submit time, the launch
+   * time is exposed as a separate field to avoid compatibility issues.
+   * @return the launch time of the application.
+   */
+  long getLaunchTime();
+
   /**
    * The tracking url for the application master.
    * @return the tracking url for the application master.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
index 5c46945..3212d6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
@@ -37,6 +37,12 @@
     this.diagnosticMsg = diagnostic;
   }
 
+  public RMAppEvent(ApplicationId appId, RMAppEventType type, long timeStamp) {
+    super(type, timeStamp);
+    this.appId = appId;
+    this.diagnosticMsg = "";
+  }
+
   public ApplicationId getApplicationId() {
     return this.appId;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
index 514efd4..4b55d38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
@@ -37,6 +37,7 @@
   ATTEMPT_FAILED,
   ATTEMPT_KILLED,
   NODE_UPDATE,
+  ATTEMPT_LAUNCHED,
   
   // Source: Container and ResourceTracker
   APP_RUNNING_ON_NODE,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 4234e58..daf14c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -164,6 +164,7 @@
 
   // Mutable fields
   private long startTime;
+  private long launchTime = 0;
   private long finishTime = 0;
   private long storedFinishTime = 0;
   private int firstAttemptIdInStateStore = 1;
@@ -290,6 +291,10 @@
     .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
+    // Handle ATTEMPT_LAUNCHED to update the launchTime and publish to ATS
+    .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
+        RMAppEventType.ATTEMPT_LAUNCHED,
+        new AttemptLaunchedTransition())
 
      // Transitions from RUNNING state
     .addTransition(RMAppState.RUNNING, RMAppState.RUNNING,
@@ -784,9 +789,9 @@
           this.applicationId, currentApplicationAttemptId, this.user,
           this.queue, this.name, host, rpcPort, clientToAMToken,
           createApplicationState(), diags, trackingUrl, this.startTime,
-          this.finishTime, finishState, appUsageReport, origTrackingUrl,
-          progress, this.applicationType, amrmToken, applicationTags,
-          this.getApplicationPriority());
+          this.launchTime, this.finishTime, finishState, appUsageReport,
+          origTrackingUrl, progress, this.applicationType, amrmToken,
+          applicationTags, this.getApplicationPriority());
       report.setLogAggregationStatus(logAggregationStatus);
       report.setUnmanagedApp(submissionContext.getUnmanagedAM());
       report.setAppNodeLabelExpression(getAppNodeLabelExpression());
@@ -840,6 +845,17 @@
   }
 
   @Override
+  public long getLaunchTime() {
+    this.readLock.lock();
+
+    try {
+      return this.launchTime;
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  @Override
   public long getSubmitTime() {
     return this.submitTime;
   }
@@ -936,6 +952,7 @@
         .getDiagnostics());
     this.storedFinishTime = appState.getFinishTime();
     this.startTime = appState.getStartTime();
+    this.launchTime = appState.getLaunchTime();
     this.callerContext = appState.getCallerContext();
     this.applicationTimeouts = appState.getApplicationTimeouts();
     // If interval > 0, some attempts might have been deleted.
@@ -1038,6 +1055,20 @@
     };
   }
 
+  private static final class AttemptLaunchedTransition
+      extends RMAppTransition {
+    @Override
+    public void transition(RMAppImpl app, RMAppEvent event) {
+      if (app.launchTime == 0) {
+        LOG.info("Update the launch time for applicationId: "
+            + app.getApplicationId() + ", attemptId: "
+            + app.getCurrentAppAttempt().getAppAttemptId()
+            + ", launchTime: " + event.getTimestamp());
+        app.launchTime = event.getTimestamp();
+      }
+    }
+  }
+
   private static final class AppRunningOnNodeTransition extends RMAppTransition {
     public void transition(RMAppImpl app, RMAppEvent event) {
       RMAppRunningOnNodeEvent nodeAddedEvent = (RMAppRunningOnNodeEvent) event;
@@ -1297,7 +1329,8 @@
     ApplicationStateData appState =
         ApplicationStateData.newInstance(this.submitTime, this.startTime,
             this.user, this.submissionContext,
-            stateToBeStored, diags, this.storedFinishTime, this.callerContext);
+            stateToBeStored, diags, this.launchTime, this.storedFinishTime,
+            this.callerContext);
     appState.setApplicationTimeouts(this.applicationTimeouts);
     this.rmContext.getStateStore().updateApplicationState(appState);
   }
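
End to end, the flow is: AMLauncher fires a timestamped RMAppAttemptEvent (LAUNCHED); RMAppAttemptImpl forwards a timestamped RMAppEvent (ATTEMPT_LAUNCHED); and the ACCEPTED-state transition above records the timestamp into launchTime exactly once before it is persisted through ApplicationStateData. A condensed, runnable model of that one-shot update:

    // The first ATTEMPT_LAUNCHED timestamp wins, so a relaunched attempt
    // after an AM failure does not overwrite the original launch time.
    public class LaunchTimeClock {
      private long launchTime = 0;

      void onAttemptLaunched(long eventTimestamp) {
        if (launchTime == 0) { // mirrors the app.launchTime == 0 guard
          launchTime = eventTimestamp;
        }
      }

      long getLaunchTime() {
        return launchTime;
      }

      public static void main(String[] args) {
        LaunchTimeClock clock = new LaunchTimeClock();
        clock.onAttemptLaunched(1000L); // first attempt launches
        clock.onAttemptLaunched(2000L); // retry after an AM failure
        System.out.println(clock.getLaunchTime()); // prints 1000
      }
    }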
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java
index 6df6b19..2ddc981 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptEvent.java
@@ -38,6 +38,13 @@
     this.diagnosticMsg = diagnostics;
   }
 
+  public RMAppAttemptEvent(ApplicationAttemptId appAttemptId,
+      RMAppAttemptEventType type, long timeStamp) {
+    super(type, timeStamp);
+    this.appAttemptId = appAttemptId;
+    this.diagnosticMsg = "";
+  }
+
   public ApplicationAttemptId getApplicationAttemptId() {
     return this.appAttemptId;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 1b1e2c4..32f275f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1557,7 +1557,9 @@
             appAttempt.launchAMStartTime;
         ClusterMetrics.getMetrics().addAMLaunchDelay(delay);
       }
-
+      appAttempt.eventHandler.handle(
+          new RMAppEvent(appAttempt.getAppAttemptId().getApplicationId(),
+              RMAppEventType.ATTEMPT_LAUNCHED, event.getTimestamp()));
       appAttempt
           .updateAMLaunchDiagnostics(AMState.LAUNCHED.getDiagnosticMessage());
       // Register with AMLivelinessMonitor
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
index 95e0533..36665d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
@@ -96,7 +96,8 @@
         // This is a reserved container
         LOG.info("Reserved container " + " application="
             + application.getApplicationId() + " resource=" + allocatedResource
-            + " queue=" + this.toString() + " cluster=" + clusterResource);
+            + " queue=" + appInfo.getQueueName()
+            + " cluster=" + clusterResource);
         assignment.getAssignmentInformation().addReservationDetails(
             updatedContainer, application.getCSLeafQueue().getQueuePath());
         assignment.getAssignmentInformation().incrReservations();
@@ -124,9 +125,9 @@
         // Inform the ordering policy
         LOG.info("assignedContainer" + " application attempt=" + application
             .getApplicationAttemptId() + " container=" + updatedContainer
-            .getContainerId() + " queue=" + this + " clusterResource="
-            + clusterResource + " type=" + assignment.getType()
-            + " requestedPartition="
+            .getContainerId() + " queue=" + appInfo.getQueueName()
+            + " clusterResource=" + clusterResource
+            + " type=" + assignment.getType() + " requestedPartition="
             + updatedContainer.getNodeLabelExpression());
 
         assignment.getAssignmentInformation().addAllocationDetails(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index c98aadc..54dd090 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -25,12 +25,10 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.ReservationACL;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.security.AccessType;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation.AllocationFileParser;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation.QueueProperties;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -172,24 +170,6 @@
   }
 
   /**
-   * Get the ACLs associated with this queue. If a given ACL is not explicitly
-   * configured, include the default value for that ACL.  The default for the
-   * root queue is everybody ("*") and the default for all other queues is
-   * nobody ("")
-   */
-  public AccessControlList getQueueAcl(String queue, QueueACL operation) {
-    Map<AccessType, AccessControlList> acls = this.queueAcls.get(queue);
-    if (acls != null) {
-      AccessControlList operationAcl =
-          acls.get(SchedulerUtils.toAccessType(operation));
-      if (operationAcl != null) {
-        return operationAcl;
-      }
-    }
-    return (queue.equals("root")) ? EVERYBODY_ACL : NOBODY_ACL;
-  }
-
-  /**
    * Get the map of ACLs of all queues.
    * @return the map of ACLs of all queues
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index ac88f86..2bbbcc5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -89,6 +89,7 @@
             th(".queue", "Queue").
             th(".fairshare", "Fair Share").
             th(".starttime", "StartTime").
+            th(".launchTime", "LaunchTime").
             th(".finishtime", "FinishTime").
             th(".state", "State").
             th(".finalstatus", "FinalStatus").
@@ -135,6 +136,7 @@
         appInfo.getQueue()))).append("\",\"")
       .append(fairShare).append("\",\"")
       .append(appInfo.getStartTime()).append("\",\"")
+      .append(appInfo.getLaunchTime()).append("\",\"")
       .append(appInfo.getFinishTime()).append("\",\"")
       .append(appInfo.getState()).append("\",\"")
       .append(appInfo.getFinalStatus()).append("\",\"")
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
index d0dccab..a525cff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java
@@ -62,6 +62,7 @@
           .th(".name", "Name").th(".type", "Application Type")
           .th(".queue", "Queue").th(".priority", "Application Priority")
           .th(".starttime", "StartTime")
+          .th(".launchtime", "LaunchTime")
           .th(".finishtime", "FinishTime").th(".state", "State")
           .th(".finalstatus", "FinalStatus")
           .th(".runningcontainer", "Running Containers")
@@ -134,6 +135,7 @@
              .getQueue()))).append("\",\"").append(String
              .valueOf(app.getPriority()))
         .append("\",\"").append(app.getStartedTime())
+        .append("\",\"").append(app.getLaunchTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"")
         .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 631c908..d47f13d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -87,6 +87,7 @@
 
   // these are only allowed if acls allow
   protected long startedTime;
+  private long launchTime;
   protected long finishedTime;
   protected long elapsedTime;
   protected String amContainerLogs;
@@ -181,6 +182,7 @@
       this.clusterId = ResourceManager.getClusterTimeStamp();
       if (hasAccess) {
         this.startedTime = app.getStartTime();
+        this.launchTime = app.getLaunchTime();
         this.finishedTime = app.getFinishTime();
         this.elapsedTime =
             Times.elapsed(app.getStartTime(), app.getFinishTime());
@@ -394,6 +396,10 @@
     return this.startedTime;
   }
 
+  public long getLaunchTime() {
+    return this.launchTime;
+  }
+
   public long getFinishTime() {
     return this.finishedTime;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
index 39a56a8..35c77ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
@@ -70,6 +70,7 @@
     optional int64 finish_time = 7;
     optional hadoop.common.RPCCallerContextProto caller_context = 8;
     repeated ApplicationTimeoutMapProto application_timeouts = 9;
+    optional int64 launch_time = 10;
 }
 
 message ApplicationAttemptStateDataProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 03fc081..f0484e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -722,6 +722,7 @@
     }
 
     // Make sure RM will use the updated GroupMappingServiceProvider
+    Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(conf).refresh();
     List<String> groupBefore =
         new ArrayList<String>(Groups.getUserToGroupsMappingService(
             configuration).getGroups(user));
@@ -1099,6 +1100,7 @@
           .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
 
       // verify UserToGroupsMappings
+      Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(conf).refresh();
       List<String> groupAfter =
           Groups.getUserToGroupsMappingService(configuration).getGroups(
               UserGroupInformation.getCurrentUser().getUserName());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 72de27c..6c6c4b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -92,7 +92,12 @@
     public long getSubmitTime() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
-    
+
+    @Override
+    public long getLaunchTime() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+
     @Override
     public long getFinishTime() {
       throw new UnsupportedOperationException("Not supported yet.");
@@ -272,6 +277,7 @@
     final String name = newAppName();
     final String queue = newQueue();
     final long start = 123456 + i * 1000;
+    final long launch = start + i * 100;
     final long finish = 234567 + i * 1000;
     final String type = YarnConfiguration.DEFAULT_APPLICATION_TYPE;
     YarnApplicationState[] allStates = YarnApplicationState.values();
@@ -308,6 +314,11 @@
       }
 
       @Override
+      public long getLaunchTime() {
+        return launch;
+      }
+
+      @Override
       public long getFinishTime() {
         return finish;
       }
@@ -357,7 +368,7 @@
         ApplicationReport report = ApplicationReport.newInstance(
             getApplicationId(), appAttemptId, getUser(), getQueue(), 
             getName(), null, 0, null, null, getDiagnostics().toString(), 
-            getTrackingUrl(), getStartTime(), getFinishTime(), 
+            getTrackingUrl(), getLaunchTime(), getStartTime(), getFinishTime(),
             getFinalApplicationStatus(), usageReport , null, getProgress(),
             type, null);
         return report;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
old mode 100755
new mode 100644
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
index dbb2148..957d4ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
@@ -358,7 +358,7 @@
         ApplicationStateData.newInstance(appState.getSubmitTime(),
             appState.getStartTime(), appState.getUser(),
             appState.getApplicationSubmissionContext(), RMAppState.FINISHED,
-            "appDiagnostics", 1234, appState.getCallerContext());
+            "appDiagnostics", 123, 1234, appState.getCallerContext());
     appState2.attempts.putAll(appState.attempts);
     store.updateApplicationState(appState2);
 
@@ -384,7 +384,7 @@
     ApplicationStateData dummyApp =
         ApplicationStateData.newInstance(appState.getSubmitTime(),
             appState.getStartTime(), appState.getUser(), dummyContext,
-            RMAppState.FINISHED, "appDiagnostics", 1234, null);
+            RMAppState.FINISHED, "appDiagnostics", 123, 1234, null);
     store.updateApplicationState(dummyApp);
 
     ApplicationAttemptId dummyAttemptId =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
index 0738730..fe4a701 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
@@ -407,7 +407,7 @@
             store.storeApplicationStateInternal(
                 ApplicationId.newInstance(100L, 1),
                 ApplicationStateData.newInstance(111, 111, "user", null,
-                    RMAppState.ACCEPTED, "diagnostics", 333, null));
+                    RMAppState.ACCEPTED, "diagnostics", 222, 333, null));
           } catch (Exception e) {
             assertionFailedInThread.set(true);
             e.printStackTrace();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index 0a1b152..d8718e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -790,7 +790,7 @@
       long finishTime, boolean isFinished) {
     return ApplicationStateData.newInstance(submitTime, startTime, "test",
         ctxt, isFinished ? RMAppState.FINISHED : null, isFinished ?
-        "appDiagnostics" : "", isFinished ? finishTime : 0, null);
+        "appDiagnostics" : "", 0, isFinished ? finishTime : 0, null);
   }
 
   private static ApplicationAttemptStateData createFinishedAttempt(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index 664fae2..ad29d27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -57,6 +57,7 @@
   String name = MockApps.newAppName();
   String queue = MockApps.newQueue();
   long start = System.currentTimeMillis() - (int) (Math.random() * DT);
+  private long launch = start;
   long submit = start - (int) (Math.random() * DT);
   long finish = 0;
   RMAppState state = RMAppState.NEW;
@@ -194,6 +195,11 @@
     return submit;
   }
 
+  @Override
+  public long getLaunchTime() {
+    return launch;
+  }
+
   public void setStartTime(long time) {
     this.start = time;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 920bd4f..70887e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -1239,7 +1239,7 @@
     ApplicationStateData appState =
         ApplicationStateData.newInstance(app.getSubmitTime(), app.getStartTime(),
             app.getUser(), app.getApplicationSubmissionContext(), rmAppState,
-            null, app.getFinishTime(), null);
+            null, app.getLaunchTime(), app.getFinishTime(), null);
     applicationState.put(app.getApplicationId(), appState);
   }
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index 4a7461d..5522333 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -23,7 +23,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue;
@@ -356,26 +355,6 @@
     assertEquals(.4f, queueConf.getQueueMaxAMShare("root.queueD"), 0.01);
     assertEquals(.5f, queueConf.getQueueMaxAMShare("root.queueE"), 0.01);
 
-    // Root should get * ACL
-    assertEquals("*", queueConf.getQueueAcl("root",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals("*", queueConf.getQueueAcl("root",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
-    // Unspecified queues should get default ACL
-    assertEquals(" ", queueConf.getQueueAcl("root.queueA",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals(" ", queueConf.getQueueAcl("root.queueA",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
-    // Queue B ACL
-    assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueB",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-
-    // Queue C ACL
-    assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueC",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
     assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root"));
     assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
@@ -522,20 +501,6 @@
     assertEquals(10, queueConf.getUserMaxApps("user1"));
     assertEquals(5, queueConf.getUserMaxApps("user2"));
 
-    // Unspecified queues should get default ACL
-    assertEquals(" ", queueConf.getQueueAcl("root.queueA",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-    assertEquals(" ", queueConf.getQueueAcl("root.queueA",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
-    // Queue B ACL
-    assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueB",
-        QueueACL.ADMINISTER_QUEUE).getAclString());
-
-    // Queue C ACL
-    assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueC",
-        QueueACL.SUBMIT_APPLICATIONS).getAclString());
-
     assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root"));
     assertEquals(-1, queueConf.getMinSharePreemptionTimeout("root." +
         YarnConfiguration.DEFAULT_QUEUE_NAME));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index b125608..93377be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -257,7 +257,7 @@
               app.getName(), (String) null, 0, (Token) null,
               app.createApplicationState(),
               app.getDiagnostics().toString(), (String) null,
-              app.getStartTime(), app.getFinishTime(),
+              app.getStartTime(), app.getLaunchTime(), app.getFinishTime(),
               app.getFinalApplicationStatus(),
               (ApplicationResourceUsageReport) null, app.getTrackingUrl(),
               app.getProgress(), app.getApplicationType(), (Token) null);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index a381ed4..6c6f400 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1553,6 +1553,7 @@
           WebServicesTestUtils.getXmlString(element, "diagnostics"),
           WebServicesTestUtils.getXmlLong(element, "clusterId"),
           WebServicesTestUtils.getXmlLong(element, "startedTime"),
+          WebServicesTestUtils.getXmlLong(element, "launchTime"),
           WebServicesTestUtils.getXmlLong(element, "finishedTime"),
           WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
           WebServicesTestUtils.getXmlString(element, "amHostHttpAddress"),
@@ -1603,7 +1604,7 @@
   public void verifyAppInfo(JSONObject info, RMApp app, boolean hasResourceReqs)
       throws JSONException, Exception {
 
-    int expectedNumberOfElements = 39 + (hasResourceReqs ? 2 : 0);
+    int expectedNumberOfElements = 40 + (hasResourceReqs ? 2 : 0);
     String appNodeLabelExpression = null;
     String amNodeLabelExpression = null;
     if (app.getApplicationSubmissionContext()
@@ -1629,8 +1630,10 @@
         info.getString("state"), info.getString("finalStatus"),
         (float) info.getDouble("progress"), info.getString("trackingUI"),
         info.getString("diagnostics"), info.getLong("clusterId"),
-        info.getLong("startedTime"), info.getLong("finishedTime"),
-        info.getLong("elapsedTime"), info.getString("amHostHttpAddress"),
+        info.getLong("startedTime"), info.getLong("launchTime"),
+        info.getLong("finishedTime"),
+        info.getLong("elapsedTime"),
+        info.getString("amHostHttpAddress"),
         info.getString("amContainerLogs"), info.getInt("allocatedMB"),
         info.getInt("allocatedVCores"), info.getInt("runningContainers"),
         (float) info.getDouble("queueUsagePercentage"),
@@ -1653,8 +1656,9 @@
   public void verifyAppInfoGeneric(RMApp app, String id, String user,
       String name, String applicationType, String queue, int prioirty,
       String state, String finalStatus, float progress, String trackingUI,
-      String diagnostics, long clusterId, long startedTime, long finishedTime,
-      long elapsedTime, String amHostHttpAddress, String amContainerLogs,
+      String diagnostics, long clusterId, long startedTime,
+      long launchTime, long finishedTime, long elapsedTime,
+      String amHostHttpAddress, String amContainerLogs,
       int allocatedMB, int allocatedVCores, int numContainers,
       float queueUsagePerc, float clusterUsagePerc,
       int preemptedResourceMB, int preemptedResourceVCores,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index ac4afa8..bd425a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -20,6 +20,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
 import org.slf4j.Logger;
@@ -44,6 +45,7 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 @Public
 public class AmIpFilter implements Filter {
@@ -59,7 +61,7 @@
   public static final String PROXY_URI_BASES_DELIMITER = ",";
   private static final String PROXY_PATH = "/proxy";
   //update the proxy IP list about every 5 min
-  private static final long UPDATE_INTERVAL = 5 * 60 * 1000;
+  private static long updateInterval = TimeUnit.MINUTES.toMillis(5);
 
   private String[] proxyHosts;
   private Set<String> proxyAddresses = null;
@@ -99,9 +101,9 @@
   }
 
   protected Set<String> getProxyAddresses() throws ServletException {
-    long now = System.currentTimeMillis();
+    long now = Time.monotonicNow();
     synchronized(this) {
-      if (proxyAddresses == null || (lastUpdate + UPDATE_INTERVAL) >= now) {
+      if (proxyAddresses == null || (lastUpdate + updateInterval) <= now) {
         proxyAddresses = new HashSet<>();
         for (String proxyHost : proxyHosts) {
           try {
@@ -226,4 +228,9 @@
     }
     return isValid;
   }
+
+  @VisibleForTesting
+  protected static void setUpdateInterval(long updateInterval) {
+    AmIpFilter.updateInterval = updateInterval;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
index f775c6d..6eb4d48 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
@@ -49,7 +49,9 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.http.TestHttpServer;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
 import org.eclipse.jetty.server.Server;
@@ -179,6 +181,44 @@
     return server.getURI().toString() + servletPath;
   }
 
+  @Test(timeout = 2000)
+  public void testProxyUpdate() throws Exception {
+    Map<String, String> params = new HashMap<>();
+    params.put(AmIpFilter.PROXY_HOSTS, proxyHost);
+    params.put(AmIpFilter.PROXY_URI_BASES, proxyUri);
+
+    FilterConfig conf = new DummyFilterConfig(params);
+    AmIpFilter filter = new AmIpFilter();
+    int updateInterval = 1000;
+    AmIpFilter.setUpdateInterval(updateInterval);
+    filter.init(conf);
+    filter.getProxyAddresses();
+
+    // check that the configuration was applied
+    assertTrue(filter.getProxyAddresses().contains("127.0.0.1"));
+
+    // change proxy configurations
+    params = new HashMap<>();
+    params.put(AmIpFilter.PROXY_HOSTS, "unknownhost");
+    params.put(AmIpFilter.PROXY_URI_BASES, proxyUri);
+    conf = new DummyFilterConfig(params);
+    filter.init(conf);
+
+    // configurations shouldn't be updated now
+    assertFalse(filter.getProxyAddresses().isEmpty());
+    // waiting for configuration update
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          return filter.getProxyAddresses().isEmpty();
+        } catch (ServletException e) {
+          return true;
+        }
+      }
+    }, 500, updateInterval);
+  }
+
   /**
    * Test AmIpFilter
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
index 7ec2ecb..fb1375a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Configurations.md
@@ -12,7 +12,9 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-This document describes how to configure the services to be deployed on YARN
+# YARN Service Configurations
+
+This document describes how to configure the services to be deployed on YARN.
 
 There are mainly three types of configurations:
 
@@ -93,7 +95,19 @@
 ## Configuration for YARN service AM
 This section describes the configurations for configuring the YARN service AM.
 
-These can be specified either in the cluster `yarn-site.xml` at the global level or in the `properties` field of the `Configuration` object as per service basis like below:
+### System-wide configuration properties
+System-wide service AM properties can only be configured in the cluster `yarn-site.xml` file.
+
+| System-Level Config Name | Description |
+| ------------ | ------------- |
+|yarn.service.framework.path | HDFS parent directory where the service AM dependency tarball can be found.|
+|yarn.service.base.path | HDFS parent directory where service artifacts will be stored (default ${user_home_dir}/.yarn/).|
+|yarn.service.client-am.retry.max-wait-ms | Max retry time in milliseconds for the service client to talk to the service AM (default 900000, i.e. 15 minutes).|
+|yarn.service.client-am.retry-interval-ms | Retry interval in milliseconds for the service client to talk to the service AM (default 2000, i.e. 2 seconds).|
+|yarn.service.queue | Default queue to which the service will be submitted (default submits to the `default` queue). Note that queue can be specified per-service through the queue field, rather than through the service-level configuration properties.|
+
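+For example, a couple of these system-wide properties might be set in the cluster `yarn-site.xml` like below (the wait time and queue name are illustrative values, not the defaults):
+```
+<property>
+  <name>yarn.service.client-am.retry.max-wait-ms</name>
+  <value>600000</value>
+</property>
+<property>
+  <name>yarn.service.queue</name>
+  <value>services</value>
+</property>
+```
+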
+### Service-level configuration properties
+Service-level service AM configuration properties can be specified either in the cluster `yarn-site.xml` at the global level (effectively overriding the default values system-wide) or specified per service in the `properties` field of the `Configuration` object as in the example below:
 ```
 {
     "configuration" : {
@@ -103,34 +117,97 @@
     }
 }
 ```
-Above config make the service AM to be retried at max 10 times.
+The above config allows the service AM to be retried a maximum of 10 times.
 
-#### Available configurations:
-
-| Name | Description |
+| Service-Level Config Name | Description |
 | ------------ | ------------- |
-|yarn.service.client-am.retry.max-wait-ms | the max retry time in milliseconds for the service client to talk to the service AM. By default, it is set to 0, which means no retry |
-|yarn.service.client-am.retry-interval-ms | the retry interval in milliseconds for the service client to talk to the service AM. By default, it is 2000, i.e. 2 seconds |
-|yarn.service.container-failure.retry.max | the max number of retries for the container to be auto restarted if it fails. By default, it is set to -1, which means forever.
-|yarn.service.container-failure.retry-interval-ms| the retry interval in milliseconds for the container to be restarted. By default, it is 30000, i.e. 30 seconds |
-|yarn.service.container-failure.validity-interval-ms | the failure validity interval in milliseconds which when set to a value greater than 0, will not take the failures that happened outside of this interval into failure count. By default, it is set to -1, which means that all the failures so far will be included in failure count. |
-|yarn.service.am-restart.max-attempts| the max number of attempts for the framework AM
-|yarn.service.am-resource.memory | the memory size in GB for the framework AM. By default, it is set to 1024
-|yarn.service.queue | the default queue to which the service will be submitted. By default, it is submitted to `default` queue
-|yarn.service.base.path | the root location for the service artifacts on hdfs for a user. By default, it is under ${user_home_dir}/.yarn/
-|yarn.service.container-failure-per-component.threshold | the max number of container failures for a given component before the AM exits.
-|yarn.service.node-blacklist.threshold | Maximum number of container failures on a node before the node is blacklisted by the AM
-|yarn.service.failure-count-reset.window | The interval in seconds when the `yarn.service.container-failure-per-component.threshold` and `yarn.service.node-blacklist.threshold` gets reset. By default, it is 21600, i.e. 6 hours
-|yarn.service.readiness-check-interval.seconds | The interval in seconds between readiness checks. By default, it is 30 seconds
-|yarn.service.log.include-pattern| The regex expression for including log files whose file name matches it when aggregating the logs after the application completes.
-|yarn.service.log.exclude-pattern| The regex expression for excluding log files whose file name matches it when aggregating the logs after the application completes. If the log file name matches both include and exclude pattern, this file will be excluded.
-|yarn.service.rolling-log.include-pattern| The regex expression for including log files whose file name matches it when aggregating the logs while app is running.
-|yarn.service.rolling-log.exclude-pattern| The regex expression for excluding log files whose file name matches it when aggregating the logs while app is running. If the log file name matches both include and exclude pattern, this file will be excluded.
-|yarn.service.container-recovery.timeout.ms| The timeout in milliseconds after which the service AM releases all the containers of previous attempt which are not yet recovered by the RM. By default, it is set to 120000, i.e. 2 minutes.
+|yarn.service.am-restart.max-attempts | Max number of times to start the service AM, after which the service will be killed (default 20).|
+|yarn.service.am-resource.memory | Memory size in GB for the service AM (default 1024).|
+|yarn.service.am.java.opts | Additional JVM options for the service AM (by default, " -Xmx768m" is appended to any JVM opts that do not specify -Xmx).|
+|yarn.service.container-recovery.timeout.ms | Timeout in milliseconds after which a newly started service AM releases all the containers of previous AM attempts which are not yet recovered from the RM (default 120000, i.e. 2 minutes).|
+|yarn.service.failure-count-reset.window | Interval in seconds after which the container failure counts that are evaluated against the per-component `yarn.service.container-failure-per-component.threshold` and `yarn.service.node-blacklist.threshold` are reset (default 21600, i.e. 6 hours).|
+|yarn.service.readiness-check-interval.seconds | Interval in seconds between readiness checks (default 30 seconds).|
+|yarn.service.log.include-pattern | Regex expression for including log files by name when aggregating the logs after the application completes (default includes all files).|
+|yarn.service.log.exclude-pattern | Regex expression for excluding log files by name when aggregating the logs after the application completes. If the log file name matches both include and exclude pattern, this file will be excluded (default does not exclude any files).|
+|yarn.service.rolling-log.include-pattern | Regex expression for including log files by name when aggregating the logs while app is running.|
+|yarn.service.rolling-log.exclude-pattern | Regex expression for excluding log files by name when aggregating the logs while app is running. If the log file name matches both include and exclude pattern, this file will be excluded.|
+
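+As with the retry example above, any of these properties can be set for a single service; for instance, a service that needs a larger AM and fewer restarts might specify the following (the values are illustrative):
+```
+{
+    "configuration" : {
+        "properties" : {
+            "yarn.service.am-resource.memory" : 2048,
+            "yarn.service.am-restart.max-attempts" : 5
+        }
+    }
+}
+```
+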
+### Component-level configuration properties
+Component-level service AM configuration properties can be specified either in the cluster `yarn-site.xml` at the global level (effectively overriding the default values system-wide), specified per service in the `properties` field of the `Configuration` object, or specified per component in the `properties` field of the component's `Configuration` object.
+
+| Component-Level Config Name | Description |
+| ------------ | ------------- |
+|yarn.service.container-failure.retry.max | Max number of retries for the container to be auto restarted if it fails (default -1, which means forever).|
+|yarn.service.container-failure.retry-interval-ms | Retry interval in milliseconds for the container to be restarted (default 30000, i.e. 30 seconds).|
+|yarn.service.container-failure.validity-interval-ms | Failure validity interval in milliseconds. When set to a value greater than 0, the container retry policy will not take the failures that happened outside of this interval into the failure count (default -1, which means that all the failures so far will be included in the failure count).|
+|yarn.service.container-failure-per-component.threshold | Max number of container failures (not including retries) for a given component before the AM stops the service (default 10).|
+|yarn.service.node-blacklist.threshold | Maximum number of container failures on a node (not including retries) before the node is blacklisted by the AM (default 3).|
+|yarn.service.default-readiness-check.enabled | Whether or not the default readiness check is enabled (default true).|
+
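+For example, a single component can loosen its container retry policy through its own `Configuration` object, as in this sketch (the component name and values are illustrative):
+```
+{
+    "name" : "worker",
+    "configuration" : {
+        "properties" : {
+            "yarn.service.container-failure.retry.max" : 10,
+            "yarn.service.container-failure.retry-interval-ms" : 5000
+        }
+    }
+}
+```
+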
+There is one component-level configuration property that is set differently in the `yarn-site.xml` file than it is in the service specification.
+To select the docker network type that will be used for docker containers, `docker.network` may be set in the service `Configuration` `properties` or the component `Configuration` `properties`.
+The system-wide default for the docker network type (for both YARN service containers and all other application containers) is set via the `yarn.nodemanager.runtime.linux.docker.default-container-network` property in the `yarn-site.xml` file.
+
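+For example, a component spec might select the docker network type in its `Configuration` `properties` as follows (the network name is an illustrative value):
+```
+      "configuration": {
+        "properties": {
+          "docker.network": "bridge"
+        }
+      },
+```
+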
+### Component-level readiness check properties
+The AM can be configured to perform readiness checks for containers through the `Component` field `readiness_check`.
+A container will not reach the `READY` state until its readiness check succeeds.
+If no readiness check is specified, the default readiness check is performed unless it is disabled through the `yarn.service.default-readiness-check.enabled` component-level configuration property.
+
+The default readiness check succeeds when an IP becomes available for a container.
+There are also optional properties that configure a DNS check in addition to the IP check.
+DNS checking ensures that a DNS lookup succeeds for the container hostname before the container is considered ready.
+For example, DNS checking can be enabled for the default readiness check as follows:
+```
+      "readiness_check": {
+        "type": "DEFAULT",
+        "properties": {
+          "dns.check.enabled": "true"
+        }
+      },
+```
+
+Here is a full list of configurable properties for readiness checks that can be performed by the AM.
+
+| Readiness Check | Configurable Property | Description |
+| ------------ | ------------- | ------------- |
+|DEFAULT, HTTP, PORT| dns.check.enabled | true if DNS check should be performed (default false)|
+|DEFAULT, HTTP, PORT| dns.address | optional IP:port address of DNS server to use for DNS check|
+|HTTP| url | required URL for HTTP response check, e.g. http://${THIS_HOST}:8080|
+|HTTP| timeout | connection timeout in milliseconds (default 1000)|
+|HTTP| min.success | minimum response code considered successful (default 200)|
+|HTTP| max.success | maximum response code considered successful (default 299)|
+|PORT| port | required port for socket connection|
+|PORT| timeout | socket connection timeout in milliseconds (default 1000)|
+
+HTTP readiness check example:
+```
+      "readiness_check": {
+        "type": "HTTP",
+        "properties": {
+          "url": "http://${THIS_HOST}:8080"
+        }
+      },
+```
+
+PORT readiness check example:
+```
+      "readiness_check": {
+        "type": "PORT",
+        "properties": {
+          "port": "8080"
+        }
+      },
+```
+
+#### Warning on configuring readiness checks with `host` network for docker containers
+When the `host` docker network is configured for a component that has more than one container and the containers are binding to a specific port, there will be a port collision if the containers happen to be allocated on the same host.
+HTTP and PORT readiness checks will not be valid in this situation.
+In particular, both containers (the one that successfully binds to the port and the one that does not) may have their HTTP or PORT readiness check succeed since the checks are being performed against the same IP (the host's IP).
+A valid configuration for such a service could use the anti-affinity placement policy, ensuring that containers will be assigned on different hosts so that port collisions will not occur.
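+
+For reference, such an anti-affinity placement policy might be expressed in the component spec roughly as below; the exact schema depends on the release, so treat this as a sketch (the target tag is the component's own name, here `web`):
+```
+      "placement_policy": {
+        "constraints": [
+          {
+            "type": "ANTI_AFFINITY",
+            "scope": "NODE",
+            "target_tags": ["web"]
+          }
+        ]
+      },
+```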
 
 ## Constant variables for custom service
 The service framework provides some constant variables for users to configure their services. These variables are either dynamically generated by the system or static ones such as the service name defined by the user.
-User can use these constants in their configurations to be dynamically substituted by the service AM.E.g.
+Users can use these constants in their configurations, and they will be dynamically substituted by the service AM. E.g.
 ```
 {
     "type" : "HADOOP_XML",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/SystemServices.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/SystemServices.md
new file mode 100644
index 0000000..20fed15
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/SystemServices.md
@@ -0,0 +1,66 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# System Services
+
+## Overview
+System services are admin-configured services that are deployed automatically while the ResourceManager bootstraps. This works only when the API-Server is started as part of the ResourceManager; refer to [Manage services on YARN](QuickStart.html#Manage_services_on_YARN_via_REST_API). This document describes how to configure and deploy system services.
+
+## Configuration
+
+| Name | Description |
+| ------------ | ------------- |
+|yarn.service.system-service.dir| FS directory path from which admin-configured services are loaded and deployed. The service spec files must be placed in this directory following the hierarchy described below.|
+
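+For example, the property might be set in the cluster `yarn-site.xml` as follows (the directory path is an illustrative value):
+```
+<property>
+  <name>yarn.service.system-service.dir</name>
+  <value>/services</value>
+</property>
+```
+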
+## Hierarchy of FS path
+After configuring the *yarn.service.system-service.dir* path, place the spec files in the following hierarchy.
+```
+$SYSTEM_SERVICE_DIR_PATH/<Launch-Mode>/<Users>/<Yarnfiles>
+```
+### Launch-Mode
+Launch-Mode indicates how the service should be deployed. Services can be deployed automatically either synchronously or asynchronously.
+
+#### sync
+These services are started synchronously along with the RM, which may slightly delay the RM's transition to active. This is useful for critical services that need to start as soon as possible.
+
+#### async
+These services are started asynchronously and do not impact the RM's transition to active.
+
+### Users
+Users are the owners of their system services and have full access to modify them. Each user can own multiple services. Note that service names are unique per user.
+
+### Yarnfiles
+Yarnfiles are the spec files used to launch services. These files must have the .yarnfile extension; files without it are ignored.
+
+### Example hierarchy for configuring system services
+
+```
+SYSTEM_SERVICE_DIR_PATH
+|---- sync
+|     |--- user1
+|     |    |---- service1.yarnfile
+|     |    |---- service2.yarnfile
+|     |--- user2
+|     |    |---- service3.yarnfile
+|     |    ....
+|     |
+|---- async
+|     |--- user3
+|     |    |---- service1.yarnfile
+|     |    |---- service2.yarnfile
+|     |--- user4
+|     |    |---- service3.yarnfile
+|     |    ....
+|     |
+```
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index 429c8c1..16d0c6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -349,11 +349,13 @@
 
 ### ReadinessCheck
 
-A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases.
+A check to be performed to determine the readiness of a component instance (a container).
+If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component or global level.
+The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
 
 |Name|Description|Required|Schema|Default|
 |----|----|----|----|----|
-|type|E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).|true|enum (HTTP, PORT)||
+|type|DEFAULT (AM checks whether the container has an IP and optionally performs a DNS lookup for the container hostname), HTTP (AM performs default checks, plus sends a REST call to the container and expects a response code between 200 and 299), or PORT (AM performs default checks, plus attempts to open a socket connection to the container on a specified port).|true|enum (DEFAULT, HTTP, PORT)||
 |properties|A blob of key value pairs that will be used to configure the check.|false|object||
 |artifact|Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform the actual container readiness check. At the end it is expected to respond with a 204 No content, just like the simplified use case. This pluggable framework benefits service owners, who can run services without any packaging modifications. Note that only artifacts of type docker are supported for now. NOT IMPLEMENTED YET|false|Artifact||
 
@@ -401,6 +403,7 @@
 |quicklinks|A blob of key-value pairs of quicklinks to be exported for a service.|false|object||
 |queue|The YARN queue that this service should be submitted to.|false|string||
 |kerberos_principal | The principal info of the user who launches the service|false|KerberosPrincipal||
+|docker_client_config|URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json)|false|string||
 
 ### ServiceState
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index dd95765..cffe198 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -140,4 +140,9 @@
     return this.get("state");
   }.property(),
 
+  masterNodeURL: function() {
+    var addr = encodeURIComponent(this.get("nodeHttpAddress"));
+    return `#/yarn-node/${this.get("nodeId")}/${addr}/info/`;
+  }.property("nodeId", "nodeHttpAddress"),
+
 });
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
index d33bf2f..c02c6f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
@@ -53,7 +53,7 @@
     {{#if attempt.nodeHttpAddress}}
     <tr>
       <td>AM Node Web UI</td>
-      <td title="{{attempt.nodeHttpAddress}}"><a href="{{prepend-protocol attempt.nodeHttpAddress}}" target="_blank">{{attempt.nodeHttpAddress}}</a></td>
+      <td title="{{attempt.nodeHttpAddress}}"><a href="{{attempt.masterNodeURL}}">{{attempt.nodeHttpAddress}}</a></td>
     </tr>
     {{/if}}
     {{#if attempt.logsLink}}