Merge branch 'trunk' into HADOOP-13345
diff --git a/BUILDING.txt b/BUILDING.txt
index f9cc842..14deec8 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -7,7 +7,7 @@
 * JDK 1.8+
 * Maven 3.3 or later
 * ProtocolBuffer 2.5.0
-* CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
+* CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS encryption performance)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling fuse_dfs)
@@ -345,7 +345,7 @@
 * JDK 1.8+
 * Maven 3.0 or later
 * ProtocolBuffer 2.5.0
-* CMake 2.6 or newer
+* CMake 3.1 or newer
 * Windows SDK 7.1 or Visual Studio 2010 Professional
 * Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 1775323..31ac611 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -35,7 +35,6 @@
 RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
     build-essential \
     bzip2 \
-    cmake \
     curl \
     doxygen \
     fuse \
@@ -89,11 +88,22 @@
 ######
 RUN mkdir -p /opt/maven && \
     curl -L -s -S \
-         http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
+         https://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
          -o /opt/maven.tar.gz && \
     tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
 ENV MAVEN_HOME /opt/maven
-ENV PATH "$PATH:/opt/maven/bin"
+ENV PATH "${PATH}:/opt/maven/bin"
+
+######
+# Install cmake
+######
+RUN mkdir -p /opt/cmake && \
+    curl -L -s -S \
+         https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \
+         -o /opt/cmake.tar.gz && \
+    tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake
+ENV CMAKE_HOME /opt/cmake
+ENV PATH "${PATH}:/opt/cmake/bin"
 
 ######
 # Install findbugs
@@ -104,6 +114,7 @@
          -o /opt/findbugs.tar.gz && \
     tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs
 ENV FINDBUGS_HOME /opt/findbugs
+ENV PATH "${PATH}:/opt/findbugs/bin"
 
 ####
 # Install shellcheck
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index 5696ba0..1093d8a 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -19,8 +19,14 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.CookieHandler;
+import java.net.HttpCookie;
 import java.net.HttpURLConnection;
+import java.net.URI;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -69,14 +75,99 @@
    */
   public static final String AUTH_COOKIE = "hadoop.auth";
 
-  private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
+  // a lightweight cookie handler that will be attached to url connections.
+  // client code is not required to extract or inject auth cookies.
+  private static class AuthCookieHandler extends CookieHandler {
+    private HttpCookie authCookie;
+    private Map<String, List<String>> cookieHeaders = Collections.emptyMap();
+
+    @Override
+    public synchronized Map<String, List<String>> get(URI uri,
+        Map<String, List<String>> requestHeaders) throws IOException {
+      // call getter so it will reset headers if token is expiring.
+      getAuthCookie();
+      return cookieHeaders;
+    }
+
+    @Override
+    public void put(URI uri, Map<String, List<String>> responseHeaders) {
+      List<String> headers = responseHeaders.get("Set-Cookie");
+      if (headers != null) {
+        for (String header : headers) {
+          List<HttpCookie> cookies;
+          try {
+            cookies = HttpCookie.parse(header);
+          } catch (IllegalArgumentException iae) {
+            // don't care. just skip malformed cookie headers.
+            LOG.debug("Cannot parse cookie header: " + header, iae);
+            continue;
+          }
+          for (HttpCookie cookie : cookies) {
+            if (AUTH_COOKIE.equals(cookie.getName())) {
+              setAuthCookie(cookie);
+            }
+          }
+        }
+      }
+    }
+
+    // return the auth cookie if still valid.
+    private synchronized HttpCookie getAuthCookie() {
+      if (authCookie != null && authCookie.hasExpired()) {
+        setAuthCookie(null);
+      }
+      return authCookie;
+    }
+
+    private synchronized void setAuthCookie(HttpCookie cookie) {
+      final HttpCookie oldCookie = authCookie;
+      // will redefine if new cookie is valid.
+      authCookie = null;
+      cookieHeaders = Collections.emptyMap();
+      boolean valid = cookie != null && !cookie.getValue().isEmpty() &&
+          !cookie.hasExpired();
+      if (valid) {
+        // decrease the lifetime to avoid using a cookie that is about to
+        // expire. this allows authenticators to pre-emptively reauthenticate
+        // and prevent clients from unnecessarily receiving a 401.
+        long maxAge = cookie.getMaxAge();
+        if (maxAge != -1) {
+          cookie.setMaxAge(maxAge * 9/10);
+          valid = !cookie.hasExpired();
+        }
+      }
+      if (valid) {
+        // v0 cookie values aren't quoted by default, but tomcat demands
+        // quoting.
+        if (cookie.getVersion() == 0) {
+          String value = cookie.getValue();
+          if (!value.startsWith("\"")) {
+            value = "\"" + value + "\"";
+            cookie.setValue(value);
+          }
+        }
+        authCookie = cookie;
+        cookieHeaders = new HashMap<>();
+        cookieHeaders.put("Cookie", Arrays.asList(cookie.toString()));
+      }
+      LOG.trace("Setting token value to {} ({})", authCookie, oldCookie);
+    }
+
+    private void setAuthCookieValue(String value) {
+      HttpCookie c = null;
+      if (value != null) {
+        c = new HttpCookie(AUTH_COOKIE, value);
+      }
+      setAuthCookie(c);
+    }
+  }
 
   /**
    * Client side authentication token.
    */
   public static class Token {
 
-    private String token;
+    private final AuthCookieHandler cookieHandler = new AuthCookieHandler();
 
     /**
      * Creates a token.
@@ -102,7 +193,7 @@
      * @return if a token from the server has been set.
      */
     public boolean isSet() {
-      return token != null;
+      return cookieHandler.getAuthCookie() != null;
     }
 
     /**
@@ -111,7 +202,36 @@
     * @param tokenStr string representation of the token.
      */
     void set(String tokenStr) {
-      token = tokenStr;
+      cookieHandler.setAuthCookieValue(tokenStr);
+    }
+
+    /**
+     * Installs a cookie handler for the http request to manage session
+     * cookies.
+     * @param url the URL to open.
+     * @param connConfigurator the connection configurator, may be null.
+     * @return the opened HttpURLConnection.
+     * @throws IOException if an IO error occurred.
+     */
+    HttpURLConnection openConnection(URL url,
+        ConnectionConfigurator connConfigurator) throws IOException {
+      // the cookie handler is unfortunately a global static.  synchronizing
+      // on the class lets us safely swap the handler while instantiating
+      // the connection object, preventing it from leaking into other
+      // connections.
+      final HttpURLConnection conn;
+      synchronized(CookieHandler.class) {
+        CookieHandler current = CookieHandler.getDefault();
+        CookieHandler.setDefault(cookieHandler);
+        try {
+          conn = (HttpURLConnection)url.openConnection();
+        } finally {
+          CookieHandler.setDefault(current);
+        }
+      }
+      if (connConfigurator != null) {
+        connConfigurator.configure(conn);
+      }
+      return conn;
     }
 
     /**
@@ -121,7 +241,15 @@
      */
     @Override
     public String toString() {
-      return token;
+      String value = "";
+      HttpCookie authCookie = cookieHandler.getAuthCookie();
+      if (authCookie != null) {
+        value = authCookie.getValue();
+        if (value.startsWith("\"")) { // tests don't want the quotes.
+          value = value.substring(1, value.length()-1);
+        }
+      }
+      return value;
     }
 
   }
@@ -218,27 +346,25 @@
       throw new IllegalArgumentException("token cannot be NULL");
     }
     authenticator.authenticate(url, token);
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    if (connConfigurator != null) {
-      conn = connConfigurator.configure(conn);
-    }
-    injectToken(conn, token);
-    return conn;
+
+    // allow the token to create the connection with a cookie handler for
+    // managing session cookies.
+    return token.openConnection(url, connConfigurator);
   }
 
   /**
-   * Helper method that injects an authentication token to send with a connection.
+   * Helper method that injects an authentication token to send with a
+   * connection. Callers should prefer using
+   * {@link Token#openConnection(URL, ConnectionConfigurator)} which
+   * automatically manages authentication tokens.
    *
    * @param conn connection to inject the authentication token into.
    * @param token authentication token to inject.
    */
   public static void injectToken(HttpURLConnection conn, Token token) {
-    String t = token.token;
-    if (t != null) {
-      if (!t.startsWith("\"")) {
-        t = "\"" + t + "\"";
-      }
-      conn.addRequestProperty("Cookie", AUTH_COOKIE_EQ + t);
+    HttpCookie authCookie = token.cookieHandler.getAuthCookie();
+    if (authCookie != null) {
+      conn.addRequestProperty("Cookie", authCookie.toString());
     }
   }
 
@@ -258,24 +384,10 @@
     if (respCode == HttpURLConnection.HTTP_OK
         || respCode == HttpURLConnection.HTTP_CREATED
         || respCode == HttpURLConnection.HTTP_ACCEPTED) {
-      Map<String, List<String>> headers = conn.getHeaderFields();
-      List<String> cookies = headers.get("Set-Cookie");
-      if (cookies != null) {
-        for (String cookie : cookies) {
-          if (cookie.startsWith(AUTH_COOKIE_EQ)) {
-            String value = cookie.substring(AUTH_COOKIE_EQ.length());
-            int separator = value.indexOf(";");
-            if (separator > -1) {
-              value = value.substring(0, separator);
-            }
-            if (value.length() > 0) {
-              LOG.trace("Setting token value to {} ({}), resp={}", value,
-                  token, respCode);
-              token.set(value);
-            }
-          }
-        }
-      }
+      // cookie handler should have already extracted the token.  try again
+      // for backwards compatibility if this method is called on a connection
+      // not opened via this instance.
+      token.cookieHandler.put(null, conn.getHeaderFields());
     } else if (respCode == HttpURLConnection.HTTP_NOT_FOUND) {
       LOG.trace("Setting token value to null ({}), resp={}", token, respCode);
       token.set(null);
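
The net effect of the AuthenticatedURL rework: the Token now owns a per-session cookie handler, so callers open connections through the token and never touch the hadoop.auth cookie directly. A minimal client sketch, assuming a reachable authenticated endpoint (the URL below is hypothetical):

```java
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class AuthenticatedURLExample {
  public static void main(String[] args) throws Exception {
    // One Token per logical session; its embedded cookie handler captures
    // the hadoop.auth cookie from responses and replays it on later requests.
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    AuthenticatedURL authUrl = new AuthenticatedURL();

    URL url = new URL("http://host:14000/webhdfs/v1/?op=GETHOMEDIRECTORY");
    HttpURLConnection conn = authUrl.openConnection(url, token);
    System.out.println("HTTP " + conn.getResponseCode()
        + ", token set: " + token.isSet());

    // No injectToken()/extractToken() needed: reusing the same token on a
    // second connection replays the session cookie automatically.
    HttpURLConnection conn2 = authUrl.openConnection(url, token);
    conn2.getResponseCode();
  }
}
```
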
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 9bcebc3..942d13c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -147,7 +147,6 @@
   }
   
   private URL url;
-  private HttpURLConnection conn;
   private Base64 base64;
   private ConnectionConfigurator connConfigurator;
 
@@ -182,10 +181,7 @@
     if (!token.isSet()) {
       this.url = url;
       base64 = new Base64(0);
-      conn = (HttpURLConnection) url.openConnection();
-      if (connConfigurator != null) {
-        conn = connConfigurator.configure(conn);
-      }
+      HttpURLConnection conn = token.openConnection(url, connConfigurator);
       conn.setRequestMethod(AUTH_HTTP_METHOD);
       conn.connect();
       
@@ -200,7 +196,7 @@
         }
         needFallback = true;
       }
-      if (!needFallback && isNegotiate()) {
+      if (!needFallback && isNegotiate(conn)) {
         LOG.debug("Performing our own SPNEGO sequence.");
         doSpnegoSequence(token);
       } else {
@@ -249,7 +245,7 @@
   /*
   * Indicates if the response is starting a SPNEGO negotiation.
   */
-  private boolean isNegotiate() throws IOException {
+  private boolean isNegotiate(HttpURLConnection conn) throws IOException {
     boolean negotiate = false;
     if (conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
       String authHeader = conn.getHeaderField(WWW_AUTHENTICATE);
@@ -267,7 +263,8 @@
    * @throws IOException if an IO error occurred.
    * @throws AuthenticationException if an authentication error occurred.
    */
-  private void doSpnegoSequence(AuthenticatedURL.Token token) throws IOException, AuthenticationException {
+  private void doSpnegoSequence(final AuthenticatedURL.Token token)
+      throws IOException, AuthenticationException {
     try {
       AccessControlContext context = AccessController.getContext();
       Subject subject = Subject.getSubject(context);
@@ -308,13 +305,15 @@
 
             // Loop while the context is still not established
             while (!established) {
+              HttpURLConnection conn =
+                  token.openConnection(url, connConfigurator);
               outToken = gssContext.initSecContext(inToken, 0, inToken.length);
               if (outToken != null) {
-                sendToken(outToken);
+                sendToken(conn, outToken);
               }
 
               if (!gssContext.isEstablished()) {
-                inToken = readToken();
+                inToken = readToken(conn);
               } else {
                 established = true;
               }
@@ -337,18 +336,14 @@
     } catch (LoginException ex) {
       throw new AuthenticationException(ex);
     }
-    AuthenticatedURL.extractToken(conn, token);
   }
 
   /*
   * Sends the Kerberos token to the server.
   */
-  private void sendToken(byte[] outToken) throws IOException {
+  private void sendToken(HttpURLConnection conn, byte[] outToken)
+      throws IOException {
     String token = base64.encodeToString(outToken);
-    conn = (HttpURLConnection) url.openConnection();
-    if (connConfigurator != null) {
-      conn = connConfigurator.configure(conn);
-    }
     conn.setRequestMethod(AUTH_HTTP_METHOD);
     conn.setRequestProperty(AUTHORIZATION, NEGOTIATE + " " + token);
     conn.connect();
@@ -357,7 +352,8 @@
   /*
   * Retrieves the Kerberos token returned by the server.
   */
-  private byte[] readToken() throws IOException, AuthenticationException {
+  private byte[] readToken(HttpURLConnection conn)
+      throws IOException, AuthenticationException {
     int status = conn.getResponseCode();
     if (status == HttpURLConnection.HTTP_OK || status == HttpURLConnection.HTTP_UNAUTHORIZED) {
       String authHeader = conn.getHeaderField(WWW_AUTHENTICATE);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
index 29ca9cf..66c6252 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
@@ -68,10 +68,7 @@
     String paramSeparator = (strUrl.contains("?")) ? "&" : "?";
     strUrl += paramSeparator + USER_NAME_EQ + getUserName();
     url = new URL(strUrl);
-    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-    if (connConfigurator != null) {
-      conn = connConfigurator.configure(conn);
-    }
+    HttpURLConnection conn = token.openConnection(url, connConfigurator);
     conn.setRequestMethod("OPTIONS");
     conn.connect();
     AuthenticatedURL.extractToken(conn, token);
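
Both authenticators now route their handshake connections through Token.openConnection(), so the SPNEGO round trips and the simple-auth OPTIONS request share the same cookie management. A sketch with the pseudo (simple) authenticator, which appends the user name taken from the JVM's user.name property (endpoint hypothetical):

```java
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;

public class PseudoAuthExample {
  public static void main(String[] args) throws Exception {
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    // PseudoAuthenticator issues an OPTIONS request with ?user.name=<user>
    // appended; the resulting hadoop.auth cookie lands in the token.
    AuthenticatedURL authUrl = new AuthenticatedURL(new PseudoAuthenticator());
    HttpURLConnection conn = authUrl.openConnection(
        new URL("http://host:14000/webhdfs/v1/?op=LISTSTATUS"), token);
    System.out.println("HTTP " + conn.getResponseCode());
  }
}
```
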
diff --git a/hadoop-common-project/hadoop-common/HadoopCommon.cmake b/hadoop-common-project/hadoop-common/HadoopCommon.cmake
index c46d9e5..faabeed 100644
--- a/hadoop-common-project/hadoop-common/HadoopCommon.cmake
+++ b/hadoop-common-project/hadoop-common/HadoopCommon.cmake
@@ -117,19 +117,25 @@
     endif()
 endmacro()
 
-#
-# Configuration.
-#
 
-# Initialise the shared gcc/g++ flags if they aren't already defined.
-if(NOT DEFINED GCC_SHARED_FLAGS)
-    set(GCC_SHARED_FLAGS "-g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64")
+# set the shared compiler flags
+# support for GNU C/C++, add other compilers as necessary
+
+if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+  if(NOT DEFINED GCC_SHARED_FLAGS)
+    find_package(Threads REQUIRED)
+    if(CMAKE_USE_PTHREADS_INIT)
+      set(GCC_SHARED_FLAGS "-g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64")
+    else()
+      set(GCC_SHARED_FLAGS "-g -O2 -Wall -D_FILE_OFFSET_BITS=64")
+    endif()
+  endif()
+elseif (CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
+        CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
+  set(GCC_SHARED_FLAGS "-g -O2 -Wall -D_FILE_OFFSET_BITS=64")
 endif()
 
-# Add in support other compilers here, if necessary,
-# the assumption is that GCC or a GCC-compatible compiler is being used.
-
-# Set the shared GCC-compatible compiler and linker flags.
+# Set the shared linker flags.
 hadoop_add_compiler_flags("${GCC_SHARED_FLAGS}")
 hadoop_add_linker_flags("${LINKER_SHARED_FLAGS}")
 
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index 10b0f23..b9287c0 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -20,7 +20,7 @@
 # CMake configuration.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/..)
 include(HadoopCommon)
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index 2bf5c02..d282c58 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -223,4 +223,15 @@
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
   </property>
+
+  <property>
+    <name>security.collector-nodemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for CollectorNodemanagerProtocol, used by the nodemanager
+    if timeline service v2 is enabled, for the timeline collector and
+    nodemanager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
 </configuration>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index edaee68..a1a40b9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -308,18 +308,25 @@
       this.customMessage = customMessage;
     }
 
+    private final String getWarningMessage(String key) {
+      return getWarningMessage(key, null);
+    }
+
     /**
      * Method to provide the warning message. It gives the custom message if
      * non-null, and default message otherwise.
      * @param key the associated deprecated key.
+     * @param source the property source.
      * @return message that is to be logged when a deprecated key is used.
      */
-    private final String getWarningMessage(String key) {
+    private String getWarningMessage(String key, String source) {
       String warningMessage;
       if(customMessage == null) {
         StringBuilder message = new StringBuilder(key);
-        String deprecatedKeySuffix = " is deprecated. Instead, use ";
-        message.append(deprecatedKeySuffix);
+        if (source != null) {
+          message.append(" in " + source);
+        }
+        message.append(" is deprecated. Instead, use ");
         for (int i = 0; i < newKeys.length; i++) {
           message.append(newKeys[i]);
           if(i != newKeys.length-1) {
@@ -593,6 +600,14 @@
     return deprecationContext.get().getDeprecatedKeyMap().containsKey(key);
   }
 
+  private static String getDeprecatedKey(String key) {
+    return deprecationContext.get().getReverseDeprecatedKeyMap().get(key);
+  }
+
+  private static DeprecatedKeyInfo getDeprecatedKeyInfo(String key) {
+    return deprecationContext.get().getDeprecatedKeyMap().get(key);
+  }
+
   /**
    * Sets all deprecated properties that are not currently set but have a
    * corresponding new property that is set. Useful for iterating the
@@ -1270,6 +1285,13 @@
     LOG_DEPRECATION.info(message);
   }
 
+  void logDeprecationOnce(String name, String source) {
+    DeprecatedKeyInfo keyInfo = getDeprecatedKeyInfo(name);
+    if (keyInfo != null && !keyInfo.getAndSetAccessed()) {
+      LOG_DEPRECATION.info(keyInfo.getWarningMessage(name, source));
+    }
+  }
+
   /**
    * Unset a previously set property.
    */
@@ -2080,6 +2102,47 @@
   }
 
   /**
+   * Get the credential entry by name from a credential provider.
+   *
+   * Handle key deprecation.
+   *
+   * @param provider a credential provider
+   * @param name alias of the credential
+   * @return the credential entry or null if not found
+   */
+  private CredentialEntry getCredentialEntry(CredentialProvider provider,
+                                             String name) throws IOException {
+    CredentialEntry entry = provider.getCredentialEntry(name);
+    if (entry != null) {
+      return entry;
+    }
+
+    // The old name is stored in the credential provider.
+    String oldName = getDeprecatedKey(name);
+    if (oldName != null) {
+      entry = provider.getCredentialEntry(oldName);
+      if (entry != null) {
+        logDeprecationOnce(oldName, provider.toString());
+        return entry;
+      }
+    }
+
+    // The name is deprecated.
+    DeprecatedKeyInfo keyInfo = getDeprecatedKeyInfo(name);
+    if (keyInfo != null && keyInfo.newKeys != null) {
+      for (String newName : keyInfo.newKeys) {
+        entry = provider.getCredentialEntry(newName);
+        if (entry != null) {
+          logDeprecationOnce(name, null);
+          return entry;
+        }
+      }
+    }
+
+    return null;
+  }
+
+  /**
    * Try and resolve the provided element name as a credential provider
    * alias.
    * @param name alias of the provisioned credential
@@ -2096,7 +2159,7 @@
       if (providers != null) {
         for (CredentialProvider provider : providers) {
           try {
-            CredentialEntry entry = provider.getCredentialEntry(name);
+            CredentialEntry entry = getCredentialEntry(provider, name);
             if (entry != null) {
               pass = entry.getCredential();
               break;
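
The credential lookup above gives Configuration.getPassword() deprecation awareness in both directions: a current name falls back to the credential stored under its deprecated alias (logging the deprecation once against the provider), and a deprecated name falls back to its replacements. A hedged sketch, with a hypothetical provider path:

```java
import org.apache.hadoop.conf.Configuration;

public class CredentialLookupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical JCEKS keystore holding the credentials
    conf.set("hadoop.security.credential.provider.path",
        "jceks://file/tmp/creds.jceks");
    // Even if the secret was stored under a now-deprecated alias,
    // getPassword() resolves it via the reverse deprecation map.
    char[] secret = conf.getPassword("ssl.server.keystore.password");
    System.out.println(secret != null ? "credential found" : "not found");
  }
}
```
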
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index ea5ff28..8c879b3c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -22,6 +22,8 @@
 import java.nio.ByteBuffer;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
+import java.util.List;
+import java.util.ListIterator;
 
 import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
@@ -247,6 +249,25 @@
      */
     EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
         throws IOException, GeneralSecurityException;
+
+    /**
+     * Batched version of {@link #reencryptEncryptedKey(EncryptedKeyVersion)}.
+     * <p>
+     * Re-encrypts each encrypted key version in the list, using its
+     * initialization vector and key material, but with the latest key
+     * version name of its key name. If the latest key version name in the
+     * provider is the same as the one that encrypted the passed-in
+     * encrypted key version, that entry is left unchanged. The list is
+     * updated in place.
+     * <p>
+     * NOTE: The generated key is not stored by the <code>KeyProvider</code>.
+     *
+     * @param  ekvs List containing the EncryptedKeyVersions to re-encrypt.
+     * @throws IOException If any EncryptedKeyVersion could not be re-encrypted
+     * @throws GeneralSecurityException If any EncryptedKeyVersion could not be
+     *                            re-encrypted because of a cryptographic issue.
+     */
+    void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+        throws IOException, GeneralSecurityException;
   }
 
   private static class DefaultCryptoExtension implements CryptoExtension {
@@ -315,7 +336,7 @@
           .checkNotNull(ekNow, "KeyVersion name '%s' does not exist", ekName);
       Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
               .equals(KeyProviderCryptoExtension.EEK),
-          "encryptedKey version name must be '%s', is '%s'",
+          "encryptedKey version name must be '%s', but found '%s'",
           KeyProviderCryptoExtension.EEK,
           ekv.getEncryptedKeyVersion().getVersionName());
 
@@ -336,30 +357,67 @@
     }
 
     @Override
-    public KeyVersion decryptEncryptedKey(
-        EncryptedKeyVersion encryptedKeyVersion) throws IOException,
-        GeneralSecurityException {
-      // Fetch the encryption key material
-      final String encryptionKeyVersionName =
-          encryptedKeyVersion.getEncryptionKeyVersionName();
-      final KeyVersion encryptionKey =
-          keyProvider.getKeyVersion(encryptionKeyVersionName);
-      Preconditions.checkNotNull(encryptionKey,
-          "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
-      Preconditions.checkArgument(
-              encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
-                    .equals(KeyProviderCryptoExtension.EEK),
-                "encryptedKey version name must be '%s', is '%s'",
-                KeyProviderCryptoExtension.EEK,
-                encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
-            );
+    public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+        throws IOException, GeneralSecurityException {
+      Preconditions.checkNotNull(ekvs, "Input list is null");
+      KeyVersion ekNow = null;
+      Decryptor decryptor = null;
+      Encryptor encryptor = null;
+      try (CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf())) {
+        decryptor = cc.createDecryptor();
+        encryptor = cc.createEncryptor();
+        ListIterator<EncryptedKeyVersion> iter = ekvs.listIterator();
+        while (iter.hasNext()) {
+          final EncryptedKeyVersion ekv = iter.next();
+          Preconditions.checkNotNull(ekv, "EncryptedKeyVersion is null");
+          final String ekName = ekv.getEncryptionKeyName();
+          Preconditions.checkNotNull(ekName, "Key name is null");
+          Preconditions.checkNotNull(ekv.getEncryptedKeyVersion(),
+              "EncryptedKeyVersion is null");
+          Preconditions.checkArgument(
+              ekv.getEncryptedKeyVersion().getVersionName()
+                  .equals(KeyProviderCryptoExtension.EEK),
+              "encryptedKey version name must be '%s', but found '%s'",
+              KeyProviderCryptoExtension.EEK,
+              ekv.getEncryptedKeyVersion().getVersionName());
 
+          if (ekNow == null) {
+            ekNow = keyProvider.getCurrentKey(ekName);
+            Preconditions
+                .checkNotNull(ekNow, "Key name '%s' does not exist", ekName);
+          } else {
+            Preconditions.checkArgument(ekNow.getName().equals(ekName),
+                "All keys must have the same key name. Expected '%s' "
+                    + "but found '%s'", ekNow.getName(), ekName);
+          }
+
+          final String encryptionKeyVersionName =
+              ekv.getEncryptionKeyVersionName();
+          final KeyVersion encryptionKey =
+              keyProvider.getKeyVersion(encryptionKeyVersionName);
+          Preconditions.checkNotNull(encryptionKey,
+              "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
+          if (encryptionKey.equals(ekNow)) {
+            // no-op if same key version
+            continue;
+          }
+
+          final KeyVersion ek =
+              decryptEncryptedKey(decryptor, encryptionKey, ekv);
+          iter.set(generateEncryptedKey(encryptor, ekNow, ek.getMaterial(),
+              ekv.getEncryptedKeyIv()));
+        }
+      }
+    }
+
+    private KeyVersion decryptEncryptedKey(final Decryptor decryptor,
+        final KeyVersion encryptionKey,
+        final EncryptedKeyVersion encryptedKeyVersion)
+        throws IOException, GeneralSecurityException {
       // Encryption key IV is determined from encrypted key's IV
       final byte[] encryptionIV =
           EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
 
-      CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
-      Decryptor decryptor = cc.createDecryptor();
       decryptor.init(encryptionKey.getMaterial(), encryptionIV);
       final KeyVersion encryptedKV =
           encryptedKeyVersion.getEncryptedKeyVersion();
@@ -372,11 +430,36 @@
       bbOut.flip();
       byte[] decryptedKey = new byte[keyLen];
       bbOut.get(decryptedKey);
-      cc.close();
       return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
     }
 
     @Override
+    public KeyVersion decryptEncryptedKey(
+        EncryptedKeyVersion encryptedKeyVersion)
+        throws IOException, GeneralSecurityException {
+      // Fetch the encryption key material
+      final String encryptionKeyVersionName =
+          encryptedKeyVersion.getEncryptionKeyVersionName();
+      final KeyVersion encryptionKey =
+          keyProvider.getKeyVersion(encryptionKeyVersionName);
+      Preconditions
+          .checkNotNull(encryptionKey, "KeyVersion name '%s' does not exist",
+              encryptionKeyVersionName);
+      Preconditions.checkArgument(
+          encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+              .equals(KeyProviderCryptoExtension.EEK),
+          "encryptedKey version name must be '%s', but found '%s'",
+          KeyProviderCryptoExtension.EEK,
+          encryptedKeyVersion.getEncryptedKeyVersion().getVersionName());
+
+      try (CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf())) {
+        final Decryptor decryptor = cc.createDecryptor();
+        return decryptEncryptedKey(decryptor, encryptionKey,
+            encryptedKeyVersion);
+      }
+    }
+
+    @Override
     public void warmUpEncryptedKeys(String... keyNames)
         throws IOException {
       // NO-OP since the default version does not cache any keys
@@ -471,6 +554,28 @@
   }
 
   /**
+   * Batched version of {@link #reencryptEncryptedKey(EncryptedKeyVersion)}.
+   * <p>
+   * Re-encrypts each encrypted key version in the list, using its
+   * initialization vector and key material, but with the latest key
+   * version name of its key name. If the latest key version name in the
+   * provider is the same as the one that encrypted the passed-in
+   * encrypted key version, that entry is left unchanged. The list is
+   * updated in place with the re-encrypted versions, in the same order.
+   * <p>
+   * NOTE: The generated key is not stored by the <code>KeyProvider</code>.
+   *
+   * @param  ekvs List containing the EncryptedKeyVersions to re-encrypt.
+   * @throws IOException If any EncryptedKeyVersion could not be re-encrypted
+   * @throws GeneralSecurityException If any EncryptedKeyVersion could not be
+   *                            re-encrypted because of a cryptographic issue.
+   */
+  public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+      throws IOException, GeneralSecurityException {
+    getExtension().reencryptEncryptedKeys(ekvs);
+  }
+
+  /**
    * Creates a <code>KeyProviderCryptoExtension</code> using a given
    * {@link KeyProvider}.
    * <p/>
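
A sketch of the new batch call from the extension's consumer side, assuming an existing provider and a list of EDEKs (names below are hypothetical); all entries must share one key name, and the list is rewritten in place:

```java
import java.util.List;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

public class ReencryptBatchExample {
  // "provider" and "edeks" are assumed to be supplied by the caller;
  // every EncryptedKeyVersion in edeks must be for the same key name.
  static void reencryptAfterRoll(KeyProvider provider,
      List<EncryptedKeyVersion> edeks) throws Exception {
    KeyProviderCryptoExtension kpExt = KeyProviderCryptoExtension
        .createKeyProviderCryptoExtension(provider);
    provider.rollNewVersion("mykey");      // hypothetical key name
    kpExt.reencryptEncryptedKeys(edeks);   // updated in place, same order
  }
}
```
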
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 20ad58c..9bef32c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -32,8 +32,6 @@
 import org.apache.hadoop.security.ProviderUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
@@ -70,7 +68,6 @@
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
@@ -84,6 +81,13 @@
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
+import static org.apache.hadoop.util.KMSUtil.checkNotNull;
+import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersion;
+import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersions;
+import static org.apache.hadoop.util.KMSUtil.parseJSONKeyVersion;
+import static org.apache.hadoop.util.KMSUtil.parseJSONMetadata;
+
 /**
  * KMS client <code>KeyProvider</code> implementation.
  */
@@ -219,77 +223,11 @@
     }
   }
 
-  @SuppressWarnings("rawtypes")
-  private static List<EncryptedKeyVersion>
-      parseJSONEncKeyVersions(String keyName, List valueList) {
-    List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
-    if (!valueList.isEmpty()) {
-      for (Object values : valueList) {
-        Map valueMap = (Map) values;
-        ekvs.add(parseJSONEncKeyVersion(keyName, valueMap));
-      }
-    }
-    return ekvs;
-  }
-
-  private static EncryptedKeyVersion parseJSONEncKeyVersion(String keyName,
-      Map valueMap) {
-    String versionName = checkNotNull(
-        (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
-        KMSRESTConstants.VERSION_NAME_FIELD);
-
-    byte[] iv = Base64.decodeBase64(checkNotNull(
-        (String) valueMap.get(KMSRESTConstants.IV_FIELD),
-        KMSRESTConstants.IV_FIELD));
-
-    Map encValueMap = checkNotNull((Map)
-            valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
-        KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
-
-    String encVersionName = checkNotNull((String)
-            encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
-        KMSRESTConstants.VERSION_NAME_FIELD);
-
-    byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
-            encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
-        KMSRESTConstants.MATERIAL_FIELD));
-
-    return new KMSEncryptedKeyVersion(keyName, versionName, iv,
-        encVersionName, encKeyMaterial);
-  }
-
-  private static KeyVersion parseJSONKeyVersion(Map valueMap) {
-    KeyVersion keyVersion = null;
-    if (!valueMap.isEmpty()) {
-      byte[] material = (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD))
-          ? Base64.decodeBase64((String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD))
-          : null;
-      String versionName = (String)valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD);
-      String keyName = (String)valueMap.get(KMSRESTConstants.NAME_FIELD);
-      keyVersion = new KMSKeyVersion(keyName, versionName, material);
-    }
-    return keyVersion;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static Metadata parseJSONMetadata(Map valueMap) {
-    Metadata metadata = null;
-    if (!valueMap.isEmpty()) {
-      metadata = new KMSMetadata(
-          (String) valueMap.get(KMSRESTConstants.CIPHER_FIELD),
-          (Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD),
-          (String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD),
-          (Map<String, String>) valueMap.get(KMSRESTConstants.ATTRIBUTES_FIELD),
-          new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)),
-          (Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD));
-    }
-    return metadata;
-  }
-
-  private static void writeJson(Map map, OutputStream os) throws IOException {
+  private static void writeJson(Object obj, OutputStream os)
+      throws IOException {
     Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
     ObjectMapper jsonMapper = new ObjectMapper();
-    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
+    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
   }
 
   /**
@@ -360,25 +298,6 @@
     }
   }
 
-  public static <T> T checkNotNull(T o, String name)
-      throws IllegalArgumentException {
-    if (o == null) {
-      throw new IllegalArgumentException("Parameter '" + name +
-          "' cannot be null");
-    }
-    return o;
-  }
-
-  public static String checkNotEmpty(String s, String name)
-      throws IllegalArgumentException {
-    checkNotNull(s, name);
-    if (s.isEmpty()) {
-      throw new IllegalArgumentException("Parameter '" + name +
-          "' cannot be empty");
-    }
-    return s;
-  }
-
   private String kmsUrl;
   private SSLFactory sslFactory;
   private ConnectionConfigurator configurator;
@@ -560,12 +479,12 @@
     return conn;
   }
 
-  private <T> T call(HttpURLConnection conn, Map jsonOutput,
+  private <T> T call(HttpURLConnection conn, Object jsonOutput,
       int expectedResponse, Class<T> klass) throws IOException {
     return call(conn, jsonOutput, expectedResponse, klass, authRetry);
   }
 
-  private <T> T call(HttpURLConnection conn, Map jsonOutput,
+  private <T> T call(HttpURLConnection conn, Object jsonOutput,
       int expectedResponse, Class<T> klass, int authRetryCount)
       throws IOException {
     T ret = null;
@@ -601,17 +520,6 @@
             authRetryCount - 1);
       }
     }
-    try {
-      AuthenticatedURL.extractToken(conn, authToken);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Extracted token, authToken={}, its dt={}", authToken,
-            authToken.getDelegationToken());
-      }
-    } catch (AuthenticationException e) {
-      // Ignore the AuthExceptions.. since we are just using the method to
-      // extract and set the authToken.. (Workaround till we actually fix
-      // AuthenticatedURL properly to set authToken post initialization)
-    }
     HttpExceptionUtils.validateResponse(conn, expectedResponse);
     if (conn.getContentType() != null
         && conn.getContentType().trim().toLowerCase()
@@ -885,6 +793,48 @@
   }
 
   @Override
+  public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+      throws IOException, GeneralSecurityException {
+    checkNotNull(ekvs, "ekvs");
+    if (ekvs.isEmpty()) {
+      return;
+    }
+    final List<Map> jsonPayload = new ArrayList<>();
+    String keyName = null;
+    for (EncryptedKeyVersion ekv : ekvs) {
+      checkNotNull(ekv.getEncryptionKeyName(), "keyName");
+      checkNotNull(ekv.getEncryptionKeyVersionName(), "versionName");
+      checkNotNull(ekv.getEncryptedKeyIv(), "iv");
+      checkNotNull(ekv.getEncryptedKeyVersion(), "encryptedKey");
+      Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
+              .equals(KeyProviderCryptoExtension.EEK),
+          "encryptedKey version name must be '%s', is '%s'",
+          KeyProviderCryptoExtension.EEK,
+          ekv.getEncryptedKeyVersion().getVersionName());
+      if (keyName == null) {
+        keyName = ekv.getEncryptionKeyName();
+      } else {
+        Preconditions.checkArgument(keyName.equals(ekv.getEncryptionKeyName()),
+            "All EncryptedKey must have the same key name.");
+      }
+      jsonPayload.add(KMSUtil.toJSON(ekv));
+    }
+    final URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName,
+        KMSRESTConstants.REENCRYPT_BATCH_SUB_RESOURCE, null);
+    final HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    final List<Map> response =
+        call(conn, jsonPayload, HttpURLConnection.HTTP_OK, List.class);
+    Preconditions.checkArgument(response.size() == ekvs.size(),
+        "Response size is different than input size.");
+    for (int i = 0; i < response.size(); ++i) {
+      final Map item = response.get(i);
+      final EncryptedKeyVersion ekv = parseJSONEncKeyVersion(keyName, item);
+      ekvs.set(i, ekv);
+    }
+  }
+
+  @Override
   public List<KeyVersion> getKeyVersions(String name) throws IOException {
     checkNotEmpty(name, "name");
     URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
index 81bdd33..e0197e0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
@@ -37,6 +37,7 @@
   public static final String EEK_SUB_RESOURCE = "_eek";
   public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
   public static final String INVALIDATECACHE_RESOURCE = "_invalidatecache";
+  public static final String REENCRYPT_BATCH_SUB_RESOURCE = "_reencryptbatch";
 
   public static final String KEY = "key";
   public static final String EEK_OP = "eek_op";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 6e010b1..de4d25a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -313,6 +313,26 @@
   }
 
   @Override
+  public void reencryptEncryptedKeys(final List<EncryptedKeyVersion> ekvs)
+      throws IOException, GeneralSecurityException {
+    try {
+      doOp(new ProviderCallable<Void>() {
+        @Override
+        public Void call(KMSClientProvider provider)
+            throws IOException, GeneralSecurityException {
+          provider.reencryptEncryptedKeys(ekvs);
+          return null;
+        }
+      }, nextIdx());
+    } catch (WrapperException we) {
+      if (we.getCause() instanceof GeneralSecurityException) {
+        throw (GeneralSecurityException) we.getCause();
+      }
+      throw new IOException(we.getCause());
+    }
+  }
+
+  @Override
   public KeyVersion getKeyVersion(final String versionName) throws IOException {
     return doOp(new ProviderCallable<KeyVersion>() {
       @Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 1f668eb..86c284a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -54,16 +54,29 @@
  * options accordingly, for example:
  *
  * <code>
- * FSDataOutputStreamBuilder builder = fs.createFile(path);
- * builder.permission(perm)
+ *
+ * // Don't
+ * if (fs instanceof FooFileSystem) {
+ *   FooFileSystem foofs = (FooFileSystem) fs;
+ *   OutputStream out = foofs.createFile(path)
+ *     .optionA()
+ *     .optionB("value")
+ *     .cache()
+ *     .build();
+ * } else if (fs instanceof BarFileSystem) {
+ *   ...
+ * }
+ *
+ * // Do
+ * OutputStream out = fs.createFile(path)
+ *   .permission(perm)
  *   .bufferSize(bufSize)
- *   .opt("dfs.outputstream.builder.lazy-persist", true)
- *   .opt("dfs.outputstream.builder.ec.policy-name", "rs-3-2-64k")
- *   .opt("fs.local.o-direct", true)
- *   .must("fs.s3a.fast-upload", true)
- *   .must("fs.azure.buffer-size", 256 * 1024 * 1024);
- * FSDataOutputStream out = builder.build();
- * ...
+ *   .opt("foofs:option.a", true)
+ *   .opt("foofs:option.b", "value")
+ *   .opt("barfs:cache", true)
+ *   .must("foofs:cache", true)
+ *   .must("barfs:cache-size", 256 * 1024 * 1024)
+ *   .build();
  * </code>
  *
  * If the option is not related to the file system, the option will be ignored.
@@ -263,6 +276,8 @@
 
   /**
    * Set optional boolean parameter for the Builder.
+   *
+   * @see #opt(String, String)
    */
   public B opt(@Nonnull final String key, boolean value) {
     mandatoryKeys.remove(key);
@@ -272,6 +287,8 @@
 
   /**
    * Set optional int parameter for the Builder.
+   *
+   * @see #opt(String, String)
    */
   public B opt(@Nonnull final String key, int value) {
     mandatoryKeys.remove(key);
@@ -281,6 +298,8 @@
 
   /**
    * Set optional float parameter for the Builder.
+   *
+   * @see #opt(String, String)
    */
   public B opt(@Nonnull final String key, float value) {
     mandatoryKeys.remove(key);
@@ -290,6 +309,8 @@
 
   /**
    * Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
    */
   public B opt(@Nonnull final String key, double value) {
     mandatoryKeys.remove(key);
@@ -299,6 +320,8 @@
 
   /**
    * Set an array of string values as optional parameter for the Builder.
+   *
+   * @see #opt(String, String)
    */
   public B opt(@Nonnull final String key, @Nonnull final String... values) {
     mandatoryKeys.remove(key);
@@ -310,8 +333,7 @@
    * Set mandatory option to the Builder.
    *
    * If the option is not supported or unavailable on the {@link FileSystem},
-   * the client should expect {@link #build()} throws
-   * {@link IllegalArgumentException}.
+   * the client should expect {@link #build()} to throw
+   * IllegalArgumentException.
    */
   public B must(@Nonnull final String key, @Nonnull final String value) {
     mandatoryKeys.add(key);
@@ -319,35 +341,55 @@
     return getThisBuilder();
   }
 
-  /** Set mandatory boolean option. */
+  /**
+   * Set mandatory boolean option.
+   *
+   * @see #must(String, String)
+   */
   public B must(@Nonnull final String key, boolean value) {
     mandatoryKeys.add(key);
     options.setBoolean(key, value);
     return getThisBuilder();
   }
 
-  /** Set mandatory int option. */
+  /**
+   * Set mandatory int option.
+   *
+   * @see #must(String, String)
+   */
   public B must(@Nonnull final String key, int value) {
     mandatoryKeys.add(key);
     options.setInt(key, value);
     return getThisBuilder();
   }
 
-  /** Set mandatory float option. */
+  /**
+   * Set mandatory float option.
+   *
+   * @see #must(String, String)
+   */
   public B must(@Nonnull final String key, float value) {
     mandatoryKeys.add(key);
     options.setFloat(key, value);
     return getThisBuilder();
   }
 
-  /** Set mandatory double option. */
+  /**
+   * Set mandatory double option.
+   *
+   * @see #must(String, String)
+   */
   public B must(@Nonnull final String key, double value) {
     mandatoryKeys.add(key);
     options.setDouble(key, value);
     return getThisBuilder();
   }
 
-  /** Set a string array as mandatory option. */
+  /**
+   * Set a string array as mandatory option.
+   *
+   * @see #must(String, String)
+   */
   public B must(@Nonnull final String key, @Nonnull final String... values) {
     mandatoryKeys.add(key);
     options.setStrings(key, values);
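
To make the opt()/must() contract concrete: optional keys are hints a file system may ignore, while mandatory keys make build() fail fast when unsupported. A hedged sketch with hypothetical keys:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BuilderOptExample {
  static FSDataOutputStream create(FileSystem fs, Path path)
      throws IOException {
    return fs.createFile(path)
        .opt("fs.example.lazy-persist", true)  // ignored if unsupported
        .must("fs.example.replicate", true)    // unsupported => build() throws
        .build();
  }
}
```
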
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 84a40d2..f639648 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -63,6 +63,15 @@
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
       int writePacketSize, short replication, int fileBufferSize,
       boolean encryptDataTransfer, long trashInterval,
+      DataChecksum.Type checksumType) {
+    this(blockSize, bytesPerChecksum, writePacketSize, replication,
+        fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
+        null, (byte) 0);
+  }
+
+  public FsServerDefaults(long blockSize, int bytesPerChecksum,
+      int writePacketSize, short replication, int fileBufferSize,
+      boolean encryptDataTransfer, long trashInterval,
       DataChecksum.Type checksumType, String keyProviderUri) {
     this(blockSize, bytesPerChecksum, writePacketSize, replication,
         fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index 885da07..dbb751d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -84,8 +84,10 @@
           Path symlink, Path path, BlockLocation[] locations) {
     this(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path,
-        permission.getAclBit(), permission.getEncryptedBit(),
-        permission.getErasureCodedBit(), locations);
+        permission == null ? false : permission.getAclBit(),
+        permission == null ? false : permission.getEncryptedBit(),
+        permission == null ? false : permission.getErasureCodedBit(),
+        locations);
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 23692de..031092b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -163,7 +163,7 @@
    */
   public static FsPermission read(DataInput in) throws IOException {
     FsPermission p = new FsPermission();
-    p.readFields(in);
+    p.fromShort(in.readShort());
     return p;
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 421769d6..43eb783 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -326,8 +326,10 @@
         String parentDir = parent.toUri().getPath();
         boolean succeeded = true;
         try {
+          final String previousCwd = client.pwd();
           client.cd(parentDir);
           client.mkdir(pathName);
+          client.cd(previousCwd);
         } catch (SftpException e) {
           throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
               parentDir));
@@ -474,8 +476,10 @@
     }
     boolean renamed = true;
     try {
+      final String previousCwd = channel.pwd();
       channel.cd("/");
       channel.rename(src.toUri().getPath(), dst.toUri().getPath());
+      channel.cd(previousCwd);
     } catch (SftpException e) {
       renamed = false;
     }
@@ -558,8 +562,10 @@
     }
     OutputStream os;
     try {
+      final String previousCwd = client.pwd();
       client.cd(parent.toUri().getPath());
       os = client.put(f.getName());
+      client.cd(previousCwd);
     } catch (SftpException e) {
       throw new IOException(e);
     }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
index a5e386c..701c9de 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
@@ -86,9 +86,9 @@
           (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
-      AclStatus aclStatus = null;
-      List<AclEntry> entries = null;
-      if (perm.getAclBit()) {
+      final AclStatus aclStatus;
+      final List<AclEntry> entries;
+      if (item.stat.hasAcl()) {
         aclStatus = item.fs.getAclStatus(item.path);
         entries = aclStatus.getEntries();
       } else {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 2a483c0..0bd4882 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -444,8 +444,8 @@
           src.stat.getPermission());
     }
     if (shouldPreserve(FileAttribute.ACL)) {
-      FsPermission perm = src.stat.getPermission();
-      if (perm.getAclBit()) {
+      if (src.stat.hasAcl()) {
+        FsPermission perm = src.stat.getPermission();
         List<AclEntry> srcEntries =
             src.fs.getAclStatus(src.path).getEntries();
         List<AclEntry> srcFullEntries =
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 221b3cb..a2d5017 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -255,7 +255,7 @@
       ContentSummary contentSummary = item.fs.getContentSummary(item.path);
       String line = String.format(lineFormat,
           (stat.isDirectory() ? "d" : "-"),
-          stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
+          stat.getPermission() + (stat.hasAcl() ? "+" : " "),
           (stat.isFile() ? stat.getReplication() : "-"),
           stat.getOwner(),
           stat.getGroup(),
@@ -269,7 +269,7 @@
     } else {
       String line = String.format(lineFormat,
           (stat.isDirectory() ? "d" : "-"),
-          stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
+          stat.getPermission() + (stat.hasAcl() ? "+" : " "),
           (stat.isFile() ? stat.getReplication() : "-"),
           stat.getOwner(),
           stat.getGroup(),
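
The shell changes above are all the same substitution: ask the FileStatus whether an ACL is present instead of inspecting the permission's ACL bit. A one-method sketch of the ls-style rendering:

```java
import org.apache.hadoop.fs.FileStatus;

public class PermissionColumnExample {
  // Renders the ls permission column, e.g. "drwxr-xr-x+" for a directory
  // carrying an ACL; FileStatus.hasAcl() replaces FsPermission.getAclBit().
  static String permissionColumn(FileStatus stat) {
    return (stat.isDirectory() ? "d" : "-")
        + stat.getPermission()
        + (stat.hasAcl() ? "+" : " ");
  }
}
```
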
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 28b9bb0..a450f66 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -128,6 +128,10 @@
   public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
       "hadoop.http.max.response.header.size";
   public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
+
+  public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
+      "hadoop.http.socket.backlog.size";
+  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
@@ -433,6 +437,9 @@
       httpConfig.setResponseHeaderSize(responseHeaderSize);
       httpConfig.setSendServerVersion(false);
 
+      int backlogSize = conf.getInt(HTTP_SOCKET_BACKLOG_SIZE_KEY,
+          HTTP_SOCKET_BACKLOG_SIZE_DEFAULT);
+
       for (URI ep : endpoints) {
         final ServerConnector connector;
         String scheme = ep.getScheme();
@@ -448,6 +455,7 @@
         }
         connector.setHost(ep.getHost());
         connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+        connector.setAcceptQueueSize(backlogSize);
         server.addListener(connector);
       }
       server.loadListeners();
@@ -640,7 +648,6 @@
 
   private static void configureChannelConnector(ServerConnector c) {
     c.setIdleTimeout(10000);
-    c.setAcceptQueueSize(128);
     if(Shell.WINDOWS) {
       // result of setting the SO_REUSEADDR flag is different on Windows
       // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
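
The HttpServer2 change above replaces the hard-coded accept-queue size of 128 with the new `hadoop.http.socket.backlog.size` key, read once and applied to every endpoint connector. A hedged usage sketch (the value 512 is illustrative, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class BacklogExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Raise the TCP accept backlog above the default of 128;
        // HttpServer2 applies this to each connector it builds.
        conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, 512);
      }
    }
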
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index fcf1349..daf91e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -176,7 +176,7 @@
    * @return a map of all codec names, and their corresponding code list
    * separated by ','.
    */
-  public HashMap<String, String> getCodec2CoderCompactMap() {
+  public Map<String, String> getCodec2CoderCompactMap() {
     return coderNameCompactMap;
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index e0d7946..d3c3b6b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -30,6 +30,7 @@
   public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
   public static final String XOR_CODEC_NAME = "xor";
   public static final String HHXOR_CODEC_NAME = "hhxor";
+  public static final String REPLICATION_CODEC_NAME = "replication";
 
   public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
       RS_CODEC_NAME, 6, 3);
@@ -45,4 +46,11 @@
 
   public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
       RS_CODEC_NAME, 10, 4);
+
+  public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
+      REPLICATION_CODEC_NAME, 1, 2);
+
+  public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
+  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }
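
A note on the new constants above: built-in erasure coding policy IDs stay below 64, ID 63 is reserved for the `replication` pseudo-policy, and user-defined policies start at 64. A small illustrative check (the helper class and method are hypothetical):

    import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

    public final class PolicyIds {
      // Hypothetical helper: IDs at or above USER_DEFINED_POLICY_START_ID
      // (64) belong to user-defined policies; 63 is the replication ID.
      static boolean isUserDefined(byte policyId) {
        return policyId >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID;
      }
    }
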
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 2c0cfe5..639bbad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@
         throws ServiceException {
       long startTime = 0;
       if (LOG.isDebugEnabled()) {
-        startTime = Time.monotonicNow();
+        startTime = Time.now();
       }
       
       if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@
       }
 
       if (LOG.isDebugEnabled()) {
-        long callTime = Time.monotonicNow() - startTime;
+        long callTime = Time.now() - startTime;
         LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
       }
       
@@ -373,19 +373,19 @@
         this.server = currentCallInfo.get().server;
         this.call = Server.getCurCall().get();
         this.methodName = currentCallInfo.get().methodName;
-        this.setupTime = Time.monotonicNow();
+        this.setupTime = Time.now();
       }
 
       @Override
       public void setResponse(Message message) {
-        long processingTime = Time.monotonicNow() - setupTime;
+        long processingTime = Time.now() - setupTime;
         call.setDeferredResponse(RpcWritable.wrap(message));
         server.updateDeferredMetrics(methodName, processingTime);
       }
 
       @Override
       public void error(Throwable t) {
-        long processingTime = Time.monotonicNow() - setupTime;
+        long processingTime = Time.now() - setupTime;
         String detailedMetricsName = t.getClass().getSimpleName();
         server.updateDeferredMetrics(detailedMetricsName, processingTime);
         call.setDeferredError(t);
@@ -513,7 +513,7 @@
         Message param = request.getValue(prototype);
 
         Message result;
-        long startTime = Time.monotonicNow();
+        long startTime = Time.now();
         int qTime = (int) (startTime - receiveTime);
         Exception exception = null;
         boolean isDeferred = false;
@@ -537,7 +537,7 @@
           throw e;
         } finally {
           currentCallInfo.set(null);
-          int processingTime = (int) (Time.monotonicNow() - startTime);
+          int processingTime = (int) (Time.now() - startTime);
           if (LOG.isDebugEnabled()) {
             String msg =
                 "Served: " + methodName + (isDeferred ? ", deferred" : "") +
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java
index 5f783a9..c96c6fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java
@@ -17,15 +17,24 @@
  */
 package org.apache.hadoop.util;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Utils for KMS.
@@ -71,4 +80,129 @@
     }
     return keyProvider;
   }
+
+  @SuppressWarnings("unchecked")
+  public static Map toJSON(KeyProvider.KeyVersion keyVersion) {
+    Map json = new HashMap();
+    if (keyVersion != null) {
+      json.put(KMSRESTConstants.NAME_FIELD,
+          keyVersion.getName());
+      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
+          keyVersion.getVersionName());
+      json.put(KMSRESTConstants.MATERIAL_FIELD,
+          Base64.encodeBase64URLSafeString(
+              keyVersion.getMaterial()));
+    }
+    return json;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static Map toJSON(EncryptedKeyVersion encryptedKeyVersion) {
+    Map json = new HashMap();
+    if (encryptedKeyVersion != null) {
+      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
+          encryptedKeyVersion.getEncryptionKeyVersionName());
+      json.put(KMSRESTConstants.IV_FIELD, Base64
+          .encodeBase64URLSafeString(encryptedKeyVersion.getEncryptedKeyIv()));
+      json.put(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD,
+          toJSON(encryptedKeyVersion.getEncryptedKeyVersion()));
+    }
+    return json;
+  }
+
+  public static <T> T checkNotNull(T o, String name)
+      throws IllegalArgumentException {
+    if (o == null) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be null");
+    }
+    return o;
+  }
+
+  public static String checkNotEmpty(String s, String name)
+      throws IllegalArgumentException {
+    checkNotNull(s, name);
+    if (s.isEmpty()) {
+      throw new IllegalArgumentException("Parameter '" + name +
+          "' cannot be empty");
+    }
+    return s;
+  }
+
+  @SuppressWarnings("rawtypes")
+  public static List<EncryptedKeyVersion>
+      parseJSONEncKeyVersions(String keyName, List valueList) {
+    checkNotNull(valueList, "valueList");
+    List<EncryptedKeyVersion> ekvs = new ArrayList<>(valueList.size());
+    if (!valueList.isEmpty()) {
+      for (Object values : valueList) {
+        Map valueMap = (Map) values;
+        ekvs.add(parseJSONEncKeyVersion(keyName, valueMap));
+      }
+    }
+    return ekvs;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static EncryptedKeyVersion parseJSONEncKeyVersion(String keyName,
+      Map valueMap) {
+    checkNotNull(valueMap, "valueMap");
+    String versionName = checkNotNull(
+        (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+        KMSRESTConstants.VERSION_NAME_FIELD);
+
+    byte[] iv = Base64.decodeBase64(checkNotNull(
+        (String) valueMap.get(KMSRESTConstants.IV_FIELD),
+        KMSRESTConstants.IV_FIELD));
+
+    Map encValueMap = checkNotNull((Map)
+            valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
+        KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
+
+    String encVersionName = checkNotNull((String)
+            encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+        KMSRESTConstants.VERSION_NAME_FIELD);
+
+    byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
+            encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
+        KMSRESTConstants.MATERIAL_FIELD));
+
+    return new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
+        iv, encVersionName, encKeyMaterial);
+  }
+
+  @SuppressWarnings("unchecked")
+  public static KeyProvider.KeyVersion parseJSONKeyVersion(Map valueMap) {
+    checkNotNull(valueMap, "valueMap");
+    KeyProvider.KeyVersion keyVersion = null;
+    if (!valueMap.isEmpty()) {
+      byte[] material =
+          (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD)) ?
+              Base64.decodeBase64(
+                  (String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD)) :
+              null;
+      String versionName =
+          (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD);
+      String keyName = (String) valueMap.get(KMSRESTConstants.NAME_FIELD);
+      keyVersion =
+          new KMSClientProvider.KMSKeyVersion(keyName, versionName, material);
+    }
+    return keyVersion;
+  }
+
+  @SuppressWarnings("unchecked")
+  public static KeyProvider.Metadata parseJSONMetadata(Map valueMap) {
+    checkNotNull(valueMap, "valueMap");
+    KeyProvider.Metadata metadata = null;
+    if (!valueMap.isEmpty()) {
+      metadata = new KMSClientProvider.KMSMetadata(
+          (String) valueMap.get(KMSRESTConstants.CIPHER_FIELD),
+          (Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD),
+          (String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD),
+          (Map<String, String>) valueMap.get(KMSRESTConstants.ATTRIBUTES_FIELD),
+          new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)),
+          (Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD));
+    }
+    return metadata;
+  }
 }
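
The new KMSUtil helpers above centralize the JSON (de)serialization of key material used by the KMS REST protocol. A hedged round-trip sketch, assuming an already-initialized `KeyProvider` and an existing key named "mykey":

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.util.KMSUtil;

    public final class KeyVersionRoundTrip {
      static KeyProvider.KeyVersion roundTrip(KeyProvider provider)
          throws IOException {
        // Serialize the current key version to the KMS REST JSON shape...
        KeyProvider.KeyVersion kv = provider.getCurrentKey("mykey");
        Map json = KMSUtil.toJSON(kv);
        // ...and parse it back into a client-side key version.
        return KMSUtil.parseJSONKeyVersion(json);
      }
    }
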
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
index 3adf028..e1efcb5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -26,6 +26,8 @@
 import org.apache.curator.framework.AuthInfo;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.transaction.CuratorTransaction;
+import org.apache.curator.framework.api.transaction.CuratorTransactionFinal;
 import org.apache.curator.retry.RetryNTimes;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +35,12 @@
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper class that provides utility methods specific to ZK operations.
  */
@@ -51,7 +56,6 @@
   /** Curator for ZooKeeper. */
   private CuratorFramework curator;
 
-
   public ZKCuratorManager(Configuration config) throws IOException {
     this.conf = config;
   }
@@ -116,7 +120,6 @@
 
   /**
    * Start the connection to the ZooKeeper ensemble.
-   * @param conf Configuration for the connection.
    * @throws IOException If the connection cannot be started.
    */
   public void start() throws IOException {
@@ -125,7 +128,6 @@
 
   /**
    * Start the connection to the ZooKeeper ensemble.
-   * @param conf Configuration for the connection.
    * @param authInfos List of authentication keys.
    * @throws IOException If the connection cannot be started.
    */
@@ -179,7 +181,6 @@
   /**
    * Get the data in a ZNode.
    * @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
    * @return The data in the ZNode.
    * @throws Exception If it cannot contact Zookeeper.
    */
@@ -190,12 +191,34 @@
   /**
    * Get the data in a ZNode.
    * @param path Path of the ZNode.
+   * @param stat Output statistics of the ZNode.
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public byte[] getData(final String path, Stat stat) throws Exception {
+    return curator.getData().storingStatIn(stat).forPath(path);
+  }
+
+  /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public String getStringData(final String path) throws Exception {
+    byte[] bytes = getData(path);
+    return new String(bytes, Charset.forName("UTF-8"));
+  }
+
+  /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
    * @param stat Output statistics of the ZNode.
    * @return The data in the ZNode.
    * @throws Exception If it cannot contact Zookeeper.
    */
-  public String getSringData(final String path) throws Exception {
-    byte[] bytes = getData(path);
+  public String getStringData(final String path, Stat stat) throws Exception {
+    byte[] bytes = getData(path, stat);
     return new String(bytes, Charset.forName("UTF-8"));
   }
 
@@ -272,14 +295,36 @@
   }
 
   /**
+   * Utility function to ensure that the configured base znode exists.
+   * This recursively creates the znode as well as all of its parents.
+   * @param path Path of the znode to create.
+   * @throws Exception If it cannot create the znode.
+   */
+  public void createRootDirRecursively(String path) throws Exception {
+    String[] pathParts = path.split("/");
+    Preconditions.checkArgument(
+        pathParts.length >= 1 && pathParts[0].isEmpty(),
+        "Invalid path: %s", path);
+    StringBuilder sb = new StringBuilder();
+
+    for (int i = 1; i < pathParts.length; i++) {
+      sb.append("/").append(pathParts[i]);
+      create(sb.toString());
+    }
+  }
+
+  /**
    * Delete a ZNode.
    * @param path Path of the ZNode.
+   * @return True if the znode was deleted.
    * @throws Exception If it cannot contact ZooKeeper.
    */
-  public void delete(final String path) throws Exception {
+  public boolean delete(final String path) throws Exception {
     if (exists(path)) {
       curator.delete().deletingChildrenIfNeeded().forPath(path);
+      return true;
     }
+    return false;
   }
 
   /**
@@ -291,4 +336,87 @@
   public static String getNodePath(String root, String nodeName) {
     return root + "/" + nodeName;
   }
+
+  public void safeCreate(String path, byte[] data, List<ACL> acl,
+      CreateMode mode, List<ACL> fencingACL, String fencingNodePath)
+      throws Exception {
+    if (!exists(path)) {
+      SafeTransaction transaction = createTransaction(fencingACL,
+          fencingNodePath);
+      transaction.create(path, data, acl, mode);
+      transaction.commit();
+    }
+  }
+
+  /**
+   * Deletes the path. Checks for existence of path as well.
+   * @param path Path to be deleted.
+   * @throws Exception if any problem occurs while performing deletion.
+   */
+  public void safeDelete(final String path, List<ACL> fencingACL,
+      String fencingNodePath) throws Exception {
+    if (exists(path)) {
+      SafeTransaction transaction = createTransaction(fencingACL,
+          fencingNodePath);
+      transaction.delete(path);
+      transaction.commit();
+    }
+  }
+
+  public void safeSetData(String path, byte[] data, int version,
+      List<ACL> fencingACL, String fencingNodePath)
+      throws Exception {
+    SafeTransaction transaction = createTransaction(fencingACL,
+        fencingNodePath);
+    transaction.setData(path, data, version);
+    transaction.commit();
+  }
+
+  public SafeTransaction createTransaction(List<ACL> fencingACL,
+      String fencingNodePath) throws Exception {
+    return new SafeTransaction(fencingACL, fencingNodePath);
+  }
+
+  /**
+   * Use curator transactions to ensure zk-operations are performed in an
+   * all-or-nothing fashion. This is equivalent to using ZooKeeper#multi.
+   *
+   * TODO (YARN-3774): Curator 3.0 introduces CuratorOp similar to Op. We'll
+   * have to rewrite this inner class when we adopt that.
+   */
+  public class SafeTransaction {
+    private CuratorTransactionFinal transactionFinal;
+    private String fencingNodePath;
+
+    SafeTransaction(List<ACL> fencingACL, String fencingNodePath)
+        throws Exception {
+      this.fencingNodePath = fencingNodePath;
+      CuratorTransaction transaction = curator.inTransaction();
+      transactionFinal = transaction.create()
+          .withMode(CreateMode.PERSISTENT).withACL(fencingACL)
+          .forPath(fencingNodePath, new byte[0]).and();
+    }
+
+    public void commit() throws Exception {
+      transactionFinal = transactionFinal.delete()
+          .forPath(fencingNodePath).and();
+      transactionFinal.commit();
+    }
+
+    public void create(String path, byte[] data, List<ACL> acl, CreateMode mode)
+        throws Exception {
+      transactionFinal = transactionFinal.create()
+          .withMode(mode).withACL(acl).forPath(path, data).and();
+    }
+
+    public void delete(String path) throws Exception {
+      transactionFinal = transactionFinal.delete().forPath(path).and();
+    }
+
+    public void setData(String path, byte[] data, int version)
+        throws Exception {
+      transactionFinal = transactionFinal.setData()
+          .withVersion(version).forPath(path, data).and();
+    }
+  }
 }
\ No newline at end of file
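
The `SafeTransaction` machinery above brackets every mutation between creating and deleting a fencing znode, so a writer that has lost the fencing ACL fails the whole multi-op atomically. A hedged usage sketch (all paths and data are illustrative):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.curator.ZKCuratorManager;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;

    public class SafeWriteExample {
      public static void main(String[] args) throws Exception {
        ZKCuratorManager zk = new ZKCuratorManager(new Configuration());
        zk.start();
        List<ACL> acls = ZooDefs.Ids.OPEN_ACL_UNSAFE;
        // Ensure the base path exists, then create inside a fenced txn.
        zk.createRootDirRecursively("/store/app");
        zk.safeCreate("/store/app/node-1", new byte[0], acls,
            CreateMode.PERSISTENT, acls, "/store/FENCE");
      }
    }
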
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
index fa525bd..a1db6c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
@@ -27,7 +27,9 @@
 
 #include "org_apache_hadoop.h"
 
+#ifndef HADOOP_BZIP2_LIBRARY
 #define HADOOP_BZIP2_LIBRARY "libbz2.so.1"
+#endif
 
 
 /* A helper macro to convert the java 'stream-handle' to a bz_stream pointer. */
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index cf03075..9e2c553 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1488,7 +1488,15 @@
     This flag is relevant only when fs.azure.authorization is enabled.
   </description>
 </property>
-
+<property>
+  <name>fs.azure.saskey.usecontainersaskeyforallaccess</name>
+  <value>true</value>
+  <description>
+    Use container saskey for access to all blobs within the container.
+    Blob-specific saskeys are not used when this setting is enabled.
+    This setting provides better performance compared to blob-specific saskeys.
+  </description>
+</property>
 <property>
   <name>io.seqfile.compress.blocksize</name>
   <value>1000000</value>
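
The new `fs.azure.saskey.usecontainersaskeyforallaccess` property above defaults to true, so deployments only need to touch it when blob-specific SAS keys are required. A hedged sketch of reading the flag:

    import org.apache.hadoop.conf.Configuration;

    public class SasKeyFlag {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // true (default): one container-level SAS key covers all blobs;
        // false: a blob-specific SAS key is requested per blob.
        boolean containerSasForAll = conf.getBoolean(
            "fs.azure.saskey.usecontainersaskeyforallaccess", true);
        System.out.println(containerSasForAll);
      }
    }
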
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4543fac..367d9e0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -179,8 +179,8 @@
 | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
 | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
 | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
-| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
-| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 
 FSNamesystem
 ------------
@@ -240,8 +240,8 @@
 | `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state |
 | `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state |
 | `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state |
-| `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
-| `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
+| `FSN(Read/Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
+| `FSN(Read/Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
 
 JournalNode
 -----------
@@ -308,13 +308,13 @@
 | `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory without ever being read from memory |
 | `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory|
 | `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory before being evicted in milliseconds |
-| `RamDiskBlocksEvictionWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by lazy writer |
 | `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted by application before being persisted to disk |
 | `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written to disk by lazy writer in milliseconds |
-| `RamDiskBlocksLazyPersistWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |
@@ -380,23 +380,23 @@
 | `TotalMetadataOperations` | Total number (monotonically increasing) of metadata operations. Metadata operations include stat, list, mkdir, delete, move, open and posix_fadvise. |
 | `MetadataOperationRateNumOps` | The number of metadata operations within an interval time of metric |
 | `MetadataOperationRateAvgTime` | Mean time of metadata operations in milliseconds |
-| `MetadataOperationLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalDataFileIos` | Total number (monotonically increasing) of data file io operations |
 | `DataFileIoRateNumOps` | The number of data file io operations within an interval time of metric |
 | `DataFileIoRateAvgTime` | Mean time of data file io operations in milliseconds |
-| `DataFileIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushIoRateNumOps` | The number of file flush io operations within an interval time of metric |
 | `FlushIoRateAvgTime` | Mean time of file flush io operations in milliseconds |
-| `FlushIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SyncIoRateNumOps` | The number of file sync io operations within an interval time of metric |
 | `SyncIoRateAvgTime` | Mean time of file sync io operations in milliseconds |
-| `SyncIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `ReadIoRateNumOps` | The number of file read io operations within an interval time of metric |
 | `ReadIoRateAvgTime` | Mean time of file read io operations in milliseconds |
-| `ReadIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WriteIoRateNumOps` | The number of file write io operations within an interval time of metric |
 | `WriteIoRateAvgTime` | Mean time of file write io operations in milliseconds |
-| `WriteIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalFileIoErrors` | Total number (monotonically increasing) of file io error operations |
 | `FileIoErrorRateNumOps` | The number of file io error operations within an interval time of metric |
 | `FileIoErrorRateAvgTime` | It measures the mean time in milliseconds from the start of an operation to hitting a failure |
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index d7e57ce..1e522c7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -553,7 +553,7 @@
        FS' = FS
        result = False
 
-### `FSDataOutputStream create(Path, ...)`
+### <a name='FileSystem.create'></a> `FSDataOutputStream create(Path, ...)`
 
 
     FSDataOutputStream create(Path p,
@@ -616,7 +616,24 @@
 
 * Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation
 
-### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
+### `FSDataOutputStreamBuilder createFile(Path p)`
+
+Make a `FSDataOutputStreamBuilder` to specify the parameters to create a file.
+
+#### Implementation Notes
+
+`createFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make
+any change on the filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`,
+the builder parameters are verified and [`create(Path p)`](#FileSystem.create)
+is invoked on the underlying filesystem. `build()` has the same preconditions
+and postconditions as [`create(Path p)`](#FileSystem.create).
+
+* Similar to [`create(Path p)`](#FileSystem.create), files are overwritten
+by default, unless `builder.overwrite(false)` is specified.
+* Unlike [`create(Path p)`](#FileSystem.create), missing parent directories are
+not created by default, unless `builder.recursive()` is specified.
+
+### <a name='FileSystem.append'></a> `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
 
 Implementations without a compliant call SHOULD throw `UnsupportedOperationException`.
 
@@ -634,6 +651,18 @@
 Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]`
 by appending data to the existing list.
 
+### `FSDataOutputStreamBuilder appendFile(Path p)`
+
+Make a `FSDataOutputStreamBuilder` to specify the parameters to append to an
+existing file.
+
+#### Implementation Notes
+
+`appendFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make
+any change on the filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`,
+the builder parameters are verified and [`append()`](#FileSystem.append) is
+invoked on the underlying filesystem. `build()` has the same preconditions and
+postconditions as [`append()`](#FileSystem.append).
 
 ### `FSDataInputStream open(Path f, int bufferSize)`
 
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
new file mode 100644
index 0000000..4ea1fd1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
@@ -0,0 +1,182 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!--  ============================================================= -->
+<!--  CLASS: FSDataOutputStreamBuilder -->
+<!--  ============================================================= -->
+
+# class `org.apache.hadoop.fs.FSDataOutputStreamBuilder`
+
+<!-- MACRO{toc|fromDepth=1|toDepth=2} -->
+
+Builder pattern for `FSDataOutputStream` and its subclasses. It is used to
+create a new file or open an existing file on `FileSystem` for write.
+
+## Invariants
+
+The `FSDataOutputStreamBuilder` interface does not validate parameters or
+modify the state of `FileSystem` until [`build()`](#Builder.build) is
+invoked.
+
+## Implementation-agnostic parameters.
+
+### <a name="Builder.create"></a> `FSDataOutputStreamBuilder create()`
+
+Specify `FSDataOutputStreamBuilder` to create a file on `FileSystem`, equivalent
+to `CreateFlag#CREATE`.
+
+### <a name="Builder.append"></a> `FSDataOutputStreamBuilder append()`
+
+Specify `FSDataOutputStreamBuilder` to append to an existing file on
+`FileSystem`, equivalent to `CreateFlag#APPEND`.
+
+### <a name="Builder.overwrite"></a> `FSDataOutputStreamBuilder overwrite(boolean overwrite)`
+
+Specify whether `FSDataOutputStreamBuilder` should overwrite an existing file. If
+`overwrite==true` is given, it truncates an existing file, equivalent to
+`CreateFlag#OVERWRITE`.
+
+### <a name="Builder.permission"></a> `FSDataOutputStreamBuilder permission(FsPermission permission)`
+
+Set permission for the file.
+
+### <a name="Builder.bufferSize"></a> `FSDataOutputStreamBuilder bufferSize(int bufSize)`
+
+Set the size of the buffer to be used.
+
+### <a name="Builder.replication"></a> `FSDataOutputStreamBuilder replication(short replica)`
+
+Set the replication factor.
+
+### <a name="Builder.blockSize"></a> `FSDataOutputStreamBuilder blockSize(long size)`
+
+Set block size in bytes.
+
+### <a name="Builder.recursive"></a> `FSDataOutputStreamBuilder recursive()`
+
+Create parent directories if they do not exist.
+
+### <a name="Builder.progress"></a> `FSDataOutputStreamBuilder progress(Progresable prog)`
+
+Set the facility of reporting progress.
+
+### <a name="Builder.checksumOpt"></a> `FSDataOutputStreamBuilder checksumOpt(ChecksumOpt chksumOpt)`
+
+Set checksum opt.
+
+### Set optional or mandatory parameters
+
+    FSDataOutputStreamBuilder opt(String key, ...)
+    FSDataOutputStreamBuilder must(String key, ...)
+
+Set optional or mandatory parameters to the builder. Using `opt()` or `must()`,
+client can specify FS-specific parameters without inspecting the concrete type
+of `FileSystem`.
+
+    // Don't
+    if (fs instanceof FooFileSystem) {
+        FooFileSystem foo = (FooFileSystem) fs;
+        out = foo.createFile(path)
+            .optionA()
+            .optionB("value")
+            .cache()
+            .build();
+    } else if (fs instanceof BarFileSystem) {
+        ...
+    }
+
+    // Do
+    out = fs.createFile(path)
+        .permission(perm)
+        .bufferSize(bufSize)
+        .opt("foofs:option.a", true)
+        .opt("foofs:option.b", "value")
+        .opt("barfs:cache", true)
+        .must("foofs:cache", true)
+        .must("barfs:cache-size", 256 * 1024 * 1024)
+        .build();
+
+#### Implementation Notes
+
+The concrete `FileSystem` and/or `FSDataOutputStreamBuilder` implementation
+MUST verify that implementation-agnostic parameters (e.g., "syncable") or
+implementation-specific parameters (e.g., "foofs:cache")
+are supported. `FileSystem` will satisfy optional parameters (via `opt(key, ...)`)
+on a best-effort basis. If the mandatory parameters (via `must(key, ...)`) cannot
+be satisfied in the `FileSystem`, an `IllegalArgumentException` should be thrown in `build()`.
+
+The behavior of resolving the conflicts between the parameters set by
+builder methods (e.g., `bufferSize()`) and `opt()`/`must()` is undefined.
+
+## HDFS-specific parameters.
+
+`HdfsDataOutputStreamBuilder extends FSDataOutputStreamBuilder` provides additional
+HDFS-specific parameters to further customize file creation / append behavior.
+
+### `FSDataOutputStreamBuilder favoredNodes(InetSocketAddress[] nodes)`
+
+Set favored DataNodes for new blocks.
+
+### `FSDataOutputStreamBuilder syncBlock()`
+
+Force closed blocks to the disk device. See `CreateFlag#SYNC_BLOCK`.
+
+### `FSDataOutputStreamBuilder lazyPersist()`
+
+Create the block on transient storage if possible.
+
+### `FSDataOutputStreamBuilder newBlock()`
+
+Append data to a new block instead of the end of the last partial block.
+
+### `FSDataOutputStreamBuilder noLocalWrite()`
+
+Advise that a block replica NOT be written to the local DataNode.
+
+### `FSDataOutputStreamBuilder ecPolicyName(String policyName)`
+
+Enforce the file to be a striped file with erasure coding policy 'policyName',
+no matter what its parent directory's replication or erasure coding policy is.
+
+### `FSDataOutputStreamBuilder replicate()`
+
+Enforce the file to be a replicated file, no matter what its parent directory's
+replication or erasure coding policy is.
+
+## Builder interface
+
+### <a name="Builder.build"></a> `FSDataOutputStream build()`
+
+Create a new file or append to an existing file on the underlying `FileSystem`,
+and return an `FSDataOutputStream` for write.
+
+#### Preconditions
+
+The following combinations of parameters are not supported:
+
+    if APPEND|OVERWRITE: raise HadoopIllegalArgumentException
+    if CREATE|APPEND|OVERWRITE: raise HadoopIllegalArgumentException
+
+`FileSystem` may reject the request for other reasons and throw `IOException`,
+see `FileSystem#create(path, ...)` and `FileSystem#append()`.
+
+#### Postconditions
+
+    FS' where :
+       FS'.Files'[p] == []
+       ancestors(p) is-subset-of FS'.Directories'
+
+    result = FSDataOutputStream
+
+The result is an `FSDataOutputStream` to be used to write data to the filesystem.
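
The precondition above forbids combining append and overwrite, with or without create. A hedged Java sketch of that flag check (the helper is illustrative, not the library's actual validator):

    import java.util.EnumSet;
    import org.apache.hadoop.HadoopIllegalArgumentException;
    import org.apache.hadoop.fs.CreateFlag;

    public final class FlagCheck {
      // APPEND and OVERWRITE are mutually exclusive in all combinations.
      static void validate(EnumSet<CreateFlag> flags) {
        if (flags.contains(CreateFlag.APPEND)
            && flags.contains(CreateFlag.OVERWRITE)) {
          throw new HadoopIllegalArgumentException(
              "Append and overwrite options cannot both be enabled");
        }
      }
    }
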
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
index 66a7eb3..532b6c7 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
@@ -33,5 +33,6 @@
 1. [Model](model.html)
 1. [FileSystem class](filesystem.html)
 1. [FSDataInputStream class](fsdatainputstream.html)
+1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html)
 2. [Testing with the Filesystem specification](testing.html)
 2. [Extending the specification and its tests](extending.html)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index d0e0a35..3324886 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -123,6 +123,7 @@
     xmlPropsToSkipCompare.add("fs.azure.secure.mode");
     xmlPropsToSkipCompare.add("fs.azure.authorization");
     xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
+    xmlPropsToSkipCompare.add("fs.azure.saskey.usecontainersaskeyforallaccess");
     xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
 
     // Deprecated properties.  These should eventually be removed from the
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 91f25fa..b41a807 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -44,15 +44,23 @@
 import static java.util.concurrent.TimeUnit.*;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import junit.framework.TestCase;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
@@ -60,10 +68,12 @@
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
+import org.hamcrest.CoreMatchers;
 import org.mockito.Mockito;
 
-public class TestConfiguration extends TestCase {
+public class TestConfiguration {
 
+  private static final double DOUBLE_DELTA = 0.000000001f;
   private Configuration conf;
   final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG2 = new File("./test-config2-TestConfiguration.xml").getAbsolutePath();
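
The TestConfiguration hunks that follow are a mechanical JUnit 3 to JUnit 4 migration: annotations replace inheritance from `junit.framework.TestCase`, and float/double assertions gain an explicit delta. A minimal sketch of the target shape (class and method names are illustrative):

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestShape {
      @Before
      public void setUp() { /* replaces TestCase#setUp; no super call */ }

      @After
      public void tearDown() { /* replaces TestCase#tearDown */ }

      @Test  // every test method now needs an explicit annotation
      public void testFloats() {
        // JUnit 4 requires a delta for floating-point equality, hence
        // the new DOUBLE_DELTA constant in this diff.
        assertEquals(3.1415f, 3.1415f, 0.000000001f);
      }
    }
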
@@ -76,7 +86,7 @@
   private static final String CONFIG_MULTI_BYTE_SAVED = new File(
     "./test-config-multi-byte-saved-TestConfiguration.xml").getAbsolutePath();
   final static Random RAN = new Random();
-  final static String XMLHEADER = 
+  final static String XMLHEADER =
             IBM_JAVA?"<?xml version=\"1.0\" encoding=\"UTF-8\"?><configuration>":
   "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>";
 
@@ -88,18 +98,16 @@
 
   private BufferedWriter out;
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     conf = new Configuration();
   }
-  
-  @Override
-  protected void tearDown() throws Exception {
+
+  @After
+  public void tearDown() throws Exception {
     if(out != null) {
       out.close();
     }
-    super.tearDown();
     new File(CONFIG).delete();
     new File(CONFIG2).delete();
     new File(CONFIG_FOR_ENUM).delete();
@@ -107,7 +115,7 @@
     new File(CONFIG_MULTI_BYTE).delete();
     new File(CONFIG_MULTI_BYTE_SAVED).delete();
   }
-  
+
   private void startConfig() throws IOException{
     out.write("<?xml version=\"1.0\"?>\n");
     out.write("<configuration>\n");
@@ -158,6 +166,7 @@
         + " [\n<!ENTITY " + entity + " SYSTEM \"" + value + "\">\n]>");
   }
 
+  @Test
   public void testInputStreamResource() throws Exception {
     StringWriter writer = new StringWriter();
     out = new BufferedWriter(writer);
@@ -176,6 +185,7 @@
     assertEquals("A", conf.get("prop"));
   }
 
+  @Test
   public void testFinalWarnings() throws Exception {
     // Make a configuration file with a final property
     StringWriter writer = new StringWriter();
@@ -220,6 +230,7 @@
     }
   }
 
+  @Test
   public void testNoFinalWarnings() throws Exception {
     // Make a configuration file with a final property
     StringWriter writer = new StringWriter();
@@ -257,6 +268,7 @@
 
 
 
+  @Test
   public void testFinalWarningsMultiple() throws Exception {
     // Make a configuration file with a repeated final property
     StringWriter writer = new StringWriter();
@@ -290,6 +302,7 @@
     }
   }
 
+  @Test
   public void testFinalWarningsMultipleOverride() throws Exception {
     // Make a configuration file with 2 final properties with different values
     StringWriter writer = new StringWriter();
@@ -352,6 +365,7 @@
    * round-trips multi-byte string literals through saving and loading of config
    * and asserts that the same values were read.
    */
+  @Test
   public void testMultiByteCharacters() throws IOException {
     String priorDefaultEncoding = System.getProperty("file.encoding");
     try {
@@ -382,6 +396,7 @@
     }
   }
 
+  @Test
   public void testVariableSubstitution() throws IOException {
     // stubbing only environment dependent functions
     Configuration mock = Mockito.spy(conf);
@@ -413,12 +428,13 @@
       assertEq(p.val, gotRawVal);
       assertEq(p.expectEval, gotVal);
     }
-      
+
     // check that expansion also occurs for getInt()
     assertTrue(mock.getInt("intvar", -1) == 42);
     assertTrue(mock.getInt("my.int", -1) == 42);
   }
 
+  @Test
   public void testEnvDefault() throws IOException {
     Configuration mock = Mockito.spy(conf);
     Mockito.when(mock.getenv("NULL_VALUE")).thenReturn(null);
@@ -459,6 +475,7 @@
     }
   }
 
+  @Test
   public void testFinalParam() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -468,7 +485,7 @@
     Configuration conf1 = new Configuration();
     conf1.addResource(fileResource);
     assertNull("my var is not null", conf1.get("my.var"));
-	
+
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
     declareProperty("my.var", "myval", "myval", false);
@@ -480,6 +497,7 @@
     assertNull("my var is not final", conf2.get("my.var"));
   }
 
+  @Test
   public void testCompactFormat() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -524,8 +542,8 @@
   }
 
   void declareProperty(String name, String val, String expectEval,
-                       boolean isFinal)
-    throws IOException {
+      boolean isFinal)
+      throws IOException {
     appendProperty(name, val, isFinal);
     Prop p = new Prop();
     p.name = name;
@@ -537,10 +555,10 @@
   void appendProperty(String name, String val) throws IOException {
     appendProperty(name, val, false);
   }
- 
-  void appendProperty(String name, String val, boolean isFinal, 
+
+  void appendProperty(String name, String val, boolean isFinal,
       String ... sources)
-    throws IOException {
+      throws IOException {
     out.write("<property>");
     out.write("<name>");
     out.write(name);
@@ -558,19 +576,19 @@
     }
     out.write("</property>\n");
   }
-  
+
   void appendCompactFormatProperty(String name, String val) throws IOException {
     appendCompactFormatProperty(name, val, false);
   }
 
   void appendCompactFormatProperty(String name, String val, boolean isFinal)
-    throws IOException {
+      throws IOException {
     appendCompactFormatProperty(name, val, isFinal, null);
   }
 
   void appendCompactFormatProperty(String name, String val, boolean isFinal,
       String source)
-    throws IOException {
+      throws IOException {
     out.write("<property ");
     out.write("name=\"");
     out.write(name);
@@ -589,6 +607,7 @@
     out.write("/>\n");
   }
 
+  @Test
   public void testOverlay() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -604,25 +623,26 @@
     appendProperty("b","d");
     appendProperty("e","e");
     endConfig();
-    
+
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
+
     //set dynamically something
     conf.set("c","d");
     conf.set("a","d");
-    
+
     Configuration clone=new Configuration(conf);
     clone.addResource(new Path(CONFIG2));
-    
-    assertEquals(clone.get("a"), "d"); 
-    assertEquals(clone.get("b"), "d"); 
-    assertEquals(clone.get("c"), "d"); 
-    assertEquals(clone.get("d"), "e"); 
-    assertEquals(clone.get("e"), "f"); 
-    
+
+    assertEquals(clone.get("a"), "d");
+    assertEquals(clone.get("b"), "d");
+    assertEquals(clone.get("c"), "d");
+    assertEquals(clone.get("d"), "e");
+    assertEquals(clone.get("e"), "f");
+
   }
-  
+
+  @Test
   public void testCommentsInValue() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -634,6 +654,7 @@
     assertEquals("this  contains a comment", conf.get("my.comment"));
   }
 
+  @Test
   public void testEscapedCharactersInValue() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -645,6 +666,7 @@
     assertEquals("''''", conf.get("my.comment"));
   }
 
+  @Test
   public void testTrim() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -652,13 +674,13 @@
     String[] name = new String[100];
     for(int i = 0; i < name.length; i++) {
       name[i] = "foo" + i;
-      StringBuilder prefix = new StringBuilder(); 
-      StringBuilder postfix = new StringBuilder(); 
+      StringBuilder prefix = new StringBuilder();
+      StringBuilder postfix = new StringBuilder();
       for(int j = 0; j < 3; j++) {
         prefix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
         postfix.append(whitespaces[RAN.nextInt(whitespaces.length)]);
       }
-      
+
       appendProperty(prefix + name[i] + postfix, name[i] + ".value");
     }
     endConfig();
@@ -669,6 +691,7 @@
     }
   }
 
+  @Test
   public void testGetLocalPath() throws IOException {
     Configuration conf = new Configuration();
     String[] dirs = new String[]{"a", "b", "c"};
@@ -684,7 +707,8 @@
         localPath.contains(" "));
     }
   }
-  
+
+  @Test
   public void testGetFile() throws IOException {
     Configuration conf = new Configuration();
     String[] dirs = new String[]{"a", "b", "c"};
@@ -701,29 +725,32 @@
     }
   }
 
+  @Test
   public void testToString() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
-    String expectedOutput = 
-      "Configuration: core-default.xml, core-site.xml, " + 
+
+    String expectedOutput =
+      "Configuration: core-default.xml, core-site.xml, " +
       fileResource.toString();
     assertEquals(expectedOutput, conf.toString());
   }
-  
+
+  @Test
   public void testWriteXml() throws IOException {
     Configuration conf = new Configuration();
-    ByteArrayOutputStream baos = new ByteArrayOutputStream(); 
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
     conf.writeXml(baos);
     String result = baos.toString();
     assertTrue("Result has proper header", result.startsWith(XMLHEADER));
-	  
+
     assertTrue("Result has proper footer", result.endsWith("</configuration>"));
   }
-  
+
+  @Test
   public void testIncludes() throws Exception {
     tearDown();
     System.out.println("XXX testIncludes");
@@ -752,8 +779,8 @@
     // verify that the includes file contains all properties
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(conf.get("a"), "b"); 
-    assertEquals(conf.get("c"), "d"); 
+    assertEquals(conf.get("a"), "b");
+    assertEquals(conf.get("c"), "d");
     assertEquals(conf.get("e"), "f");
     assertEquals(conf.get("g"), "h");
     assertEquals(conf.get("i"), "j");
@@ -761,6 +788,7 @@
     tearDown();
   }
 
+  @Test
   public void testCharsetInDocumentEncoding() throws Exception {
     tearDown();
     out=new BufferedWriter(new OutputStreamWriter(new FileOutputStream(CONFIG),
@@ -779,6 +807,7 @@
     tearDown();
   }
 
+  @Test
   public void testEntityReference() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG));
@@ -797,6 +826,7 @@
     tearDown();
   }
 
+  @Test
   public void testSystemEntityReference() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG2));
@@ -818,6 +848,7 @@
     tearDown();
   }
 
+  @Test
   public void testIncludesWithFallback() throws Exception {
     tearDown();
     out=new BufferedWriter(new FileWriter(CONFIG2));
@@ -856,6 +887,7 @@
     tearDown();
   }
 
+  @Test
   public void testRelativeIncludes() throws Exception {
     tearDown();
     String relConfig = new File("./tmp/test-config.xml").getAbsolutePath();
@@ -887,6 +919,7 @@
     new File(new File(relConfig).getParent()).delete();
   }
 
+  @Test
   public void testIntegerRanges() {
     Configuration conf = new Configuration();
     conf.set("first", "-100");
@@ -917,7 +950,8 @@
     assertEquals(true, range.isIncluded(34));
     assertEquals(true, range.isIncluded(100000000));
   }
-  
+
+  @Test
   public void testGetRangeIterator() throws Exception {
     Configuration config = new Configuration(false);
     IntegerRanges ranges = config.getRange("Test", "");
@@ -937,7 +971,7 @@
       found.add(i);
     }
     assertEquals(expected, found);
-    
+
     ranges = config.getRange("Test", "8-12, 5- 7");
     expected = new HashSet<Integer>(Arrays.asList(5,6,7,8,9,10,11,12));
     found = new HashSet<Integer>();
@@ -947,6 +981,7 @@
     assertEquals(expected, found);
   }
 
+  @Test
   public void testHexValues() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -978,6 +1013,7 @@
     }
   }
 
+  @Test
   public void testIntegerValues() throws IOException{
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1008,7 +1044,8 @@
       // pass
     }
   }
-  
+
+  @Test
   public void testHumanReadableValues() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1029,6 +1066,7 @@
     }
   }
 
+  @Test
   public void testBooleanValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1052,7 +1090,8 @@
     assertEquals(false, conf.getBoolean("test.bool7", true));
     assertEquals(false, conf.getBoolean("test.bool8", false));
   }
-  
+
+  @Test
   public void testFloatValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1064,10 +1103,10 @@
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(3.1415f, conf.getFloat("test.float1", 0.0f));
-    assertEquals(3.1415f, conf.getFloat("test.float2", 0.0f));
-    assertEquals(-3.1415f, conf.getFloat("test.float3", 0.0f));
-    assertEquals(-3.1415f, conf.getFloat("test.float4", 0.0f));
+    assertEquals(3.1415f, conf.getFloat("test.float1", 0.0f), DOUBLE_DELTA);
+    assertEquals(3.1415f, conf.getFloat("test.float2", 0.0f), DOUBLE_DELTA);
+    assertEquals(-3.1415f, conf.getFloat("test.float3", 0.0f), DOUBLE_DELTA);
+    assertEquals(-3.1415f, conf.getFloat("test.float4", 0.0f), DOUBLE_DELTA);
     try {
       conf.getFloat("test.float5", 0.0f);
       fail("Property had invalid float value, but was read successfully.");
@@ -1075,7 +1114,8 @@
       // pass
     }
   }
-  
+
+  @Test
   public void testDoubleValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1087,10 +1127,10 @@
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals(3.1415, conf.getDouble("test.double1", 0.0));
-    assertEquals(3.1415, conf.getDouble("test.double2", 0.0));
-    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0));
-    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0));
+    assertEquals(3.1415, conf.getDouble("test.double1", 0.0), DOUBLE_DELTA);
+    assertEquals(3.1415, conf.getDouble("test.double2", 0.0), DOUBLE_DELTA);
+    assertEquals(-3.1415, conf.getDouble("test.double3", 0.0), DOUBLE_DELTA);
+    assertEquals(-3.1415, conf.getDouble("test.double4", 0.0), DOUBLE_DELTA);
     try {
       conf.getDouble("test.double5", 0.0);
       fail("Property had invalid double value, but was read successfully.");
@@ -1098,7 +1138,8 @@
       // pass
     }
   }
-  
+
+  @Test
   public void testGetClass() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1107,10 +1148,13 @@
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    assertEquals("java.lang.Integer", conf.getClass("test.class1", null).getCanonicalName());
-    assertEquals("java.lang.Integer", conf.getClass("test.class2", null).getCanonicalName());
+    assertEquals("java.lang.Integer",
+        conf.getClass("test.class1", null).getCanonicalName());
+    assertEquals("java.lang.Integer",
+        conf.getClass("test.class2", null).getCanonicalName());
   }
-  
+
+  @Test
   public void testGetClasses() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1126,14 +1170,15 @@
     assertArrayEquals(expectedNames, extractClassNames(classes1));
     assertArrayEquals(expectedNames, extractClassNames(classes2));
   }
-  
+
+  @Test
   public void testGetStringCollection() {
     Configuration c = new Configuration();
     c.set("x", " a, b\n,\nc ");
     Collection<String> strs = c.getTrimmedStringCollection("x");
     assertEquals(3, strs.size());
     assertArrayEquals(new String[]{ "a", "b", "c" },
-                      strs.toArray(new String[0]));
+        strs.toArray(new String[0]));
 
     // Check that the result is mutable
     strs.add("z");
@@ -1144,13 +1189,14 @@
     strs.add("z");
   }
 
+  @Test
   public void testGetTrimmedStringCollection() {
     Configuration c = new Configuration();
     c.set("x", "a, b, c");
     Collection<String> strs = c.getStringCollection("x");
     assertEquals(3, strs.size());
     assertArrayEquals(new String[]{ "a", " b", " c" },
-                      strs.toArray(new String[0]));
+        strs.toArray(new String[0]));
 
     // Check that the result is mutable
     strs.add("z");
@@ -1168,9 +1214,10 @@
     }
     return classNames;
   }
-  
+
   enum Dingo { FOO, BAR };
   enum Yak { RAB, FOO };
+  @Test
   public void testEnum() {
     Configuration conf = new Configuration();
     conf.setEnum("test.enum", Dingo.FOO);
@@ -1187,6 +1234,7 @@
     assertTrue(fail);
   }
 
+  @Test
   public void testEnumFromXml() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG_FOR_ENUM));
     startConfig();
@@ -1207,6 +1255,7 @@
     assertTrue(fail);
   }
 
+  @Test
   public void testTimeDuration() {
     Configuration conf = new Configuration(false);
     conf.setTimeDuration("test.time.a", 7L, SECONDS);
@@ -1240,17 +1289,18 @@
     assertEquals(30L, conf.getTimeDuration("test.time.d", 40, SECONDS));
 
     for (Configuration.ParsedTimeDuration ptd :
-         Configuration.ParsedTimeDuration.values()) {
+        Configuration.ParsedTimeDuration.values()) {
       conf.setTimeDuration("test.time.unit", 1, ptd.unit());
       assertEquals(1 + ptd.suffix(), conf.get("test.time.unit"));
       assertEquals(1, conf.getTimeDuration("test.time.unit", 2, ptd.unit()));
     }
   }
 
+  @Test
   public void testTimeDurationWarning() {
     // check warn for possible loss of precision
     final String warnFormat = "Possible loss of precision converting %s" +
-            " to %s for test.time.warn";
+        " to %s for test.time.warn";
     final ArrayList<String> warnchk = new ArrayList<>();
     Configuration wconf = new Configuration(false) {
       @Override
@@ -1284,6 +1334,7 @@
     assertEquals(2, warnchk.size());
   }
 
+  @Test
   public void testPattern() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1297,20 +1348,21 @@
     Pattern defaultPattern = Pattern.compile("x+");
     // Return default if missing
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("xxxxx", defaultPattern).pattern());
+        conf.getPattern("xxxxx", defaultPattern).pattern());
     // Return null if empty and default is null
     assertNull(conf.getPattern("test.pattern1", null));
     // Return default for empty
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("test.pattern1", defaultPattern).pattern());
+        conf.getPattern("test.pattern1", defaultPattern).pattern());
     // Return default for malformed
     assertEquals(defaultPattern.pattern(),
-                 conf.getPattern("test.pattern2", defaultPattern).pattern());
+        conf.getPattern("test.pattern2", defaultPattern).pattern());
     // Works for correct patterns
     assertEquals("a+b",
-                 conf.getPattern("test.pattern3", defaultPattern).pattern());
+        conf.getPattern("test.pattern3", defaultPattern).pattern());
   }
 
+  @Test
   public void testPropertySource() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1323,17 +1375,19 @@
     assertEquals(1, sources.length);
     assertEquals(
         "Resource string returned for a file-loaded property" +
-        " must be a proper absolute path",
+            " must be a proper absolute path",
         fileResource,
         new Path(sources[0]));
     assertArrayEquals("Resource string returned for a set() property must be " +
-        "\"programmatically\"",
+            "\"programmatically\"",
         new String[]{"programmatically"},
         conf.getPropertySources("fs.defaultFS"));
-    assertEquals("Resource string returned for an unset property must be null",
+    assertArrayEquals("Resource string returned for an unset property must "
+            + "be null",
         null, conf.getPropertySources("fs.defaultFoo"));
   }
-  
+
+  @Test
   public void testMultiplePropertySource() throws IOException {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1348,24 +1402,25 @@
     assertEquals("c", sources[2]);
     assertEquals(
         "Resource string returned for a file-loaded property" +
-        " must be a proper absolute path",
+            " must be a proper absolute path",
         fileResource,
         new Path(sources[3]));
   }
 
+  @Test
   public void testSocketAddress() {
     Configuration conf = new Configuration();
     final String defaultAddr = "host:1";
     final int defaultPort = 2;
     InetSocketAddress addr = null;
-    
+
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
-    
+
     conf.set("myAddress", "host2");
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals("host2:"+defaultPort, NetUtils.getHostPortString(addr));
-    
+
     conf.set("myAddress", "host2:3");
     addr = conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
     assertEquals("host2:3", NetUtils.getHostPortString(addr));
@@ -1381,35 +1436,38 @@
     } catch (IllegalArgumentException iae) {
       threwException = true;
       assertEquals("Does not contain a valid host:port authority: " +
-                   "bad:-port (configuration property 'myAddress')",
-                   iae.getMessage());
-      
+              "bad:-port (configuration property 'myAddress')",
+          iae.getMessage());
+
     } finally {
       assertTrue(threwException);
     }
   }
 
+  @Test
   public void testSetSocketAddress() {
     Configuration conf = new Configuration();
     NetUtils.addStaticResolution("host", "127.0.0.1");
     final String defaultAddr = "host:1";
-    
-    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);    
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
     conf.setSocketAddr("myAddress", addr);
     assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
   }
-  
+
+  @Test
   public void testUpdateSocketAddress() throws IOException {
     InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
     InetSocketAddress connectAddr = conf.updateConnectAddr("myAddress", addr);
     assertEquals(connectAddr.getHostName(), addr.getHostName());
-    
+
     addr = new InetSocketAddress(1);
     connectAddr = conf.updateConnectAddr("myAddress", addr);
     assertEquals(connectAddr.getHostName(),
-                 InetAddress.getLocalHost().getHostName());
+        InetAddress.getLocalHost().getHostName());
   }
 
+  @Test
   public void testReload() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1418,7 +1476,7 @@
     endConfig();
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
-    
+
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
     appendProperty("test.key1", "value1");
@@ -1426,23 +1484,23 @@
     endConfig();
     Path fileResource1 = new Path(CONFIG2);
     conf.addResource(fileResource1);
-    
+
     // add a few values via set.
     conf.set("test.key3", "value4");
     conf.set("test.key4", "value5");
-    
+
     assertEquals("final-value1", conf.get("test.key1"));
     assertEquals("value2", conf.get("test.key2"));
     assertEquals("value4", conf.get("test.key3"));
     assertEquals("value5", conf.get("test.key4"));
-    
+
     // change values in the test file...
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
     appendProperty("test.key1", "final-value1");
     appendProperty("test.key3", "final-value3", true);
     endConfig();
-    
+
     conf.reloadConfiguration();
     assertEquals("value1", conf.get("test.key1"));
     // overlayed property overrides.
@@ -1451,6 +1509,7 @@
     assertEquals("value5", conf.get("test.key4"));
   }
 
+  @Test
   public void testSize() {
     Configuration conf = new Configuration(false);
     conf.set("a", "A");
@@ -1458,6 +1517,7 @@
     assertEquals(2, conf.size());
   }
 
+  @Test
   public void testClear() {
     Configuration conf = new Configuration(false);
     conf.set("a", "A");
@@ -1470,6 +1530,7 @@
   public static class Fake_ClassLoader extends ClassLoader {
   }
 
+  @Test
   public void testClassLoader() {
     Configuration conf = new Configuration(false);
     conf.setQuietMode(false);
@@ -1477,7 +1538,7 @@
     Configuration other = new Configuration(conf);
     assertTrue(other.getClassLoader() instanceof Fake_ClassLoader);
   }
-  
+
   static class JsonConfiguration {
     JsonProperty[] properties;
 
@@ -1540,6 +1601,7 @@
     return ac;
   }
 
+  @Test
   public void testGetSetTrimmedNames() throws IOException {
     Configuration conf = new Configuration(false);
     conf.set(" name", "value");
@@ -1548,6 +1610,7 @@
     assertEquals("value", conf.getRaw("  name  "));
   }
 
+  @Test
   public void testDumpProperty() throws IOException {
     StringWriter outWriter = new StringWriter();
     ObjectMapper mapper = new ObjectMapper();
@@ -1662,15 +1725,16 @@
     }
   }
 
+  @Test
   public void testDumpConfiguration() throws IOException {
     StringWriter outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     String jsonStr = outWriter.toString();
     ObjectMapper mapper = new ObjectMapper();
-    JsonConfiguration jconf = 
-      mapper.readValue(jsonStr, JsonConfiguration.class);
+    JsonConfiguration jconf =
+        mapper.readValue(jsonStr, JsonConfiguration.class);
     int defaultLength = jconf.getProperties().length;
-    
+
     // add 3 keys to the existing configuration properties
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1681,7 +1745,7 @@
     Path fileResource = new Path(CONFIG);
     conf.addResource(fileResource);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     jsonStr = outWriter.toString();
@@ -1690,7 +1754,7 @@
     int length = jconf.getProperties().length;
     // check for consistency in the number of properties parsed in Json format.
     assertEquals(length, defaultLength+3);
-    
+
     //change few keys in another resource file
     out=new BufferedWriter(new FileWriter(CONFIG2));
     startConfig();
@@ -1700,14 +1764,14 @@
     Path fileResource1 = new Path(CONFIG2);
     conf.addResource(fileResource1);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(conf, outWriter);
     jsonStr = outWriter.toString();
     mapper = new ObjectMapper();
     jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
-    
-    // put the keys and their corresponding attributes into a hashmap for their 
+
+    // put the keys and their corresponding attributes into a hashmap for their
     // efficient retrieval
     HashMap<String,JsonProperty> confDump = new HashMap<String,JsonProperty>();
     for(JsonProperty prop : jconf.getProperties()) {
@@ -1718,7 +1782,7 @@
     assertEquals(false, confDump.get("test.key1").getIsFinal());
     assertEquals(fileResource1.toString(),
         confDump.get("test.key1").getResource());
-    // check if final parameter test.key2 is not changed, since it is first 
+    // check that final parameter test.key2 is unchanged, since it is first
     // loaded as final parameter
     assertEquals("value2", confDump.get("test.key2").getValue());
     assertEquals(true, confDump.get("test.key2").getIsFinal());
@@ -1729,7 +1793,7 @@
     assertEquals(false, confDump.get("test.key3").getIsFinal());
     assertEquals(fileResource.toString(),
         confDump.get("test.key3").getResource());
-    // check for resource to be "Unknown" for keys which are loaded using 'set' 
+    // check that the resource is "Unknown" for keys loaded using 'set'
     // and expansion of properties
     conf.set("test.key4", "value4");
     conf.set("test.key5", "value5");
@@ -1747,7 +1811,8 @@
     assertEquals("programmatically", confDump.get("test.key4").getResource());
     outWriter.close();
   }
-  
+
+  @Test
   public void testDumpConfiguratioWithoutDefaults() throws IOException {
     // check for case when default resources are not loaded
     Configuration config = new Configuration(false);
@@ -1755,12 +1820,12 @@
     Configuration.dumpConfiguration(config, outWriter);
     String jsonStr = outWriter.toString();
     ObjectMapper mapper = new ObjectMapper();
-    JsonConfiguration jconf = 
-      mapper.readValue(jsonStr, JsonConfiguration.class);
-    
+    JsonConfiguration jconf =
+        mapper.readValue(jsonStr, JsonConfiguration.class);
+
     //ensure that no properties are loaded.
     assertEquals(0, jconf.getProperties().length);
-    
+
     // add 2 keys
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -1770,13 +1835,13 @@
     Path fileResource = new Path(CONFIG);
     config.addResource(fileResource);
     out.close();
-    
+
     outWriter = new StringWriter();
     Configuration.dumpConfiguration(config, outWriter);
     jsonStr = outWriter.toString();
     mapper = new ObjectMapper();
     jconf = mapper.readValue(jsonStr, JsonConfiguration.class);
-    
+
     HashMap<String, JsonProperty>confDump = new HashMap<String, JsonProperty>();
     for (JsonProperty prop : jconf.getProperties()) {
       confDump.put(prop.getKey(), prop);
@@ -1795,6 +1860,7 @@
     }
   }
 
+  @Test
   public void testDumpSensitiveProperty() throws IOException {
     final String myPassword = "ThisIsMyPassword";
     Configuration testConf = new Configuration(false);
@@ -1812,6 +1878,7 @@
     }
   }
 
+  @Test
   public void testDumpSensitiveConfiguration() throws IOException {
     final String myPassword = "ThisIsMyPassword";
     Configuration testConf = new Configuration(false);
@@ -1829,6 +1896,7 @@
     }
   }
 
+  @Test
   public void testGetValByRegex() {
     Configuration conf = new Configuration();
     String key1 = "t.abc.key1";
@@ -1847,10 +1915,11 @@
     assertTrue("Picked out wrong key " + key4, !res.containsKey(key4));
   }
 
+  @Test
   public void testGetClassesShouldReturnDefaultValue() throws Exception {
     Configuration config = new Configuration();
-    Class<?>[] classes = 
-      config.getClasses("testClassName", Configuration.class);
+    Class<?>[] classes =
+        config.getClasses("testClassName", Configuration.class);
     assertEquals(
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 1, classes.length);
@@ -1858,6 +1927,7 @@
         classes[0]);
   }
 
+  @Test
   public void testGetClassesShouldReturnEmptyArray()
       throws Exception {
     Configuration config = new Configuration();
@@ -1867,7 +1937,8 @@
         "Not returning expected number of classes. Number of returned classes ="
             + classes.length, 0, classes.length);
   }
-  
+
+  @Test
   public void testSettingValueNull() throws Exception {
     Configuration config = new Configuration();
     try {
@@ -1880,6 +1951,7 @@
     }
   }
 
+  @Test
   public void testSettingKeyNull() throws Exception {
     Configuration config = new Configuration();
     try {
@@ -1891,6 +1963,7 @@
     }
   }
 
+  @Test
   public void testInvalidSubstitution() {
     final Configuration configuration = new Configuration(false);
 
@@ -1907,6 +1980,7 @@
     }
   }
 
+  @Test
   public void testIncompleteSubbing() {
     Configuration configuration = new Configuration(false);
     String key = "test.random.key";
@@ -1925,6 +1999,7 @@
     }
   }
 
+  @Test
   public void testBoolean() {
     boolean value = true;
     Configuration configuration = new Configuration();
@@ -1932,6 +2007,7 @@
     assertEquals(value, configuration.getBoolean("value", false));
   }
 
+  @Test
   public void testBooleanIfUnset() {
     boolean value = true;
     Configuration configuration = new Configuration();
@@ -1941,20 +2017,23 @@
     assertEquals(value, configuration.getBoolean("value", false));
   }
 
+  @Test
   public void testFloat() {
     float value = 1.0F;
     Configuration configuration = new Configuration();
     configuration.setFloat("value", value);
-    assertEquals(value, configuration.getFloat("value", 0.0F));
+    assertEquals(value, configuration.getFloat("value", 0.0F), DOUBLE_DELTA);
   }
-  
+
+  @Test
   public void testDouble() {
     double value = 1.0D;
     Configuration configuration = new Configuration();
     configuration.setDouble("value", value);
-    assertEquals(value, configuration.getDouble("value", 0.0D));
+    assertEquals(value, configuration.getDouble("value", 0.0D), DOUBLE_DELTA);
   }
 
+  @Test
   public void testInt() {
     int value = 1;
     Configuration configuration = new Configuration();
@@ -1962,6 +2041,7 @@
     assertEquals(value, configuration.getInt("value", 0));
   }
 
+  @Test
   public void testLong() {
     long value = 1L;
     Configuration configuration = new Configuration();
@@ -1969,16 +2049,18 @@
     assertEquals(value, configuration.getLong("value", 0L));
   }
 
+  @Test
   public void testStrings() {
     String [] strings = {"FOO","BAR"};
     Configuration configuration = new Configuration();
     configuration.setStrings("strings", strings);
     String [] returnStrings = configuration.getStrings("strings");
     for(int i=0;i<returnStrings.length;i++) {
-       assertEquals(strings[i], returnStrings[i]);
+      assertEquals(strings[i], returnStrings[i]);
     }
   }
-  
+
+  @Test
   public void testSetPattern() {
     Pattern testPattern = Pattern.compile("a+b");
     Configuration configuration = new Configuration();
@@ -1986,13 +2068,15 @@
     assertEquals(testPattern.pattern(),
         configuration.getPattern("testPattern", Pattern.compile("")).pattern());
   }
-  
+
+  @Test
   public void testGetClassByNameOrNull() throws Exception {
-   Configuration config = new Configuration();
-   Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
-   assertNotNull(clazz);
+    Configuration config = new Configuration();
+    Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
+    assertNotNull(clazz);
   }
 
+  @Test
   public void testGetFinalParameters() throws Exception {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -2015,6 +2099,7 @@
    * by SPARK-2546.
    * @throws Exception
    */
+  @Test
   public void testConcurrentAccesses() throws Exception {
     out = new BufferedWriter(new FileWriter(CONFIG));
     startConfig();
@@ -2055,6 +2140,7 @@
     // it's expected behaviour.
   }
 
+  @Test
   public void testNullValueProperties() throws Exception {
     Configuration conf = new Configuration();
     conf.setAllowNullValueProperties(true);
@@ -2068,6 +2154,62 @@
     assertEquals("value", conf.get("attr"));
   }
 
+  @Test
+  public void testGetPasswordDeprecatedKeyStored() throws Exception {
+    final String oldKey = "test.password.old.key";
+    final String newKey = "test.password.new.key";
+    final String password = "MyPasswordForDeprecatedKey";
+
+    final File tmpDir = GenericTestUtils.getRandomizedTestDir();
+    tmpDir.mkdirs();
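+    // Back a LocalJavaKeyStoreProvider with a keystore file inside a
+    // randomized scratch directory.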
+    final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
+        "file",  new File(tmpDir, "test.jks").toString(), null).toString();
+
+    conf = new Configuration(false);
+    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
+    CredentialProvider provider =
+        CredentialProviderFactory.getProviders(conf).get(0);
+    provider.createCredentialEntry(oldKey, password.toCharArray());
+    provider.flush();
+
+    Configuration.addDeprecation(oldKey, newKey);
+
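+    // The password was stored under the old key; once the deprecation
+    // mapping is added, both the new and the old key should resolve to it.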
+    assertThat(conf.getPassword(newKey),
+        CoreMatchers.is(password.toCharArray()));
+    assertThat(conf.getPassword(oldKey),
+        CoreMatchers.is(password.toCharArray()));
+
+    FileUtil.fullyDelete(tmpDir);
+  }
+
+  @Test
+  public void testGetPasswordByDeprecatedKey() throws Exception {
+    final String oldKey = "test.password.old.key";
+    final String newKey = "test.password.new.key";
+    final String password = "MyPasswordForDeprecatedKey";
+
+    final File tmpDir = GenericTestUtils.getRandomizedTestDir();
+    tmpDir.mkdirs();
+    final String ourUrl = new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
+        "file",  new File(tmpDir, "test.jks").toString(), null).toString();
+
+    conf = new Configuration(false);
+    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
+    CredentialProvider provider =
+        CredentialProviderFactory.getProviders(conf).get(0);
+    provider.createCredentialEntry(newKey, password.toCharArray());
+    provider.flush();
+
+    Configuration.addDeprecation(oldKey, newKey);
+
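+    // The password was stored under the new key; lookups through either the
+    // new or the deprecated name should return the same value.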
+    assertThat(conf.getPassword(newKey),
+        CoreMatchers.is(password.toCharArray()));
+    assertThat(conf.getPassword(oldKey),
+        CoreMatchers.is(password.toCharArray()));
+
+    FileUtil.fullyDelete(tmpDir);
+  }
+
   public static void main(String[] argv) throws Exception {
     junit.textui.TestRunner.main(new String[]{
       TestConfiguration.class.getName()
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
index fd2fa38..e15e699 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationSubclass.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.conf;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import java.util.Properties;
 
@@ -25,11 +26,12 @@
  * Created 21-Jan-2009 13:42:36
  */
 
-public class TestConfigurationSubclass extends TestCase {
+public class TestConfigurationSubclass {
   private static final String EMPTY_CONFIGURATION_XML
           = "/org/apache/hadoop/conf/empty-configuration.xml";
 
 
+  @Test
   public void testGetProps() {
     SubConf conf = new SubConf(true);
     Properties properties = conf.getProperties();
@@ -37,6 +39,7 @@
             properties.getProperty("hadoop.tmp.dir"));
   }
 
+  @Test
   public void testReload() throws Throwable {
     SubConf conf = new SubConf(true);
     assertFalse(conf.isReloaded());
@@ -45,6 +48,7 @@
     Properties properties = conf.getProperties();
   }
 
+  @Test
   public void testReloadNotQuiet() throws Throwable {
     SubConf conf = new SubConf(true);
     conf.setQuietMode(false);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 167daa5..fd01650 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -21,15 +21,14 @@
 import java.io.ByteArrayOutputStream;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
-
-public class TestDeprecatedKeys extends TestCase {
+public class TestDeprecatedKeys {
  
   //Tests a deprecated key
+  @Test
   public void testDeprecatedKeys() throws Exception {
     Configuration conf = new Configuration();
     conf.set("topology.script.file.name", "xyz");
@@ -39,6 +38,7 @@
   }
   
   //Tests reading / writing a conf file with deprecation after setting
+  @Test
   public void testReadWriteWithDeprecatedKeys() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean("old.config.yet.to.be.deprecated", true);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
index 57b7ff4..bc08e66 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestGetInstances.java
@@ -18,10 +18,11 @@
 package org.apache.hadoop.conf;
 
 import java.util.List;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
-public class TestGetInstances extends TestCase {
+public class TestGetInstances {
   
   interface SampleInterface {}
   
@@ -30,7 +31,7 @@
   static class SampleClass implements SampleInterface {
     SampleClass() {}
   }
-	
+
   static class AnotherClass implements ChildInterface {
     AnotherClass() {}
   }
@@ -39,6 +40,7 @@
    * Makes sure <code>Configuration.getInstances()</code> returns
    * instances of the required type.
    */
+  @Test
   public void testGetInstances() throws Exception {
     Configuration conf = new Configuration();
     
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
index 32938e3..e897423 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
@@ -22,6 +22,7 @@
 import java.net.URISyntaxException;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -40,7 +41,9 @@
 import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestKeyProviderCryptoExtension {
@@ -90,13 +93,7 @@
     KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
     assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
     assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
-    if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal encryption key material");
-    }
-    if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
-        encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal decrypted key material");
-    }
+
     // Decrypt it again and it should be the same
     KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
     assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
@@ -153,9 +150,6 @@
     final KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
     assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
     assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
-    if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal encryption key material");
-    }
 
     // Roll the EK
     kpExt.rollNewVersion(ek1.getEncryptionKeyName());
@@ -173,10 +167,7 @@
     assertEquals("Length of encryption key material and EEK material should "
             + "be the same", encryptionKey.getMaterial().length,
         ek2.getEncryptedKeyVersion().getMaterial().length);
-    if (Arrays.equals(ek2.getEncryptedKeyVersion().getMaterial(),
-        encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal decrypted key material");
-    }
+
     if (Arrays.equals(ek2.getEncryptedKeyVersion().getMaterial(),
         ek1.getEncryptedKeyVersion().getMaterial())) {
       fail("Re-encrypted EEK should have different material");
@@ -186,9 +177,6 @@
     final KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
     assertEquals(KeyProviderCryptoExtension.EK, k2.getVersionName());
     assertEquals(encryptionKey.getMaterial().length, k2.getMaterial().length);
-    if (Arrays.equals(k2.getMaterial(), encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal encryption key material");
-    }
 
     // Re-encrypting the same EEK with the same EK should be deterministic
     final KeyProviderCryptoExtension.EncryptedKeyVersion ek2a =
@@ -203,10 +191,7 @@
     assertEquals("Length of encryption key material and EEK material should "
             + "be the same", encryptionKey.getMaterial().length,
         ek2a.getEncryptedKeyVersion().getMaterial().length);
-    if (Arrays.equals(ek2a.getEncryptedKeyVersion().getMaterial(),
-        encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal decrypted key material");
-    }
+
     if (Arrays.equals(ek2a.getEncryptedKeyVersion().getMaterial(),
         ek1.getEncryptedKeyVersion().getMaterial())) {
       fail("Re-encrypted EEK should have different material");
@@ -227,10 +212,6 @@
     assertEquals("Length of encryption key material and EEK material should "
             + "be the same", encryptionKey.getMaterial().length,
         ek3.getEncryptedKeyVersion().getMaterial().length);
-    if (Arrays.equals(ek3.getEncryptedKeyVersion().getMaterial(),
-        encryptionKey.getMaterial())) {
-      fail("Encrypted key material should not equal decrypted key material");
-    }
 
     if (Arrays.equals(ek3.getEncryptedKeyVersion().getMaterial(),
         ek1.getEncryptedKeyVersion().getMaterial())) {
@@ -241,6 +222,78 @@
   }
 
   @Test
+  public void testReencryptEncryptedKeys() throws Exception {
+    List<EncryptedKeyVersion> ekvs = new ArrayList<>(4);
+    // Generate 2 new EEKs @v0 and add them to the list.
+    ekvs.add(kpExt.generateEncryptedKey(encryptionKey.getName()));
+    ekvs.add(kpExt.generateEncryptedKey(encryptionKey.getName()));
+
+    // Roll the EK
+    kpExt.rollNewVersion(ekvs.get(0).getEncryptionKeyName());
+    // Generate 1 new EEK @v1 and add it to the list.
+    ekvs.add(kpExt.generateEncryptedKey(encryptionKey.getName()));
+
+    // Roll the EK again
+    kpExt.rollNewVersion(ekvs.get(0).getEncryptionKeyName());
+    // Generate 1 new EEK @v2 and add it to the list.
+    ekvs.add(kpExt.generateEncryptedKey(encryptionKey.getName()));
+
+    // Keep a deep copy of the originals for later verification.
+    List<EncryptedKeyVersion> ekvsOrig = new ArrayList<>(ekvs.size());
+    for (EncryptedKeyVersion ekv : ekvs) {
+      ekvsOrig.add(new EncryptedKeyVersion(ekv.getEncryptionKeyName(),
+          ekv.getEncryptionKeyVersionName(), ekv.getEncryptedKeyIv(),
+          ekv.getEncryptedKeyVersion()));
+    }
+
+    // Reencrypt ekvs
+    kpExt.reencryptEncryptedKeys(ekvs);
+
+    // Verify each ekv
+    for (int i = 0; i < ekvs.size(); ++i) {
+      final EncryptedKeyVersion ekv = ekvs.get(i);
+      final EncryptedKeyVersion orig = ekvsOrig.get(i);
+      assertEquals("Version name should be EEK",
+          KeyProviderCryptoExtension.EEK,
+          ekv.getEncryptedKeyVersion().getVersionName());
+      assertEquals("Encryption key name should be " + ENCRYPTION_KEY_NAME,
+          ENCRYPTION_KEY_NAME, ekv.getEncryptionKeyName());
+      assertNotNull("Expected encrypted key material",
+          ekv.getEncryptedKeyVersion().getMaterial());
+      assertEquals("Length of encryption key material and EEK material should "
+              + "be the same", encryptionKey.getMaterial().length,
+          ekv.getEncryptedKeyVersion().getMaterial().length);
+      assertFalse(
+          "Encrypted key material should not equal encryption key material",
+          Arrays.equals(ekv.getEncryptedKeyVersion().getMaterial(),
+              encryptionKey.getMaterial()));
+
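+      // EEKs 0-2 were generated against key versions that have since been
+      // rolled, so re-encryption must change their material; EEK 3 was
+      // generated at the latest version and should come back unchanged.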
+      if (i < 3) {
+        assertFalse("Re-encrypted EEK should have different material",
+            Arrays.equals(ekv.getEncryptedKeyVersion().getMaterial(),
+                orig.getEncryptedKeyVersion().getMaterial()));
+      } else {
+        assertTrue("Re-encrypted EEK should have same material",
+            Arrays.equals(ekv.getEncryptedKeyVersion().getMaterial(),
+                orig.getEncryptedKeyVersion().getMaterial()));
+      }
+
+      // Decrypt the new EEK into an EK and check it
+      final KeyVersion kv = kpExt.decryptEncryptedKey(ekv);
+      assertEquals(KeyProviderCryptoExtension.EK, kv.getVersionName());
+
+      // Decrypt it again and it should be the same
+      KeyVersion kv1 = kpExt.decryptEncryptedKey(ekv);
+      assertArrayEquals(kv.getMaterial(), kv1.getMaterial());
+
+      // Verify decrypting the new EEK and orig EEK gives the same material.
+      final KeyVersion origKv = kpExt.decryptEncryptedKey(orig);
+      assertTrue("Returned EEK and original EEK should both decrypt to the "
+          + "same kv.", Arrays.equals(origKv.getMaterial(), kv.getMaterial()));
+    }
+  }
+
+  @Test
   public void testNonDefaultCryptoExtensionSelectionWithCachingKeyProvider()
           throws Exception {
     Configuration config = new Configuration();
@@ -342,6 +395,11 @@
     }
 
     @Override
+    public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+        throws IOException, GeneralSecurityException {
+    }
+
+    @Override
     public KeyVersion decryptEncryptedKey(
             EncryptedKeyVersion encryptedKeyVersion)
             throws IOException, GeneralSecurityException {
@@ -453,5 +511,10 @@
         throws IOException, GeneralSecurityException {
       return ekv;
     }
+
+    @Override
+    public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+        throws IOException, GeneralSecurityException {
+    }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
index 4009a60..f182fe5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAvroFSInput.java
@@ -24,9 +24,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestAvroFSInput extends TestCase {
+public class TestAvroFSInput {
 
   private static final String INPUT_DIR = "AvroFSInput";
 
@@ -34,6 +35,7 @@
     return new Path(GenericTestUtils.getTempPath(INPUT_DIR));
   }
 
+  @Test
   public void testAFSInput() throws Exception {
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
index 615d0b5..a22b765 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.fs;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,16 +32,16 @@
 import org.apache.hadoop.test.GenericTestUtils;
 
 /** This test makes sure that "DU" does not get to run on each call to getUsed */
-public class TestDU extends TestCase {
+public class TestDU {
   final static private File DU_DIR = GenericTestUtils.getTestDir("dutmp");
 
-  @Override
+  @Before
   public void setUp() {
-      FileUtil.fullyDelete(DU_DIR);
-      assertTrue(DU_DIR.mkdirs());
+    FileUtil.fullyDelete(DU_DIR);
+    assertTrue(DU_DIR.mkdirs());
   }
 
-  @Override
+  @After
   public void tearDown() throws IOException {
       FileUtil.fullyDelete(DU_DIR);
   }
@@ -69,6 +72,7 @@
    * @throws IOException
    * @throws InterruptedException
    */
+  @Test
   public void testDU() throws IOException, InterruptedException {
     final int writtenSize = 32*1024;   // writing 32K
     // Allow for extra 4K on-disk slack for local file systems
@@ -107,6 +111,8 @@
         duSize >= writtenSize &&
         writtenSize <= (duSize + slack));
   }
+
+  @Test
   public void testDUGetUsedWillNotReturnNegative() throws IOException {
     File file = new File(DU_DIR, "data");
     assertTrue(file.createNewFile());
@@ -118,6 +124,7 @@
     assertTrue(String.valueOf(duSize), duSize >= 0L);
   }
 
+  @Test
   public void testDUSetInitialValue() throws IOException {
     File file = new File(DU_DIR, "dataX");
     createFile(file, 8192);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
index a2f0905..5ed743f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java
@@ -23,12 +23,12 @@
 import java.net.URI;
 import java.util.Iterator;
 
-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.junit.Test;
 
-public class TestFilterFs extends TestCase {
+public class TestFilterFs {
 
   private static final Log LOG = FileSystem.LOG;
 
@@ -41,7 +41,8 @@
       return null;
     }
   }
-  
+
+  @Test
   public void testFilterFileSystem() throws Exception {
     for (Method m : AbstractFileSystem.class.getDeclaredMethods()) {
       if (Modifier.isStatic(m.getModifiers()))
@@ -69,6 +70,7 @@
   
   // Test that FilterFs will accept an AbstractFileSystem to be filtered which
   // has an optional authority, such as ViewFs
+  @Test
   public void testFilteringWithNonrequiredAuthority() throws Exception {
     Configuration conf = new Configuration();
     ConfigUtil.addLink(conf, "custom", "/mnt", URI.create("file:///"));
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
index 87265f4..f43480e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetFileBlockLocations.java
@@ -22,7 +22,10 @@
 import java.util.Comparator;
 import java.util.Random;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -30,7 +33,7 @@
 /**
  * Testing the correctness of FileSystem.getFileBlockLocations.
  */
-public class TestGetFileBlockLocations extends TestCase {
+public class TestGetFileBlockLocations {
   private static String TEST_ROOT_DIR = GenericTestUtils.getTempPath(
       "testGetFileBlockLocations");
   private static final int FileLength = 4 * 1024 * 1024; // 4MB
@@ -39,11 +42,8 @@
   private FileSystem fs;
   private Random random;
 
-  /**
-   * @see TestCase#setUp()
-   */
-  @Override
-  protected void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     conf = new Configuration();
     Path rootPath = new Path(TEST_ROOT_DIR);
     path = new Path(rootPath, "TestGetFileBlockLocations");
@@ -91,15 +91,14 @@
       assertTrue(locations.length == 0);
     }
   }
-  /**
-   * @see TestCase#tearDown()
-   */
-  @Override
-  protected void tearDown() throws IOException {
+
+  @After
+  public void tearDown() throws IOException {
     fs.delete(path, true);
     fs.close();
   }
 
+  @Test
   public void testFailureNegativeParameters() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     try {
@@ -117,6 +116,7 @@
     }
   }
 
+  @Test
   public void testGetFileBlockLocations1() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     oneTest(0, (int) status.getLen(), status);
@@ -130,6 +130,7 @@
     }
   }
 
+  @Test
   public void testGetFileBlockLocations2() throws IOException {
     FileStatus status = fs.getFileStatus(path);
     for (int i = 0; i < 1000; ++i) {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
index b0466b8..9d75ba0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGlobExpander.java
@@ -20,10 +20,12 @@
 import java.io.IOException;
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestGlobExpander extends TestCase {
+public class TestGlobExpander {
 
+  @Test
   public void testExpansionIsIdentical() throws IOException {
     checkExpansionIsIdentical("");
     checkExpansionIsIdentical("/}");
@@ -35,6 +37,7 @@
     checkExpansionIsIdentical("p{a\\/b,c\\/d}s");
   }
 
+  @Test
   public void testExpansion() throws IOException {
     checkExpansion("{a/b}", "a/b");
     checkExpansion("/}{a/b}", "/}a/b");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocatedFileStatus.java
new file mode 100644
index 0000000..4490f92
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocatedFileStatus.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.mockito.Mockito.mock;
+
+/**
+ * This class tests the LocatedFileStatus class.
+ */
+public class TestLocatedFileStatus {
+  @Test
+  public void testDeprecatedConstruction() throws IOException {
+    BlockLocation[] locs = new BlockLocation[] {mock(BlockLocation.class)};
+    final boolean isDir = false;
+    final int repl = 3;
+    final long blocksize = 64 * 1024 * 1024;
+    final long modificationTime = 0;
+    final long accessTime = 0;
+
+    // We should be able to pass null for the permission.
+    LocatedFileStatus lfsNullPerm = new LocatedFileStatus(1, isDir,
+        repl, blocksize, modificationTime, accessTime, null, null, null,
+        null, new Path("/some-file.txt"), locs);
+    FsPermission permission = mock(FsPermission.class);
+
+    // We should also be able to pass a non-null permission.
+    LocatedFileStatus lfsNonNullPerm = new LocatedFileStatus(1, isDir,
+        repl, blocksize, modificationTime, accessTime, permission, null,
+        null, null, new Path("/some-file.txt"), locs);
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 7a5b25e..12aed29 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -33,20 +33,21 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.TrashPolicyDefault.Emptier;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
 
 /**
  * This class tests commands from Trash.
  */
-public class TestTrash extends TestCase {
+public class TestTrash {
 
   private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
       "testTrash"));
@@ -507,19 +508,22 @@
     }
   }
 
+  @Test
   public void testTrash() throws IOException {
     Configuration conf = new Configuration();
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     trashShell(FileSystem.getLocal(conf), TEST_DIR);
   }
 
+  @Test
   public void testNonDefaultFS() throws IOException {
     Configuration conf = new Configuration();
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     conf.set("fs.defaultFS", "invalid://host/bar/foo");
     trashNonDefaultFS(conf);
   }
-  
+
+  @Test
   public void testPluggableTrash() throws IOException {
     Configuration conf = new Configuration();
 
@@ -604,6 +608,7 @@
     verifyTrashPermission(FileSystem.getLocal(conf), conf);
   }
 
+  @Test
   public void testTrashEmptier() throws Exception {
     Configuration conf = new Configuration();
     // Trash with 12 second deletes and 6 seconds checkpoints
@@ -665,12 +670,9 @@
     emptierThread.interrupt();
     emptierThread.join();
   }
-  
-  /**
-   * @see TestCase#tearDown()
-   */
-  @Override
-  protected void tearDown() throws IOException {
+
+  @After
+  public void tearDown() throws IOException {
     File trashDir = new File(TEST_DIR.toUri().getPath());
     if (trashDir.exists() && !FileUtil.fullyDelete(trashDir)) {
       throw new IOException("Cannot remove data directory: " + trashDir);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
index 41c4d47..799471b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTruncatedInputBug.java
@@ -20,16 +20,17 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
 
 /**
  * test for the input truncation bug when mark/reset is used.
  * HADOOP-1489
  */
-public class TestTruncatedInputBug extends TestCase {
+public class TestTruncatedInputBug {
   private static String TEST_ROOT_DIR =
       GenericTestUtils.getTestDir().getAbsolutePath();
   
@@ -49,6 +50,7 @@
    * checksum file system currently depends on the request size
    * >= bytesPerSum to work properly.
    */
+  @Test
   public void testTruncatedInputBug() throws IOException {
     final int ioBufSize = 512;
     final int fileSize = ioBufSize*4;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index 6368a57..a22985d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -21,11 +21,14 @@
 
 import org.apache.hadoop.conf.Configuration;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import static org.apache.hadoop.fs.permission.FsAction.*;
 
-public class TestFsPermission extends TestCase {
+public class TestFsPermission {
+
+  @Test
   public void testFsAction() {
     //implies
     for(FsAction a : FsAction.values()) {
@@ -53,6 +56,7 @@
    * Ensure that when manually specifying permission modes we get
    * the expected values back out for all combinations
    */
+  @Test
   public void testConvertingPermissions() {
     for(short s = 0; s <= 01777; s++) {
       assertEquals(s, new FsPermission(s).toShort());
@@ -80,6 +84,7 @@
     assertEquals(02000, s);
   }
 
+  @Test
   public void testSpecialBitsToString() {
     for (boolean sb : new boolean[] { false, true }) {
       for (FsAction u : FsAction.values()) {
@@ -106,6 +111,7 @@
     }
   }
 
+  @Test
   public void testFsPermission() {
     String symbolic = "-rwxrwxrwx";
 
@@ -132,6 +138,7 @@
     }
   }
 
+  @Test
   public void testSymbolicPermission() {
     for (int i = 0; i < SYMBOLIC.length; ++i) {
       short val = 0777;
@@ -146,6 +153,7 @@
     }
   }
 
+  @Test
   public void testUMaskParser() throws IOException {
     Configuration conf = new Configuration();
     
@@ -163,6 +171,7 @@
     }
   }
 
+  @Test
   public void testSymbolicUmasks() {
     Configuration conf = new Configuration();
     
@@ -176,6 +185,7 @@
     assertEquals(0111, FsPermission.getUMask(conf).toShort());
   }
 
+  @Test
   public void testBadUmasks() {
     Configuration conf = new Configuration();
     
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index 6ec6e0f..ca7e466 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -682,4 +682,17 @@
       stopHttpServer(myServer2);
     }
   }
+
+  @Test
+  public void testBacklogSize() throws Exception {
+    final int backlogSize = 2048;
+    Configuration conf = new Configuration();
+    conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize);
+    HttpServer2 srv = createServer("test", conf);
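+    // Reach into the server's private listener list to check that the
+    // configured backlog was applied to the connector's accept queue.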
+    List<?> listeners = (List<?>) Whitebox.getInternalState(srv,
+            "listeners");
+    ServerConnector listener = (ServerConnector) listeners.get(0);
+    assertEquals(backlogSize, listener.getAcceptQueueSize());
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
index 5239ed6..8f5dd04 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
@@ -22,9 +22,12 @@
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.apache.hadoop.security.authentication.util.Signer;
@@ -32,6 +35,7 @@
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.ietf.jgss.GSSException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -45,7 +49,14 @@
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
+import java.security.AccessController;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashSet;
 import java.util.Properties;
+import java.util.Set;
+import javax.security.auth.Subject;
+import javax.servlet.ServletContext;
+
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -72,16 +83,25 @@
   private static MiniKdc testMiniKDC;
   private static File secretFile = new File(testRootDir, SECRET_STR);
 
+  private static UserGroupInformation authUgi;
+
   @BeforeClass
   public static void setUp() throws Exception {
     try {
       testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
       testMiniKDC.start();
       testMiniKDC.createPrincipal(
-          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
+          httpSpnegoKeytabFile, HTTP_USER + "/localhost", "keytab-user");
     } catch (Exception e) {
       assertTrue("Couldn't setup MiniKDC", false);
     }
+
+    System.setProperty("sun.security.krb5.debug", "true");
+    Configuration conf = new Configuration();
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+    authUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        "keytab-user", httpSpnegoKeytabFile.toString());
     Writer w = new FileWriter(secretFile);
     w.write("secret");
     w.close();
@@ -209,6 +229,226 @@
     }
   }
 
+  @Test
+  public void testSessionCookie() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
+        AuthenticationFilterInitializer.class.getName());
+    conf.set(PREFIX + "type", "kerberos");
+    conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
+    conf.set(PREFIX + "signer.secret.provider",
+        TestSignerSecretProvider.class.getName());
+
+    conf.set(PREFIX + "kerberos.keytab",
+        httpSpnegoKeytabFile.getAbsolutePath());
+    conf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal);
+    conf.set(PREFIX + "cookie.domain", realm);
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+        true);
+
+    // setup logs dir
+    System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
+
+    // Create http server to test.
+    HttpServer2 httpServer = getCommonBuilder()
+        .setConf(conf)
+        .build();
+    httpServer.start();
+
+    // Get signer to encrypt token
+    final Signer signer = new Signer(new TestSignerSecretProvider());
+    final AuthenticatedURL authUrl = new AuthenticatedURL();
+
+    final URL url = new URL("http://" + NetUtils.getHostPortString(
+        httpServer.getConnectorAddress(0)) + "/conf");
+
+    // this illustrates an inconsistency with AuthenticatedURL: the
+    // authenticator is only called when the token is not set.  if the
+    // authenticator fails, it must throw an AuthenticationException to
+    // the caller, yet the caller may still see a 401 for subsequent
+    // requests that require re-authentication, such as after token
+    // expiration.
+    final UserGroupInformation simpleUgi =
+        UserGroupInformation.createRemoteUser("simple-user");
+
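+    // first, exercise the full token lifecycle as the kerberos-authenticated
+    // user.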
+    authUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        TestSignerSecretProvider.rollSecret();
+        HttpURLConnection conn = null;
+        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+
+        // initial request should trigger authentication and set the token.
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        String cookie = token.toString();
+
+        // token should not change.
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        Assert.assertEquals(cookie, token.toString());
+
+        // roll secret to invalidate token.
+        TestSignerSecretProvider.rollSecret();
+        conn = authUrl.openConnection(url, token);
+        // this may or may not happen.  under normal circumstances the
+        // jdk will silently renegotiate and the client never sees a 401.
+        // however in some cases the jdk will give up doing spnego.  since
+        // the token is already set, the authenticator isn't invoked (which
+        // would do the spnego if the jdk doesn't), which causes the client
+        // to see a 401.
+        if (conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
+          // if this happens, the token should be cleared which means the
+          // next request should succeed and receive a new token.
+          Assert.assertFalse(token.isSet());
+          conn = authUrl.openConnection(url, token);
+        }
+
+        // token should change.
+        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        Assert.assertNotEquals(cookie, token.toString());
+        cookie = token.toString();
+
+        // token should not change.
+        for (int i = 0; i < 3; i++) {
+          conn = authUrl.openConnection(url, token);
+          Assert.assertEquals("attempt" + i,
+              HttpURLConnection.HTTP_OK, conn.getResponseCode());
+          Assert.assertTrue(token.isSet());
+          Assert.assertEquals(cookie, token.toString());
+        }
+
+        // blow out the kerberos creds to verify only the auth token is used.
+        Subject s = Subject.getSubject(AccessController.getContext());
+        Set<Object> oldCreds = new HashSet<>(s.getPrivateCredentials());
+        s.getPrivateCredentials().clear();
+
+        // token should not change.
+        for (int i = 0; i < 3; i++) {
+          try {
+            conn = authUrl.openConnection(url, token);
+            Assert.assertEquals("attempt" + i,
+                HttpURLConnection.HTTP_OK, conn.getResponseCode());
+          } catch (AuthenticationException ae) {
+            Assert.fail("attempt " + i + ": " + ae);
+          }
+          Assert.assertTrue(token.isSet());
+          Assert.assertEquals(cookie, token.toString());
+        }
+
+        // invalidate token.  connections should fail now and token should be
+        // unset.
+        TestSignerSecretProvider.rollSecret();
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(
+            HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+        Assert.assertFalse(token.isSet());
+        Assert.assertEquals("", token.toString());
+
+        // restore the kerberos creds, should work again.
+        s.getPrivateCredentials().addAll(oldCreds);
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(
+            HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        cookie = token.toString();
+
+        // token should not change.
+        for (int i = 0; i < 3; i++) {
+          conn = authUrl.openConnection(url, token);
+          Assert.assertEquals("attempt" + i,
+              HttpURLConnection.HTTP_OK, conn.getResponseCode());
+          Assert.assertTrue(token.isSet());
+          Assert.assertEquals(cookie, token.toString());
+        }
+        return null;
+      }
+    });
+
+    simpleUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        TestSignerSecretProvider.rollSecret();
+        AuthenticatedURL authUrl = new AuthenticatedURL();
+        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+        HttpURLConnection conn = null;
+
+        // initial connect with an unset token will trigger the
+        // authenticator, which should fail since we have no creds and
+        // leave the token unset.
+        try {
+          authUrl.openConnection(url, token);
+          Assert.fail("should fail with no credentials");
+        } catch (AuthenticationException ae) {
+          Assert.assertNotNull(ae.getCause());
+          Assert.assertEquals(GSSException.class, ae.getCause().getClass());
+          GSSException gsse = (GSSException)ae.getCause();
+          Assert.assertEquals(GSSException.NO_CRED, gsse.getMajor());
+        } catch (Throwable t) {
+          Assert.fail("Unexpected exception" + t);
+        }
+        Assert.assertFalse(token.isSet());
+
+        // create a valid token and save its value.
+        token = getEncryptedAuthToken(signer, "valid");
+        String cookie = token.toString();
+
+        // server should accept the token.  after the request the token
+        // should be set to the same value (i.e. the server didn't reissue
+        // the cookie).
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        Assert.assertEquals(cookie, token.toString());
+
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+        Assert.assertTrue(token.isSet());
+        Assert.assertEquals(cookie, token.toString());
+
+        // change the secret to effectively invalidate the cookie.  see above
+        // regarding inconsistency.  the authenticator has no way to know the
+        // token is bad, so the client will encounter a 401 instead of
+        // AuthenticationException.
+        TestSignerSecretProvider.rollSecret();
+        conn = authUrl.openConnection(url, token);
+        Assert.assertEquals(
+            HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+        Assert.assertFalse(token.isSet());
+        Assert.assertEquals("", token.toString());
+        return null;
+      }
+    });
+  }
+
+  public static class TestSignerSecretProvider extends SignerSecretProvider {
+    static int n = 0;
+    static byte[] secret;
+
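+    // rolling the secret invalidates cookies signed with the previous
+    // secret, letting the test force re-authentication on demand.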
+    static void rollSecret() {
+      secret = ("secret[" + (n++) + "]").getBytes();
+    }
+
+    public TestSignerSecretProvider() {
+    }
+
+    @Override
+    public void init(Properties config, ServletContext servletContext,
+            long tokenValidity) throws Exception {
+      rollSecret();
+    }
+
+    @Override
+    public byte[] getCurrentSecret() {
+      return secret;
+    }
+
+    @Override
+    public byte[][] getAllSecrets() {
+      return new byte[][]{secret};
+    }
+  }
 
   private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer,
       String user) throws Exception {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 6b1cd29..d82a2f1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -25,7 +25,9 @@
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.times;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -39,13 +41,12 @@
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
 import org.mockito.Mockito;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 
-public class TestFairCallQueue extends TestCase {
+public class TestFairCallQueue {
   private FairCallQueue<Schedulable> fcq;
 
   private Schedulable mockCall(String id, int priority) {
@@ -65,6 +66,7 @@
   }
 
   @SuppressWarnings("deprecation")
+  @Before
   public void setUp() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -74,6 +76,7 @@
 
   // Validate that the total capacity of all subqueues equals
   // the maxQueueSize for different values of maxQueueSize
+  @Test
   public void testTotalCapacityOfSubQueues() {
     Configuration conf = new Configuration();
     FairCallQueue<Schedulable> fairCallQueue;
@@ -291,11 +294,12 @@
 
-  //
-  // Ensure that FairCallQueue properly implements BlockingQueue
-  //
+  // Ensure that FairCallQueue properly implements BlockingQueue
+  @Test
   public void testPollReturnsNullWhenEmpty() {
     assertNull(fcq.poll());
   }
 
+  @Test
   public void testPollReturnsTopCallWhenNotEmpty() {
     Schedulable call = mockCall("c");
     assertTrue(fcq.offer(call));
@@ -306,6 +310,7 @@
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testOfferSucceeds() {
 
     for (int i = 0; i < 5; i++) {
@@ -316,6 +321,7 @@
     assertEquals(5, fcq.size());
   }
 
+  @Test
   public void testOfferFailsWhenFull() {
     for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
 
@@ -324,6 +330,7 @@
     assertEquals(5, fcq.size());
   }
 
+  @Test
   public void testOfferSucceedsWhenScheduledLowPriority() {
     // Scheduler will schedule into queue 0 x 5, then queue 1
     int mockedPriorities[] = {0, 0, 0, 0, 0, 1, 0};
@@ -334,10 +341,12 @@
     assertEquals(6, fcq.size());
   }
 
+  @Test
   public void testPeekNullWhenEmpty() {
     assertNull(fcq.peek());
   }
 
+  @Test
   public void testPeekNonDestructive() {
     Schedulable call = mockCall("c", 0);
     assertTrue(fcq.offer(call));
@@ -347,6 +356,7 @@
     assertEquals(1, fcq.size());
   }
 
+  @Test
   public void testPeekPointsAtHead() {
     Schedulable call = mockCall("c", 0);
     Schedulable next = mockCall("b", 0);
@@ -356,10 +366,12 @@
     assertEquals(call, fcq.peek()); // Peek points at the head
   }
 
+  @Test
   public void testPollTimeout() throws InterruptedException {
     assertNull(fcq.poll(10, TimeUnit.MILLISECONDS));
   }
 
+  @Test
   public void testPollSuccess() throws InterruptedException {
     Schedulable call = mockCall("c", 0);
     assertTrue(fcq.offer(call));
@@ -369,6 +381,7 @@
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testOfferTimeout() throws InterruptedException {
     for (int i = 0; i < 5; i++) {
       assertTrue(fcq.offer(mockCall("c"), 10, TimeUnit.MILLISECONDS));
@@ -380,6 +393,7 @@
   }
 
   @SuppressWarnings("deprecation")
+  @Test
   public void testDrainTo() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -397,6 +411,7 @@
   }
 
   @SuppressWarnings("deprecation")
+  @Test
   public void testDrainToWithLimit() {
     Configuration conf = new Configuration();
     conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
@@ -413,16 +428,19 @@
     assertEquals(2, fcq2.size());
   }
 
+  @Test
   public void testInitialRemainingCapacity() {
     assertEquals(10, fcq.remainingCapacity());
   }
 
+  @Test
   public void testFirstQueueFullRemainingCapacity() {
     while (fcq.offer(mockCall("c"))) ; // Queue 0 will fill up first, then queue 1
 
     assertEquals(5, fcq.remainingCapacity());
   }
 
+  @Test
   public void testAllQueuesFullRemainingCapacity() {
     int[] mockedPriorities = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0};
     int i = 0;
@@ -432,6 +450,7 @@
     assertEquals(10, fcq.size());
   }
 
+  @Test
   public void testQueuesPartialFilledRemainingCapacity() {
     int[] mockedPriorities = {0, 1, 0, 1, 0};
     for (int i = 0; i < 5; i++) { fcq.offer(mockCall("c", mockedPriorities[i])); }
@@ -555,12 +574,14 @@
   }
 
   // Make sure put will overflow into lower queues when the top is full
+  @Test
   public void testPutOverflows() throws InterruptedException {
     // We can fit more than 5, even though the scheduler suggests the top queue
     assertCanPut(fcq, 8, 8);
     assertEquals(8, fcq.size());
   }
 
+  @Test
   public void testPutBlocksWhenAllFull() throws InterruptedException {
     assertCanPut(fcq, 10, 10); // Fill up
     assertEquals(10, fcq.size());
@@ -569,10 +590,12 @@
     assertCanPut(fcq, 0, 1); // Will block
   }
 
+  @Test
   public void testTakeBlocksWhenEmpty() throws InterruptedException {
     assertCanTake(fcq, 0, 1);
   }
 
+  @Test
   public void testTakeRemovesCall() throws InterruptedException {
     Schedulable call = mockCall("c");
     fcq.offer(call);
@@ -581,6 +604,7 @@
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testTakeTriesNextQueue() throws InterruptedException {
 
     // A mux which only draws from q 0
@@ -597,6 +621,7 @@
     assertEquals(0, fcq.size());
   }
 
+  @Test
   public void testFairCallQueueMXBean() throws Exception {
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     ObjectName mxbeanName = new ObjectName(
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
index 9fea50e..d41a587 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
@@ -20,7 +20,8 @@
 
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.node.ContainerNode;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;
@@ -33,7 +34,6 @@
 import org.apache.log4j.spi.LoggerFactory;
 import org.apache.log4j.spi.LoggerRepository;
 import org.apache.log4j.spi.ThrowableInformation;
-import org.junit.Test;
 
 import java.io.IOException;
 import java.io.StringWriter;
@@ -42,7 +42,7 @@
 import java.util.Enumeration;
 import java.util.Vector;
 
-public class TestLog4Json extends TestCase {
+public class TestLog4Json {
 
   private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
index e201787..0d0d5b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
@@ -19,15 +19,12 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 
-import junit.framework.TestCase;
-import org.junit.Test;
-
-public class TestScriptBasedMapping extends TestCase {
-
-
+public class TestScriptBasedMapping {
   
   public TestScriptBasedMapping() {
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
index 77da45b..8638591 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
@@ -19,13 +19,12 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 
-import junit.framework.TestCase;
-import org.junit.Test;
-
-public class TestScriptBasedMappingWithDependency extends TestCase {
+public class TestScriptBasedMappingWithDependency {
 
   
   public TestScriptBasedMappingWithDependency() {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
index 64cd9b7..9fae536 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
@@ -17,19 +17,21 @@
 package org.apache.hadoop.security;
 
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import java.util.Map;
 
-public class TestAuthenticationFilter extends TestCase {
+public class TestAuthenticationFilter {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
index 504f5a1..dac6a55 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.security;
 
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
@@ -30,9 +30,10 @@
  * This class is tested for {@link AuthenticationWithProxyUserFilter}
  * to verify configurations of this filter.
  */
-public class TestAuthenticationWithProxyUserFilter extends TestCase {
+public class TestAuthenticationWithProxyUserFilter {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
index 684ef3b..03fc4cb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
@@ -21,17 +21,18 @@
 import java.net.InetAddress;
 import java.util.Map;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.WhitelistBasedResolver;
 import org.apache.hadoop.util.TestFileBasedIPList;
 
-public class TestWhitelistBasedResolver extends TestCase {
+public class TestWhitelistBasedResolver {
 
   public static final Map<String, String> SASL_PRIVACY_PROPS =
     WhitelistBasedResolver.getSaslProperties(new Configuration());
 
+  @Test
   public void testFixedVariableAndLocalWhiteList() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
@@ -79,6 +80,7 @@
    * Check  for inclusion in whitelist
    * Check for exclusion from whitelist
    */
+  @Test
   public void testFixedAndLocalWhiteList() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
@@ -128,6 +130,7 @@
    * Add a bunch of subnets and IPSs to the whitelist
    * Check  for inclusion in whitelist with a null value
    */
+  @Test
   public void testNullIPAddress() throws IOException {
 
     String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
index 1741eb7..f6e5133 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
@@ -25,11 +25,12 @@
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
+import org.junit.Test;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
 
 /** Unit tests for Token */
-public class TestToken extends TestCase {
+public class TestToken {
 
   static boolean isEqual(Object a, Object b) {
     return a == null ? b == null : a.equals(b);
@@ -45,6 +46,7 @@
   /**
    * Test token serialization
    */
+  @Test
   public void testTokenSerialization() throws IOException {
     // Get a token
     Token<TokenIdentifier> sourceToken = new Token<TokenIdentifier>();
@@ -76,7 +78,8 @@
     }
   }
 
-  public static void testEncodeWritable() throws Exception {
+  @Test
+  public void testEncodeWritable() throws Exception {
     String[] values = new String[]{"", "a", "bb", "ccc", "dddd", "eeeee",
         "ffffff", "ggggggg", "hhhhhhhh", "iiiiiiiii",
         "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM" +
@@ -96,7 +99,8 @@
       checkUrlSafe(encode);
     }
   }
-  
+
+  @Test
   public void testDecodeIdentifier() throws IOException {
     TestDelegationTokenSecretManager secretManager =
       new TestDelegationTokenSecretManager(0, 0, 0, 0);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
index 58935f2..f36c586 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.util;
 
-import junit.framework.TestCase;
-
-import org.apache.hadoop.util.AsyncDiskService;
 import org.junit.Test;
+import static org.junit.Assert.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * A test for AsyncDiskService.
  */
-public class TestAsyncDiskService extends TestCase {
+public class TestAsyncDiskService {
   
   public static final Logger LOG =
       LoggerFactory.getLogger(TestAsyncDiskService.class);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
index 3289d78..88f3b69 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
@@ -18,14 +18,11 @@
 package org.apache.hadoop.util;
 
 import java.io.IOException;
+import org.junit.Test;
 
-import org.apache.hadoop.util.CacheableIPList;
-import org.apache.hadoop.util.FileBasedIPList;
+import static org.junit.Assert.*;
 
-
-import junit.framework.TestCase;
-
-public class TestCacheableIPList extends TestCase {
+public class TestCacheableIPList {
 
   /**
    * Add a bunch of subnets and IPSs to the file
@@ -37,6 +34,7 @@
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testAddWithSleepForCacheTimeout() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
@@ -76,6 +74,7 @@
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testRemovalWithSleepForCacheTimeout() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23",
@@ -115,6 +114,7 @@
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testAddWithRefresh() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
@@ -154,6 +154,7 @@
    * test for inclusion
    * Check for exclusion
    */
+  @Test
   public void testRemovalWithRefresh() throws IOException, InterruptedException {
 
     String[] ips = {"10.119.103.112", "10.221.102.0/23",
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
index 0e79fd1..1bb595c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
@@ -22,14 +22,11 @@
 import java.util.Arrays;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.util.FileBasedIPList;
-import org.apache.hadoop.util.IPList;
 import org.junit.After;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
-
-public class TestFileBasedIPList extends TestCase {
+public class TestFileBasedIPList {
 
   @After
   public void tearDown() {
@@ -127,6 +124,7 @@
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testFileNotSpecified() {
 
     IPList ipl = new FileBasedIPList(null);
@@ -140,6 +138,7 @@
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testFileMissing() {
 
     IPList ipl = new FileBasedIPList("missingips.txt");
@@ -153,6 +152,7 @@
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testWithEmptyList() throws IOException {
     String[] ips = {};
 
@@ -168,6 +168,7 @@
    * test for inclusion
    * should be true as if the feature is turned off
    */
+  @Test
   public void testForBadFIle() throws IOException {
     String[] ips = { "10.221.102/23"};
 
@@ -187,6 +188,7 @@
    * Check  for inclusion with good entries
    * Check for exclusion
    */
+  @Test
   public void testWithAWrongEntry() throws IOException {
 
     String[] ips = {"10.119.103.112", "10.221.102/23", "10.221.204.1/23"};
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
index 3a4ebd5..8ba930b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFindClass.java
@@ -19,7 +19,7 @@
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.hadoop.util.FindClass;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
index 58537ad..85d649c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
@@ -21,12 +21,14 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 
-public class TestGenericsUtil extends TestCase {
+public class TestGenericsUtil {
 
+  @Test
   public void testToArray() {
 
     //test a list of size 10
@@ -45,6 +47,7 @@
     }
   }
 
+  @Test
   public void testWithEmptyList() {
     try {
       List<String> list = new ArrayList<String>();
@@ -57,6 +60,7 @@
     }
   }
 
+  @Test
   public void testWithEmptyList2() {
     List<String> list = new ArrayList<String>();
     //this method should not throw IndexOutOfBoundsException
@@ -81,6 +85,7 @@
     }
   }
 
+  @Test
   public void testWithGenericClass() {
 
     GenericClass<String> testSubject = new GenericClass<String>();
@@ -102,6 +107,7 @@
 
   }
 
+  @Test
   public void testGenericOptionsParser() throws Exception {
      GenericOptionsParser parser = new GenericOptionsParser(
         new Configuration(), new String[] {"-jt"});
@@ -116,6 +122,7 @@
             "y=z", parser.getConfiguration().get("x"));
   }
 
+  @Test
   public void testGetClass() {
 
     //test with Integer
@@ -131,6 +138,7 @@
             GenericClass.class, c2);
   }
 
+  @Test
   public void testIsLog4jLogger() throws Exception {
     assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
     assertTrue("The implementation is Log4j",
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
index 8f33c9d..3de0854 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
@@ -21,14 +21,15 @@
 import java.util.Arrays;
 import java.util.Random;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparator;
 
-public class TestIndexedSort extends TestCase {
+public class TestIndexedSort {
 
   public void sortAllEqual(IndexedSorter sorter) throws Exception {
     final int SAMPLE = 500;
@@ -128,6 +129,7 @@
   }
 
 
+  @Test
   public void testQuickSort() throws Exception {
     QuickSort sorter = new QuickSort();
     sortRandom(sorter);
@@ -158,6 +160,7 @@
     assertTrue(Arrays.equals(values, check));
   }
 
+  @Test
   public void testHeapSort() throws Exception {
     HeapSort sorter = new HeapSort();
     sortRandom(sorter);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
index 7589e5a..e4792dc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
@@ -19,13 +19,13 @@
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.Test;
 
-public class TestNativeLibraryChecker extends TestCase {
+public class TestNativeLibraryChecker {
   private void expectExit(String [] args) {
     try {
       // should throw exit exception
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java
index 2bcf508..486e89a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java
@@ -21,11 +21,15 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.curator.test.TestingServer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.util.ZKUtil;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.ACL;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -67,7 +71,7 @@
     curator.create(testZNode);
     assertTrue(curator.exists(testZNode));
     curator.setData(testZNode, expectedString, -1);
-    String testString = curator.getSringData("/test");
+    String testString = curator.getStringData("/test");
     assertEquals(expectedString, testString);
   }
 
@@ -92,4 +96,39 @@
     children = curator.getChildren("/");
     assertEquals(2, children.size());
   }
+
+  @Test
+  public void testTransaction() throws Exception {
+    List<ACL> zkAcl = ZKUtil.parseACLs(CommonConfigurationKeys.ZK_ACL_DEFAULT);
+    String fencingNodePath = "/fencing";
+    String node1 = "/node1";
+    String node2 = "/node2";
+    byte[] testData = "testData".getBytes("UTF-8");
+    assertFalse(curator.exists(fencingNodePath));
+    assertFalse(curator.exists(node1));
+    assertFalse(curator.exists(node2));
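+    // operations are staged in the transaction and applied atomically on
+    // commit(); nothing is visible in ZooKeeper before then.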
+    ZKCuratorManager.SafeTransaction txn = curator.createTransaction(
+        zkAcl, fencingNodePath);
+    txn.create(node1, testData, zkAcl, CreateMode.PERSISTENT);
+    txn.create(node2, testData, zkAcl, CreateMode.PERSISTENT);
+    assertFalse(curator.exists(fencingNodePath));
+    assertFalse(curator.exists(node1));
+    assertFalse(curator.exists(node2));
+    txn.commit();
+    assertFalse(curator.exists(fencingNodePath));
+    assertTrue(curator.exists(node1));
+    assertTrue(curator.exists(node2));
+    assertTrue(Arrays.equals(testData, curator.getData(node1)));
+    assertTrue(Arrays.equals(testData, curator.getData(node2)));
+
+    byte[] setData = "setData".getBytes("UTF-8");
+    txn = curator.createTransaction(zkAcl, fencingNodePath);
+    txn.setData(node1, setData, -1);
+    txn.delete(node2);
+    assertTrue(curator.exists(node2));
+    assertTrue(Arrays.equals(testData, curator.getData(node1)));
+    txn.commit();
+    assertFalse(curator.exists(node2));
+    assertTrue(Arrays.equals(setData, curator.getData(node1)));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java
index eb24394..273c673 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java
@@ -141,6 +141,12 @@
         throws IOException, GeneralSecurityException {
       return keyProviderCryptoExtension.reencryptEncryptedKey(ekv);
     }
+
+    @Override
+    public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+        throws IOException, GeneralSecurityException {
+      keyProviderCryptoExtension.reencryptEncryptedKeys(ekvs);
+    }
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 27cc05d..dfc6872 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import org.apache.hadoop.util.KMSUtil;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
@@ -53,6 +56,9 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
+import static org.apache.hadoop.util.KMSUtil.checkNotNull;
+
 /**
  * Class providing the REST bindings, via Jersey, for the KMS.
  */
@@ -64,7 +70,7 @@
     CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION, INVALIDATE_CACHE,
     GET_KEYS, GET_KEYS_METADATA,
     GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
-    GENERATE_EEK, DECRYPT_EEK, REENCRYPT_EEK
+    GENERATE_EEK, DECRYPT_EEK, REENCRYPT_EEK, REENCRYPT_EEK_BATCH
   }
 
   private KeyProviderCryptoExtension provider;
@@ -72,6 +78,8 @@
 
   static final Logger LOG = LoggerFactory.getLogger(KMS.class);
 
+  private static final int MAX_NUM_PER_BATCH = 10000;
+
   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
     kmsAudit= KMSWebApp.getKMSAudit();
@@ -109,7 +117,7 @@
       KMSWebApp.getAdminCallsMeter().mark();
       UserGroupInformation user = HttpUserGroupInformation.get();
       final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
-      KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
+      checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
       assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
       String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
       final String material;
@@ -158,7 +166,7 @@
       if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
         keyVersion = removeKeyMaterial(keyVersion);
       }
-      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      Map json = KMSUtil.toJSON(keyVersion);
       String requestURL = KMSMDCFilter.getURL();
       int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
       requestURL = requestURL.substring(0, idx);
@@ -181,7 +189,7 @@
       KMSWebApp.getAdminCallsMeter().mark();
       UserGroupInformation user = HttpUserGroupInformation.get();
       assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       LOG.debug("Deleting key with name {}.", name);
       user.doAs(new PrivilegedExceptionAction<Void>() {
         @Override
@@ -212,7 +220,7 @@
       KMSWebApp.getAdminCallsMeter().mark();
       UserGroupInformation user = HttpUserGroupInformation.get();
       assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       LOG.debug("Rolling key with name {}.", name);
       final String material = (String)
               jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
@@ -242,7 +250,7 @@
       if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
         keyVersion = removeKeyMaterial(keyVersion);
       }
-      Map json = KMSServerJSONUtils.toJSON(keyVersion);
+      Map json = KMSUtil.toJSON(keyVersion);
       LOG.trace("Exiting rolloverKey Method.");
       return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
               .build();
@@ -260,7 +268,7 @@
     try {
       LOG.trace("Entering invalidateCache Method.");
       KMSWebApp.getAdminCallsMeter().mark();
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       UserGroupInformation user = HttpUserGroupInformation.get();
       assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.INVALIDATE_CACHE, name);
       LOG.debug("Invalidating cache with key name {}.", name);
@@ -369,7 +377,7 @@
     try {
       LOG.trace("Entering getMetadata method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       KMSWebApp.getAdminCallsMeter().mark();
       assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
       LOG.debug("Getting metadata for key with name {}.", name);
@@ -403,7 +411,7 @@
     try {
       LOG.trace("Entering getCurrentVersion method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       KMSWebApp.getKeyCallsMeter().mark();
       assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
       LOG.debug("Getting key version for key with name {}.", name);
@@ -417,7 +425,7 @@
             }
       );
 
-      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      Object json = KMSUtil.toJSON(keyVersion);
       kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
       LOG.trace("Exiting getCurrentVersion method.");
       return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
@@ -436,7 +444,7 @@
     try {
       LOG.trace("Entering getKeyVersion method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(versionName, "versionName");
+      checkNotEmpty(versionName, "versionName");
       KMSWebApp.getKeyCallsMeter().mark();
       assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
       LOG.debug("Getting key with version name {}.", versionName);
@@ -453,7 +461,7 @@
       if (keyVersion != null) {
         kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
       }
-      Object json = KMSServerJSONUtils.toJSON(keyVersion);
+      Object json = KMSUtil.toJSON(keyVersion);
       LOG.trace("Exiting getKeyVersion method.");
       return Response.ok().type(MediaType.APPLICATION_JSON).entity(json)
               .build();
@@ -477,8 +485,8 @@
     try {
       LOG.trace("Entering generateEncryptedKeys method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(name, "name");
-      KMSClientProvider.checkNotNull(edekOp, "eekOp");
+      checkNotEmpty(name, "name");
+      checkNotNull(edekOp, "eekOp");
       LOG.debug("Generating encrypted key with name {}," +
               " the edek Operation is {}.", name, edekOp);
 
@@ -512,7 +520,7 @@
         kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
         retJSON = new ArrayList();
         for (EncryptedKeyVersion edek : retEdeks) {
-          ((ArrayList) retJSON).add(KMSServerJSONUtils.toJSON(edek));
+          ((ArrayList) retJSON).add(KMSUtil.toJSON(edek));
         }
       } else {
         StringBuilder error;
@@ -537,6 +545,64 @@
 
   @SuppressWarnings("rawtypes")
   @POST
+  @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
+      KMSRESTConstants.REENCRYPT_BATCH_SUB_RESOURCE)
+  @Consumes(MediaType.APPLICATION_JSON)
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Response reencryptEncryptedKeys(
+      @PathParam("name") final String name,
+      final List<Map> jsonPayload)
+      throws Exception {
+    LOG.trace("Entering reencryptEncryptedKeys method.");
+    try {
+      final Stopwatch sw = new Stopwatch().start();
+      checkNotEmpty(name, "name");
+      checkNotNull(jsonPayload, "jsonPayload");
+      final UserGroupInformation user = HttpUserGroupInformation.get();
+      KMSWebApp.getReencryptEEKBatchCallsMeter().mark();
+      if (jsonPayload.size() > MAX_NUM_PER_BATCH) {
+        LOG.warn("Payload size {} too big for reencryptEncryptedKeys from"
+            + " user {}.", jsonPayload.size(), user);
+      }
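+      // note that oversized batches are only logged, not rejected.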
+      assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.REENCRYPT_EEK_BATCH,
+          name);
+      LOG.debug("Batch reencrypting {} Encrypted Keys for key name {}",
+          jsonPayload.size(), name);
+      final List<EncryptedKeyVersion> ekvs =
+          KMSUtil.parseJSONEncKeyVersions(name, jsonPayload);
+      Preconditions.checkArgument(ekvs.size() == jsonPayload.size(),
+          "EncryptedKey size mismatch after parsing from json");
+      for (EncryptedKeyVersion ekv : ekvs) {
+        Preconditions.checkArgument(name.equals(ekv.getEncryptionKeyName()),
+            "All EncryptedKeys must be under the given key name " + name);
+      }
+
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          provider.reencryptEncryptedKeys(ekvs);
+          return null;
+        }
+      });
+      List retJSON = new ArrayList<>(ekvs.size());
+      for (EncryptedKeyVersion ekv: ekvs) {
+        retJSON.add(KMSUtil.toJSON(ekv));
+      }
+      kmsAudit.ok(user, KMSOp.REENCRYPT_EEK_BATCH, name,
+          "reencrypted " + ekvs.size() + " keys");
+      LOG.info("reencryptEncryptedKeys {} keys for key {} took {}",
+          jsonPayload.size(), name, sw.stop());
+      LOG.trace("Exiting reencryptEncryptedKeys method.");
+      return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
+          .build();
+    } catch (Exception e) {
+      LOG.debug("Exception in reencryptEncryptedKeys.", e);
+      throw e;
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  @POST
   @Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
       KMSRESTConstants.EEK_SUB_RESOURCE)
   @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
@@ -548,8 +614,8 @@
     try {
       LOG.trace("Entering decryptEncryptedKey method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(versionName, "versionName");
-      KMSClientProvider.checkNotNull(eekOp, "eekOp");
+      checkNotEmpty(versionName, "versionName");
+      checkNotNull(eekOp, "eekOp");
       LOG.debug("Decrypting key for {}, the edek Operation is {}.",
               versionName, eekOp);
 
@@ -558,13 +624,14 @@
       String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
       String encMaterialStr =
               (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
-      KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
+      checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
       final byte[] iv = Base64.decodeBase64(ivStr);
-      KMSClientProvider.checkNotNull(encMaterialStr,
+      checkNotNull(encMaterialStr,
           KMSRESTConstants.MATERIAL_FIELD);
       final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
       Object retJSON;
       if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
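+        // meter decrypt calls here, inside the branch, so reencrypt
+        // requests no longer inflate the decrypt count.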
+        KMSWebApp.getDecryptEEKCallsMeter().mark();
         assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK,
                 keyName);
 
@@ -582,9 +649,10 @@
               }
         );
 
-        retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
+        retJSON = KMSUtil.toJSON(retKeyVersion);
         kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
       } else if (eekOp.equals(KMSRESTConstants.EEK_REENCRYPT)) {
+        KMSWebApp.getReencryptEEKCallsMeter().mark();
         assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.REENCRYPT_EEK,
             keyName);
 
@@ -599,7 +667,7 @@
               }
             });
 
-        retJSON = KMSServerJSONUtils.toJSON(retEncryptedKeyVersion);
+        retJSON = KMSUtil.toJSON(retEncryptedKeyVersion);
         kmsAudit.ok(user, KMSOp.REENCRYPT_EEK, keyName, "");
       } else {
         StringBuilder error;
@@ -612,7 +680,6 @@
         LOG.error(error.toString());
         throw new IllegalArgumentException(error.toString());
       }
-      KMSWebApp.getDecryptEEKCallsMeter().mark();
       LOG.trace("Exiting handleEncryptedKeyOp method.");
       return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
           .build();
@@ -631,7 +698,7 @@
     try {
       LOG.trace("Entering getKeyVersions method.");
       UserGroupInformation user = HttpUserGroupInformation.get();
-      KMSClientProvider.checkNotEmpty(name, "name");
+      checkNotEmpty(name, "name");
       KMSWebApp.getKeyCallsMeter().mark();
       assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
       LOG.debug("Getting key versions for key {}", name);
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java
index 14a5451..f6f670b 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java
@@ -31,21 +31,23 @@
 import java.io.InputStream;
 import java.lang.annotation.Annotation;
 import java.lang.reflect.Type;
+import java.util.List;
 import java.util.Map;
 
 @Provider
 @Consumes(MediaType.APPLICATION_JSON)
 @InterfaceAudience.Private
-public class KMSJSONReader implements MessageBodyReader<Map> {
+public class KMSJSONReader implements MessageBodyReader<Object> {
 
   @Override
   public boolean isReadable(Class<?> type, Type genericType,
       Annotation[] annotations, MediaType mediaType) {
-    return type.isAssignableFrom(Map.class);
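+    // also accept List payloads, used by the batch reencrypt endpoint.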
+    return type.isAssignableFrom(Map.class)
+        || type.isAssignableFrom(List.class);
   }
 
   @Override
-  public Map readFrom(Class<Map> type, Type genericType,
+  public Object readFrom(Class<Object> type, Type genericType,
       Annotation[] annotations, MediaType mediaType,
       MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
       throws IOException, WebApplicationException {
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
index 24af81b..d7e6e46 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
@@ -17,11 +17,10 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
+import org.apache.hadoop.util.KMSUtil;
 
 import java.util.ArrayList;
 import java.util.LinkedHashMap;
@@ -33,48 +32,19 @@
  */
 @InterfaceAudience.Private
 public class KMSServerJSONUtils {
-  @SuppressWarnings("unchecked")
-  public static Map toJSON(KeyProvider.KeyVersion keyVersion) {
-    Map json = new LinkedHashMap();
-    if (keyVersion != null) {
-      json.put(KMSRESTConstants.NAME_FIELD,
-          keyVersion.getName());
-      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
-          keyVersion.getVersionName());
-      json.put(KMSRESTConstants.MATERIAL_FIELD,
-          Base64.encodeBase64URLSafeString(
-              keyVersion.getMaterial()));
-    }
-    return json;
-  }
 
   @SuppressWarnings("unchecked")
   public static List toJSON(List<KeyProvider.KeyVersion> keyVersions) {
     List json = new ArrayList();
     if (keyVersions != null) {
       for (KeyProvider.KeyVersion version : keyVersions) {
-        json.add(toJSON(version));
+        json.add(KMSUtil.toJSON(version));
       }
     }
     return json;
   }
 
   @SuppressWarnings("unchecked")
-  public static Map toJSON(EncryptedKeyVersion encryptedKeyVersion) {
-    Map json = new LinkedHashMap();
-    if (encryptedKeyVersion != null) {
-      json.put(KMSRESTConstants.VERSION_NAME_FIELD,
-          encryptedKeyVersion.getEncryptionKeyVersionName());
-      json.put(KMSRESTConstants.IV_FIELD,
-          Base64.encodeBase64URLSafeString(
-              encryptedKeyVersion.getEncryptedKeyIv()));
-      json.put(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD,
-          toJSON(encryptedKeyVersion.getEncryptedKeyVersion()));
-    }
-    return json;
-  }
-
-  @SuppressWarnings("unchecked")
   public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
     Map json = new LinkedHashMap();
     if (meta != null) {
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index ac24105..9a71fa2 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -60,6 +60,10 @@
       "generate_eek.calls.meter";
   private static final String DECRYPT_EEK_METER = METRICS_PREFIX +
       "decrypt_eek.calls.meter";
+  private static final String REENCRYPT_EEK_METER = METRICS_PREFIX +
+      "reencrypt_eek.calls.meter";
+  private static final String REENCRYPT_EEK_BATCH_METER = METRICS_PREFIX +
+      "reencrypt_eek_batch.calls.meter";
 
   private static Logger LOG;
   private static MetricRegistry metricRegistry;
@@ -72,6 +76,8 @@
   private static Meter unauthorizedCallsMeter;
   private static Meter unauthenticatedCallsMeter;
   private static Meter decryptEEKCallsMeter;
+  private static Meter reencryptEEKCallsMeter;
+  private static Meter reencryptEEKBatchCallsMeter;
   private static Meter generateEEKCallsMeter;
   private static Meter invalidCallsMeter;
   private static KMSAudit kmsAudit;
@@ -131,6 +137,10 @@
           new Meter());
       decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER,
           new Meter());
+      reencryptEEKCallsMeter = metricRegistry.register(REENCRYPT_EEK_METER,
+          new Meter());
+      reencryptEEKBatchCallsMeter = metricRegistry.register(
+          REENCRYPT_EEK_BATCH_METER, new Meter());
       adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
       keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
       invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
@@ -239,6 +249,14 @@
     return decryptEEKCallsMeter;
   }
 
+  public static Meter getReencryptEEKCallsMeter() {
+    return reencryptEEKCallsMeter;
+  }
+
+  public static Meter getReencryptEEKBatchCallsMeter() {
+    return reencryptEEKBatchCallsMeter;
+  }
+
   public static Meter getUnauthorizedCallsMeter() {
     return unauthorizedCallsMeter;
   }
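
The two new meters follow the existing register-then-mark pattern. A small self-contained sketch of that pattern using the Dropwizard Metrics API; the metric name below is hypothetical, since METRICS_PREFIX is defined outside this hunk:

    import com.codahale.metrics.Meter;
    import com.codahale.metrics.MetricRegistry;

    public final class MeterSketch {
      public static void main(String[] args) {
        // The KMS registers one Meter per operation type at startup;
        // each handled request then marks the corresponding meter once.
        MetricRegistry registry = new MetricRegistry();
        Meter reencryptBatch =
            registry.meter("hadoop.kms.reencrypt_eek_batch.calls.meter");
        reencryptBatch.mark();                          // one batch served
        System.out.println(reencryptBatch.getCount());  // -> 1
      }
    }
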
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
index d53cdea..868db76 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
@@ -289,6 +289,25 @@
   }
 
   @Override
+  public void reencryptEncryptedKeys(List<EncryptedKeyVersion> ekvs)
+      throws IOException, GeneralSecurityException {
+    if (ekvs.isEmpty()) {
+      return;
+    }
+    readLock.lock();
+    try {
+      for (EncryptedKeyVersion ekv : ekvs) {
+        verifyKeyVersionBelongsToKey(ekv);
+      }
+      final String keyName = ekvs.get(0).getEncryptionKeyName();
+      doAccessCheck(keyName, KeyOpType.GENERATE_EEK);
+      provider.reencryptEncryptedKeys(ekvs);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
   public KeyVersion getKeyVersion(String versionName) throws IOException {
     readLock.lock();
     try {
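
Note the single GENERATE_EEK access check per batch: it is keyed off the first entry's key name, which is safe because every entry is first verified to belong to that same key. A condensed usage sketch, based on the TestKMS changes further down (assumes a provider that already has a key named "k1"):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

    public final class ReencryptBatchSketch {
      // Condensed from the TestKMS scenario below; "kp" must point at a
      // provider that has an encryption key named "k1".
      static void reencryptAfterRollover(KeyProvider kp) throws Exception {
        KeyProviderCryptoExtension ext =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
        EncryptedKeyVersion e1 = ext.generateEncryptedKey("k1");
        EncryptedKeyVersion e2 = ext.generateEncryptedKey("k1");
        ext.rollNewVersion("k1");           // roll to a new key version
        List<EncryptedKeyVersion> batch =
            new ArrayList<>(Arrays.asList(e1, e2));
        ext.reencryptEncryptedKeys(batch);  // all entries now use the latest
      }
    }
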
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 4573b06..1dd89e9 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -506,7 +506,7 @@
 KMS supports access control for all non-read operations at the Key level. All Key Access operations are classified as :
 
 * MANAGEMENT - createKey, deleteKey, rolloverNewVersion
-* GENERATE_EEK - generateEncryptedKey, reencryptEncryptedKey, warmUpEncryptedKeys
+* GENERATE_EEK - generateEncryptedKey, reencryptEncryptedKey, reencryptEncryptedKeys, warmUpEncryptedKeys
 * DECRYPT_EEK - decryptEncryptedKey
 * READ - getKeyVersion, getKeyVersions, getMetadata, getKeysMetadata, getCurrentKey
 * ALL - all of the above
@@ -983,6 +983,64 @@
       }
     }
 
+$H4 Batch Re-encrypt Encrypted Keys With The Latest KeyVersion
+
+Batched version of the above re-encrypt Encrypted Key. This command takes a list of previously generated encrypted keys, re-encrypts them using the latest KeyVersion encryption key in the KeyProvider, and returns the re-encrypted encrypted keys in the same sequence. For each encrypted key, if the latest KeyVersion is the same as the one used to generate the encrypted key, no action is taken and the same encrypted key is returned.
+
+This is usually useful after a [Rollover](#Rollover_Key) of an encryption key. Re-encrypting an encrypted key allows it to be encrypted using the latest version of the encryption key, but still with the same key material and initialization vector.
+
+All encrypted keys in a batch request must be under the same encryption key name, but may potentially be under different versions of that encryption key.
+
+*REQUEST:*
+
+    POST http://HOST:PORT/kms/v1/key/<key-name>/_reencryptbatch
+    Content-Type: application/json
+
+    [
+      {
+        "versionName"         : "<encryptionVersionName>",
+        "iv"                  : "<iv>",            //base64
+        "encryptedKeyVersion" : {
+            "versionName"       : "EEK",
+            "material"          : "<material>",    //base64
+        }
+      },
+      {
+        "versionName"         : "<encryptionVersionName>",
+        "iv"                  : "<iv>",            //base64
+        "encryptedKeyVersion" : {
+            "versionName"       : "EEK",
+            "material"          : "<material>",    //base64
+        }
+      },
+      ...
+    ]
+
+*RESPONSE:*
+
+    200 OK
+    Content-Type: application/json
+
+    [
+      {
+        "versionName"         : "<encryptionVersionName>",
+        "iv"                  : "<iv>",            //base64
+        "encryptedKeyVersion" : {
+            "versionName"       : "EEK",
+            "material"          : "<material>",    //base64
+        }
+      },
+      {
+        "versionName"         : "<encryptionVersionName>",
+        "iv"                  : "<iv>",            //base64
+        "encryptedKeyVersion" : {
+            "versionName"       : "EEK",
+            "material"          : "<material>",    //base64
+        }
+      },
+      ...
+    ]
+
 $H4 Get Key Version
 
 *REQUEST:*
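
For illustration, a bare-bones HTTP view of the batch call. Host, port, and the JSON field values are placeholders; a real client would go through the authenticated KMS client classes, which also attach the hadoop.auth cookie:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public final class ReencryptBatchHttpSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder host:port and payload; "k1@0" and "..." stand in for
        // a real encryption key version name, iv, and material.
        URL url = new URL("http://kms-host:16000/kms/v1/key/k1/_reencryptbatch");
        String body = "[{\"versionName\":\"k1@0\",\"iv\":\"...\","
            + "\"encryptedKeyVersion\":{\"versionName\":\"EEK\","
            + "\"material\":\"...\"}}]";
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
          os.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println(conn.getResponseCode()); // expect 200 on success
      }
    }
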
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index a45906a..d6f2c25 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -97,6 +97,7 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.when;
 
 public class TestKMS {
@@ -722,6 +723,22 @@
         assertArrayEquals(k1.getMaterial(), k1r.getMaterial());
         assertEquals(kv.getMaterial().length, k1r.getMaterial().length);
 
+        // test re-encrypt batch
+        EncryptedKeyVersion ek3 = kpExt.generateEncryptedKey(kv.getName());
+        KeyVersion latest = kpExt.rollNewVersion(kv.getName());
+        List<EncryptedKeyVersion> ekvs = new ArrayList<>(3);
+        ekvs.add(ek1);
+        ekvs.add(ek2);
+        ekvs.add(ek3);
+        ekvs.add(ek1);
+        ekvs.add(ek2);
+        ekvs.add(ek3);
+        kpExt.reencryptEncryptedKeys(ekvs);
+        for (EncryptedKeyVersion ekv: ekvs) {
+          assertEquals(latest.getVersionName(),
+              ekv.getEncryptionKeyVersionName());
+        }
+
         // deleteKey()
         kp.deleteKey("k1");
 
@@ -1134,6 +1151,10 @@
                 KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
             EncryptedKeyVersion ekv = kpce.generateEncryptedKey("k1");
             kpce.reencryptEncryptedKey(ekv);
+            List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
+            ekvs.add(ekv);
+            ekvs.add(ekv);
+            kpce.reencryptEncryptedKeys(ekvs);
             return null;
           }
         });
@@ -1563,6 +1584,10 @@
             KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
                 createKeyProviderCryptoExtension(kp);
             kpCE.reencryptEncryptedKey(encKv);
+            List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
+            ekvs.add(encKv);
+            ekvs.add(encKv);
+            kpCE.reencryptEncryptedKeys(ekvs);
             return null;
           }
         });
@@ -1669,8 +1694,27 @@
               KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
                   createKeyProviderCryptoExtension(kp);
               kpCE.reencryptEncryptedKey(encKv);
+              fail("Should not have been able to reencryptEncryptedKey");
             } catch (AuthorizationException ex) {
-              LOG.info("Caught expected exception.", ex);
+              LOG.info("reencryptEncryptedKey caught expected exception.", ex);
+            }
+            return null;
+          }
+        });
+        doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            KeyProvider kp = createProvider(uri, conf);
+            try {
+              KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
+                  createKeyProviderCryptoExtension(kp);
+              List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
+              ekvs.add(encKv);
+              ekvs.add(encKv);
+              kpCE.reencryptEncryptedKeys(ekvs);
+              fail("Should not have been able to reencryptEncryptedKeys");
+            } catch (AuthorizationException ex) {
+              LOG.info("reencryptEncryptedKeys caught expected exception.", ex);
             }
             return null;
           }
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
index bd4ddbd..2e01f98 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
@@ -115,6 +115,9 @@
     kmsAudit.ok(luser, KMSOp.REENCRYPT_EEK, "k1", "testmsg");
     kmsAudit.ok(luser, KMSOp.REENCRYPT_EEK, "k1", "testmsg");
     kmsAudit.evictCacheForTesting();
+    kmsAudit.ok(luser, KMSOp.REENCRYPT_EEK_BATCH, "k1", "testmsg");
+    kmsAudit.ok(luser, KMSOp.REENCRYPT_EEK_BATCH, "k1", "testmsg");
+    kmsAudit.evictCacheForTesting();
     String out = getAndResetLogOutput();
     System.out.println(out);
     Assert.assertTrue(
@@ -128,7 +131,9 @@
             + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
             + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
             + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
-            + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser, accessCount=3, interval=[^m]{1,4}ms\\] testmsg"));
+            + "OK\\[op=REENCRYPT_EEK, key=k1, user=luser, accessCount=3, interval=[^m]{1,4}ms\\] testmsg"
+            + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser\\] testmsg"
+            + "OK\\[op=REENCRYPT_EEK_BATCH, key=k1, user=luser\\] testmsg"));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 88b273a..9239df3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -121,6 +121,7 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -131,11 +132,13 @@
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
+import org.apache.hadoop.hdfs.protocol.ReencryptionStatusIterator;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2639,6 +2642,23 @@
     return new EncryptionZoneIterator(namenode, tracer);
   }
 
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+      throws IOException {
+    checkOpen();
+    try (TraceScope ignored = newPathTraceScope("reencryptEncryptionZone",
+        zone)) {
+      namenode.reencryptEncryptionZone(zone, action);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class, UnresolvedPathException.class);
+    }
+  }
+
+  public RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus()
+      throws IOException {
+    checkOpen();
+    return new ReencryptionStatusIterator(namenode, tracer);
+  }
 
   public void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException {
@@ -2764,7 +2784,7 @@
     }
   }
 
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
     checkOpen();
     try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
       return namenode.getErasureCodingCodecs();
@@ -3044,7 +3064,8 @@
    *
    * @param src path to get the information for
    * @return Returns the policy information if file or directory on the path is
-   * erasure coded, null otherwise
+   * erasure coded, null otherwise. Null will also be returned if the
+   * directory or file has the REPLICATION policy.
    * @throws IOException
    */
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 748edcd..b58cf16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -61,4 +61,6 @@
   public boolean skipRollingRestartWait() {
     return false;
   }
+
+  public void sleepBeforeHedgedGet() {}
 }
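
The new hook is invoked at the top of the read callable in DFSInputStream (below), so a test can slow reads down and deterministically exercise the hedged path. A test sketch, assuming the injector exposes the static get()/set() swap commonly used by HDFS tests (real tests typically gate the sleep, e.g., only on the first call):

    @Test
    public void testHedgedReadWithInjectedDelay() throws Exception {
      DFSClientFaultInjector original = DFSClientFaultInjector.get();
      DFSClientFaultInjector.set(new DFSClientFaultInjector() {
        @Override
        public void sleepBeforeHedgedGet() {
          try {
            Thread.sleep(2000); // deliberately stall read callables
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      });
      try {
        // ... pread a file opened with hedged reads enabled ...
      } finally {
        DFSClientFaultInjector.set(original); // always restore the default
      }
    }
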
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6bff172..e3d7ade 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -182,6 +182,11 @@
     openInfo(false);
   }
 
+  @VisibleForTesting
+  public long getlastBlockBeingWrittenLengthForTesting() {
+    return lastBlockBeingWrittenLength;
+  }
+
   /**
    * Grab the open-file info from namenode
    * @param refreshLocatedBlocks whether to re-fetch locatedblocks
@@ -209,7 +214,8 @@
         }
         retriesForLastBlockLength--;
       }
-      if (retriesForLastBlockLength == 0) {
+      if (lastBlockBeingWrittenLength == -1
+          && retriesForLastBlockLength == 0) {
         throw new IOException("Could not obtain the last block locations.");
       }
     }
@@ -830,60 +836,85 @@
 
   private DNAddrPair chooseDataNode(LocatedBlock block,
       Collection<DatanodeInfo> ignoredNodes) throws IOException {
+    return chooseDataNode(block, ignoredNodes, true);
+  }
+
+  /**
+   * Choose datanode to read from.
+   *
+   * @param block             Block to choose a datanode address from.
+   * @param ignoredNodes      Nodes to skip when choosing.
+   * @param refetchIfRequired Whether to refetch block locations if no node
+   *                          can be chosen.
+   * @return The chosen DNAddrPair; may be null if refetchIfRequired is
+   * false.
+   */
+  private DNAddrPair chooseDataNode(LocatedBlock block,
+      Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
+      throws IOException {
     while (true) {
       DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
       if (result != null) {
         return result;
+      } else if (refetchIfRequired) {
+        block = refetchLocations(block, ignoredNodes);
       } else {
-        String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
-            deadNodes, ignoredNodes);
-        String blockInfo = block.getBlock() + " file=" + src;
-        if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
-          String description = "Could not obtain block: " + blockInfo;
-          DFSClient.LOG.warn(description + errMsg
-              + ". Throwing a BlockMissingException");
-          throw new BlockMissingException(src, description,
-              block.getStartOffset());
-        }
-
-        DatanodeInfo[] nodes = block.getLocations();
-        if (nodes == null || nodes.length == 0) {
-          DFSClient.LOG.info("No node available for " + blockInfo);
-        }
-        DFSClient.LOG.info("Could not obtain " + block.getBlock()
-            + " from any node: " + errMsg
-            + ". Will get new block locations from namenode and retry...");
-        try {
-          // Introducing a random factor to the wait time before another retry.
-          // The wait time is dependent on # of failures and a random factor.
-          // At the first time of getting a BlockMissingException, the wait time
-          // is a random number between 0..3000 ms. If the first retry
-          // still fails, we will wait 3000 ms grace period before the 2nd retry.
-          // Also at the second retry, the waiting window is expanded to 6000 ms
-          // alleviating the request rate from the server. Similarly the 3rd retry
-          // will wait 6000ms grace period before retry and the waiting window is
-          // expanded to 9000ms.
-          final int timeWindow = dfsClient.getConf().getTimeWindow();
-          double waitTime = timeWindow * failures +       // grace period for the last round of attempt
-              // expanding time window for each failure
-              timeWindow * (failures + 1) *
-              ThreadLocalRandom.current().nextDouble();
-          DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
-              " IOException, will wait for " + waitTime + " msec.");
-          Thread.sleep((long)waitTime);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new InterruptedIOException(
-              "Interrupted while choosing DataNode for read.");
-        }
-        deadNodes.clear(); //2nd option is to remove only nodes[blockId]
-        openInfo(true);
-        block = refreshLocatedBlock(block);
-        failures++;
+        return null;
       }
     }
   }
 
+  private LocatedBlock refetchLocations(LocatedBlock block,
+      Collection<DatanodeInfo> ignoredNodes) throws IOException {
+    String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
+        deadNodes, ignoredNodes);
+    String blockInfo = block.getBlock() + " file=" + src;
+    if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
+      String description = "Could not obtain block: " + blockInfo;
+      DFSClient.LOG.warn(description + errMsg
+          + ". Throwing a BlockMissingException");
+      throw new BlockMissingException(src, description,
+          block.getStartOffset());
+    }
+
+    DatanodeInfo[] nodes = block.getLocations();
+    if (nodes == null || nodes.length == 0) {
+      DFSClient.LOG.info("No node available for " + blockInfo);
+    }
+    DFSClient.LOG.info("Could not obtain " + block.getBlock()
+        + " from any node: " + errMsg
+        + ". Will get new block locations from namenode and retry...");
+    try {
+      // Introducing a random factor to the wait time before another retry.
+      // The wait time is dependent on # of failures and a random factor.
+      // At the first time of getting a BlockMissingException, the wait time
+      // is a random number between 0..3000 ms. If the first retry
+      // still fails, we will wait 3000 ms grace period before the 2nd retry.
+      // Also at the second retry, the waiting window is expanded to 6000 ms
+      // alleviating the request rate from the server. Similarly the 3rd retry
+      // will wait 6000ms grace period before retry and the waiting window is
+      // expanded to 9000ms.
+      final int timeWindow = dfsClient.getConf().getTimeWindow();
+      // grace period for the last round of attempt
+      double waitTime = timeWindow * failures +
+          // expanding time window for each failure
+          timeWindow * (failures + 1) *
+          ThreadLocalRandom.current().nextDouble();
+      DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+          " IOException, will wait for " + waitTime + " msec.");
+      Thread.sleep((long)waitTime);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      throw new InterruptedIOException(
+          "Interrupted while choosing DataNode for read.");
+    }
+    deadNodes.clear(); //2nd option is to remove only nodes[blockId]
+    openInfo(true);
+    block = refreshLocatedBlock(block);
+    failures++;
+    return block;
+  }
+
   /**
    * Get the best node from which to stream the data.
    * @param block LocatedBlock, containing nodes in priority order.
@@ -985,6 +1016,7 @@
     return new Callable<ByteBuffer>() {
       @Override
       public ByteBuffer call() throws Exception {
+        DFSClientFaultInjector.get().sleepBeforeHedgedGet();
         try (TraceScope ignored = dfsClient.getTracer().
             newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
           actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
@@ -1159,20 +1191,22 @@
         // We are starting up a 'hedged' read. We have a read already
         // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
         // If no nodes to do hedged reads against, pass.
+        boolean refetch = false;
         try {
-          chosenNode = getBestNodeDNAddrPair(block, ignored);
-          if (chosenNode == null) {
-            chosenNode = chooseDataNode(block, ignored);
+          chosenNode = chooseDataNode(block, ignored, false);
+          if (chosenNode != null) {
+            // Latest block, if refreshed internally
+            block = chosenNode.block;
+            bb = ByteBuffer.allocate(len);
+            Callable<ByteBuffer> getFromDataNodeCallable =
+                getFromOneDataNode(chosenNode, block, start, end, bb,
+                    corruptedBlocks, hedgedReadId++);
+            Future<ByteBuffer> oneMoreRequest =
+                hedgedService.submit(getFromDataNodeCallable);
+            futures.add(oneMoreRequest);
+          } else {
+            refetch = true;
           }
-          // Latest block, if refreshed internally
-          block = chosenNode.block;
-          bb = ByteBuffer.allocate(len);
-          Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
-              chosenNode, block, start, end, bb,
-              corruptedBlocks, hedgedReadId++);
-          Future<ByteBuffer> oneMoreRequest = hedgedService
-              .submit(getFromDataNodeCallable);
-          futures.add(oneMoreRequest);
         } catch (IOException ioe) {
           DFSClient.LOG.debug("Failed getting node for hedged read: {}",
               ioe.getMessage());
@@ -1190,6 +1224,9 @@
         } catch (InterruptedException ie) {
           // Ignore and retry
         }
+        if (refetch) {
+          refetchLocations(block, ignored);
+        }
         // We got here if exception. Ignore this node on next go around IFF
         // we found a chosenNode to hedge read against.
         if (chosenNode != null && chosenNode.info != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 2aa9e98c..842f23b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -17,6 +17,35 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.ElasticByteBufferPool;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
+import org.apache.htrace.core.TraceScope;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
@@ -32,45 +61,15 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.ByteBufferPool;
-import org.apache.hadoop.io.ElasticByteBufferPool;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-import org.apache.htrace.core.TraceScope;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 
 /**
@@ -777,7 +776,8 @@
     // should update the block group length based on the acked length
     final long sentBytes = currentBlockGroup.getNumBytes();
     final long ackedBytes = getNumAckedStripes() * cellSize * numDataBlocks;
-    Preconditions.checkState(ackedBytes <= sentBytes);
+    Preconditions.checkState(ackedBytes <= sentBytes,
+        "Acked:" + ackedBytes + ", Sent:" + sentBytes);
     currentBlockGroup.setNumBytes(ackedBytes);
     newBG.setNumBytes(ackedBytes);
     dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index f5ce0ff..838da7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1620,7 +1620,8 @@
   }
 
   /** update pipeline at the namenode */
-  private void updatePipeline(long newGS) throws IOException {
+  @VisibleForTesting
+  public void updatePipeline(long newGS) throws IOException {
     final ExtendedBlock oldBlock = block.getCurrentBlock();
     // the new GS has been propagated to all DN, it should be ok to update the
     // local block state
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index cd368d4..44caed6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -18,22 +18,14 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
@@ -54,24 +46,24 @@
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -82,29 +74,37 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 
 import javax.annotation.Nonnull;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
 
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
@@ -2315,6 +2315,38 @@
   }
 
   /* HDFS only */
+  public void reencryptEncryptionZone(final Path zone,
+      final ReencryptAction action) throws IOException {
+    final Path absF = fixRelativePart(zone);
+    new FileSystemLinkResolver<Void>() {
+      @Override
+      public Void doCall(final Path p) throws IOException {
+        dfs.reencryptEncryptionZone(getPathName(p), action);
+        return null;
+      }
+
+      @Override
+      public Void next(final FileSystem fs, final Path p) throws IOException {
+        if (fs instanceof DistributedFileSystem) {
+          DistributedFileSystem myDfs = (DistributedFileSystem) fs;
+          myDfs.reencryptEncryptionZone(p, action);
+          return null;
+        }
+        throw new UnsupportedOperationException(
+            "Cannot call reencryptEncryptionZone"
+                + " on a symlink to a non-DistributedFileSystem: " + zone
+                + " -> " + p);
+      }
+    }.resolve(this, absF);
+  }
+
+  /* HDFS only */
+  public RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus()
+      throws IOException {
+    return dfs.listReencryptionStatus();
+  }
+
+  /* HDFS only */
   public FileEncryptionInfo getFileEncryptionInfo(final Path path)
       throws IOException {
     Path absF = fixRelativePart(path);
@@ -2541,7 +2573,8 @@
    *
    * @param path The path of the file or directory
    * @return Returns the policy information if file or directory on the path
-   * is erasure coded, null otherwise
+   * is erasure coded, null otherwise. Null will also be returned if the
+   * directory or file has the REPLICATION policy.
    * @throws IOException
    */
   public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
@@ -2568,7 +2601,9 @@
   }
 
   /**
-   * Retrieve all the erasure coding policies supported by this file system.
+   * Retrieve all the erasure coding policies supported by this file system,
+   * including enabled, disabled and removed policies, but excluding
+   * REPLICATION policy.
    *
    * @return all erasure coding policies supported by this file system.
    * @throws IOException
@@ -2585,7 +2620,7 @@
    * @return all erasure coding codecs and coders supported by this file system.
    * @throws IOException
    */
-  public HashMap<String, String> getAllErasureCodingCodecs()
+  public Map<String, String> getAllErasureCodingCodecs()
       throws IOException {
     return dfs.getErasureCodingCodecs();
   }
@@ -2593,8 +2628,9 @@
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index abf341e..bfc6010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.hdfs.client;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.EnumSet;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -49,9 +43,17 @@
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.security.AccessControlException;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.EnumSet;
+
 /**
  * The public API for performing administrative functions on HDFS. Those writing
  * applications against HDFS should prefer this interface to directly accessing
@@ -370,6 +372,33 @@
   }
 
   /**
+   * Performs re-encryption action for a given encryption zone.
+   *
+   * @param zone the root of the encryption zone
+   * @param action the re-encrypt action
+   * @throws IOException If any error occurs when handling re-encrypt action.
+   */
+  public void reencryptEncryptionZone(final Path zone,
+      final ReencryptAction action) throws IOException {
+    dfs.reencryptEncryptionZone(zone, action);
+  }
+
+  /**
+   * Returns a RemoteIterator which can be used to list all re-encryption
+   * information. For large numbers of re-encryptions, the iterator will fetch
+   * the list in a number of small batches.
+   * <p>
+   * Since the list is fetched in batches, it does not represent a
+   * consistent snapshot of the entire set of zone re-encryption statuses.
+   * <p>
+   * This method can only be called by HDFS superusers.
+   */
+  public RemoteIterator<ZoneReencryptionStatus> listReencryptionStatus()
+      throws IOException {
+    return dfs.listReencryptionStatus();
+  }
+
+  /**
    * Returns the FileEncryptionInfo on the HdfsFileStatus for the given path.
    * The return value can be null if the path points to a directory, or a file
    * that is not in an encryption zone.
@@ -525,8 +554,9 @@
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.
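
A condensed sketch of driving the new admin API end to end; the URI and path are placeholders, the call must be made as an HDFS superuser, and the ZoneReencryptionStatus getters are assumed to match its builder fields (see the class later in this patch):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
    import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;

    public final class ReencryptZoneSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder namenode URI and zone path.
        HdfsAdmin admin =
            new HdfsAdmin(new URI("hdfs://namenode:8020"), new Configuration());
        admin.reencryptEncryptionZone(new Path("/zones/zone1"),
            ReencryptAction.START);
        // Poll progress; the iterator fetches statuses in small batches.
        RemoteIterator<ZoneReencryptionStatus> it =
            admin.listReencryptionStatus();
        while (it.hasNext()) {
          ZoneReencryptionStatus st = it.next();
          System.out.println(st.getZoneName() + " : " + st.getState());
        }
      }
    }
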
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 5667989..e99b099 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -389,7 +389,7 @@
 
     String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
     /**
-     * With default RS-6-3-64k erasure coding policy, each normal read could
+     * With default RS-6-3-1024k erasure coding policy, each normal read could
      * span 6 DNs, so this default value accommodates 3 read streams
      */
     int     THREADPOOL_SIZE_DEFAULT = 18;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 45c6b32..b550467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -19,8 +19,8 @@
 
 import java.io.IOException;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -41,6 +41,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1444,6 +1445,30 @@
       long prevId) throws IOException;
 
   /**
+   * Used to implement re-encryption of encryption zones.
+   *
+   * @param zone the encryption zone to re-encrypt.
+   * @param action the action for the re-encryption.
+   * @throws IOException
+   */
+  @AtMostOnce
+  void reencryptEncryptionZone(String zone, ReencryptAction action)
+      throws IOException;
+
+  /**
+   * Used to implement cursor-based batched listing of
+   * {@link ZoneReencryptionStatus}s.
+   *
+   * @param prevId ID of the last item in the previous batch. If there is no
+   *               previous batch, a negative value can be used.
+   * @return Batch of zone re-encryption statuses.
+   * @throws IOException
+   */
+  @Idempotent
+  BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(long prevId)
+      throws IOException;
+
+  /**
    * Set xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
@@ -1588,7 +1613,8 @@
 
 
   /**
-   * Get the erasure coding policies loaded in Namenode.
+   * Get the erasure coding policies loaded in Namenode, excluding REPLICATION
+   * policy.
    *
    * @throws IOException
    */
@@ -1601,10 +1627,11 @@
    * @throws IOException
    */
   @Idempotent
-  HashMap<String, String> getErasureCodingCodecs() throws IOException;
+  Map<String, String> getErasureCodingCodecs() throws IOException;
 
   /**
-   * Get the information about the EC policy for the path.
+   * Get the information about the EC policy for the path. Null will be
+   * returned if the directory or file has the REPLICATION policy.
    *
    * @param src path to get the info for
    * @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 7afc3776..6cbe0e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.Serializable;
-
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+
+import java.io.Serializable;
 
 /**
  * A policy about how to write/read/code an erasure coding file.
@@ -39,9 +40,11 @@
   private final ECSchema schema;
   private final int cellSize;
   private byte id;
+  private ErasureCodingPolicyState state;
+
 
   public ErasureCodingPolicy(String name, ECSchema schema,
-      int cellSize, byte id) {
+      int cellSize, byte id, ErasureCodingPolicyState state) {
     Preconditions.checkNotNull(name);
     Preconditions.checkNotNull(schema);
     Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
@@ -51,14 +54,22 @@
     this.schema = schema;
     this.cellSize = cellSize;
     this.id = id;
+    this.state = state;
+  }
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize,
+      byte id) {
+    this(name, schema, cellSize, id, ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize, byte id) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, id);
+    this(composePolicyName(schema, cellSize), schema, cellSize, id,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1);
+    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public static String composePolicyName(ECSchema schema, int cellSize) {
@@ -107,6 +118,35 @@
     this.id = id;
   }
 
+
+  public boolean isReplicationPolicy() {
+    return (id == ErasureCodeConstants.REPLICATION_POLICY_ID);
+  }
+
+  public ErasureCodingPolicyState getState() {
+    return state;
+  }
+
+  public void setState(ErasureCodingPolicyState state) {
+    this.state = state;
+  }
+
+  public boolean isSystemPolicy() {
+    return (this.id < ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
+  }
+
+  public boolean isEnabled() {
+    return (this.state == ErasureCodingPolicyState.ENABLED);
+  }
+
+  public boolean isDisabled() {
+    return (this.state == ErasureCodingPolicyState.DISABLED);
+  }
+
+  public boolean isRemoved() {
+    return (this.state == ErasureCodingPolicyState.REMOVED);
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -124,6 +164,7 @@
         .append(schema, rhs.schema)
         .append(cellSize, rhs.cellSize)
         .append(id, rhs.id)
+        .append(state, rhs.state)
         .isEquals();
   }
 
@@ -134,6 +175,7 @@
         .append(schema)
         .append(cellSize)
         .append(id)
+        .append(state)
         .toHashCode();
   }
 
@@ -142,7 +184,8 @@
     return "ErasureCodingPolicy=[" + "Name=" + name + ", "
         + "Schema=[" + schema.toString() + "], "
         + "CellSize=" + cellSize + ", "
-        + "Id=" + id
+        + "Id=" + id + ", "
+        + "State=" + state.toString()
         + "]";
   }
 }
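
A quick sketch of the new state field in use: the older constructors default to DISABLED, and the is*/setState accessors added above drive transitions. The ECSchema arguments are illustrative (RS codec, 6 data + 3 parity units):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public final class EcPolicyStateSketch {
      public static void main(String[] args) {
        ECSchema schema = new ECSchema("rs", 6, 3);
        // Auto-named policy with id -1; state defaults to DISABLED.
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy(schema, 1024 * 1024);
        System.out.println(policy.isDisabled());      // true
        policy.setState(ErasureCodingPolicyState.ENABLED);
        System.out.println(policy.isEnabled());       // true
      }
    }
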
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java
new file mode 100644
index 0000000..a8586dc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Value denotes the possible states of an ErasureCodingPolicy.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum ErasureCodingPolicyState {
+
+  /** Policy is disabled. This is the default state of a policy. */
+  DISABLED(1),
+  /** Policy is enabled. It can be applied to directories and files. */
+  ENABLED(2),
+  /**
+   * Policy is removed from the system. Because there may be files that
+   * still use this policy, it cannot be deleted from the system
+   * immediately. A removed policy can be re-enabled later. */
+  REMOVED(3);
+
+  private static final ErasureCodingPolicyState[] CACHED_VALUES =
+      ErasureCodingPolicyState.values();
+
+  private final int value;
+
+  ErasureCodingPolicyState(int v) {
+    value = v;
+  }
+
+  public int getValue() {
+    return value;
+  }
+
+  public static ErasureCodingPolicyState fromValue(int v) {
+    if (v > 0 && v <= CACHED_VALUES.length) {
+      return CACHED_VALUES[v - 1];
+    }
+    return null;
+  }
+
+  /** Read from in. */
+  public static ErasureCodingPolicyState read(DataInput in) throws IOException {
+    return fromValue(in.readByte());
+  }
+
+  /** Write to out. Must emit the same 1-based value that fromValue reads. */
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(value);
+  }
+}
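
A round-trip sketch for the wire format above; it holds because write() emits the same 1-based value that fromValue() decodes:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;

    public final class EcStateRoundTripSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        ErasureCodingPolicyState.ENABLED.write(new DataOutputStream(bytes));
        ErasureCodingPolicyState back = ErasureCodingPolicyState.read(
            new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(back); // ENABLED: write/read share the value
      }
    }
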
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 2681f12..8c44293 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -144,6 +144,13 @@
     ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE, IN_MAINTENANCE
   }
 
+  /**
+   * Re-encrypt encryption zone actions.
+   */
+  public enum ReencryptAction {
+    CANCEL, START
+  }
+
   /* Hidden constructor */
   protected HdfsConstants() {
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java
new file mode 100644
index 0000000..e83ab52
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus.State;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+/**
+ * A class representing information about re-encrypting encryption zones. It
+ * contains a collection of {@code ZoneReencryptionStatus}, one for each EZ.
+ * <p>
+ * FSDirectory lock is used for synchronization (except test-only methods, which
+ * are not protected).
+ */
+@InterfaceAudience.Private
+public final class ReencryptionStatus {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ReencryptionStatus.class);
+
+  public static final BatchedListEntries<ZoneReencryptionStatus> EMPTY_LIST =
+      new BatchedListEntries<>(Lists.newArrayList(), false);
+
+  /**
+   * The zones that were submitted for re-encryption. This should preserve
+   * the order of submission.
+   */
+  private final TreeMap<Long, ZoneReencryptionStatus> zoneStatuses;
+  // Metrics
+  private long zonesReencrypted;
+
+  public ReencryptionStatus() {
+    zoneStatuses = new TreeMap<>();
+  }
+
+  @VisibleForTesting
+  public ReencryptionStatus(ReencryptionStatus rhs) {
+    if (rhs != null) {
+      this.zoneStatuses = new TreeMap<>(rhs.zoneStatuses);
+      this.zonesReencrypted = rhs.zonesReencrypted;
+    } else {
+      zoneStatuses = new TreeMap<>();
+    }
+  }
+
+  @VisibleForTesting
+  public void resetMetrics() {
+    zonesReencrypted = 0;
+    for (Map.Entry<Long, ZoneReencryptionStatus> entry : zoneStatuses
+        .entrySet()) {
+      entry.getValue().resetMetrics();
+    }
+  }
+
+  public ZoneReencryptionStatus getZoneStatus(final Long zoneId) {
+    return zoneStatuses.get(zoneId);
+  }
+
+  public void markZoneForRetry(final Long zoneId) {
+    final ZoneReencryptionStatus zs = zoneStatuses.get(zoneId);
+    Preconditions.checkNotNull(zs, "Cannot find zone " + zoneId);
+    LOG.info("Zone {} will retry re-encryption", zoneId);
+    zs.setState(State.Submitted);
+  }
+
+  public void markZoneStarted(final Long zoneId) {
+    final ZoneReencryptionStatus zs = zoneStatuses.get(zoneId);
+    Preconditions.checkNotNull(zs, "Cannot find zone " + zoneId);
+    LOG.info("Zone {} starts re-encryption processing", zoneId);
+    zs.setState(State.Processing);
+  }
+
+  public void markZoneCompleted(final Long zoneId) {
+    final ZoneReencryptionStatus zs = zoneStatuses.get(zoneId);
+    Preconditions.checkNotNull(zs, "Cannot find zone " + zoneId);
+    LOG.info("Zone {} completed re-encryption.", zoneId);
+    zs.setState(State.Completed);
+    zonesReencrypted++;
+  }
+
+  public Long getNextUnprocessedZone() {
+    for (Map.Entry<Long, ZoneReencryptionStatus> entry : zoneStatuses
+        .entrySet()) {
+      if (entry.getValue().getState() == State.Submitted) {
+        return entry.getKey();
+      }
+    }
+    return null;
+  }
+
+  public boolean hasRunningZone(final Long zoneId) {
+    return zoneStatuses.containsKey(zoneId)
+        && zoneStatuses.get(zoneId).getState() != State.Completed;
+  }
+
+  /**
+   * @param zoneId the inode id of the encryption zone root.
+   * @return true if the zone was newly added.
+   */
+  private boolean addZoneIfNecessary(final Long zoneId, final String name,
+      final ReencryptionInfoProto reProto) {
+    if (!zoneStatuses.containsKey(zoneId)) {
+      LOG.debug("Adding zone {} for re-encryption status", zoneId);
+      Preconditions.checkNotNull(reProto);
+      final ZoneReencryptionStatus.Builder builder =
+          new ZoneReencryptionStatus.Builder();
+      builder.id(zoneId).zoneName(name)
+          .ezKeyVersionName(reProto.getEzKeyVersionName())
+          .submissionTime(reProto.getSubmissionTime())
+          .canceled(reProto.getCanceled())
+          .filesReencrypted(reProto.getNumReencrypted())
+          .fileReencryptionFailures(reProto.getNumFailures());
+      if (reProto.hasCompletionTime()) {
+        builder.completionTime(reProto.getCompletionTime());
+        builder.state(State.Completed);
+        zonesReencrypted++;
+      } else {
+        builder.state(State.Submitted);
+      }
+      if (reProto.hasLastFile()) {
+        builder.lastCheckpointFile(reProto.getLastFile());
+      }
+      return zoneStatuses.put(zoneId, builder.build()) == null;
+    }
+    return false;
+  }
+
+  public void updateZoneStatus(final Long zoneId, final String zonePath,
+      final ReencryptionInfoProto reProto) {
+    Preconditions.checkArgument(zoneId != null, "zoneId can't be null");
+    if (addZoneIfNecessary(zoneId, zonePath, reProto)) {
+      return;
+    }
+    final ZoneReencryptionStatus zs = getZoneStatus(zoneId);
+    assert zs != null;
+    if (reProto.hasCompletionTime()) {
+      zs.markZoneCompleted(reProto);
+    } else if (!reProto.hasLastFile() && !reProto.hasCompletionTime()) {
+      zs.markZoneSubmitted(reProto);
+    } else {
+      zs.updateZoneProcess(reProto);
+    }
+  }
+
+  public boolean removeZone(final Long zoneId) {
+    LOG.debug("Removing re-encryption status of zone {} ", zoneId);
+    return zoneStatuses.remove(zoneId) != null;
+  }
+
+  @VisibleForTesting
+  public int zonesQueued() {
+    int ret = 0;
+    for (Map.Entry<Long, ZoneReencryptionStatus> entry : zoneStatuses
+        .entrySet()) {
+      if (entry.getValue().getState() == State.Submitted) {
+        ret++;
+      }
+    }
+    return ret;
+  }
+
+  @VisibleForTesting
+  public int zonesTotal() {
+    return zoneStatuses.size();
+  }
+
+  @VisibleForTesting
+  public long getNumZonesReencrypted() {
+    return zonesReencrypted;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<Long, ZoneReencryptionStatus> entry : zoneStatuses
+        .entrySet()) {
+      sb.append("[zone:" + entry.getKey());
+      sb.append(" state:" + entry.getValue().getState());
+      sb.append(" lastProcessed:" + entry.getValue().getLastCheckpointFile());
+      sb.append(" filesReencrypted:" + entry.getValue().getFilesReencrypted());
+      sb.append(" fileReencryptionFailures:" + entry.getValue()
+          .getNumReencryptionFailures() + "]");
+    }
+    return sb.toString();
+  }
+
+  public NavigableMap<Long, ZoneReencryptionStatus> getZoneStatuses() {
+    return zoneStatuses;
+  }
+}
\ No newline at end of file
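A hedged sketch of the lifecycle above, fabricating a submission-time
ReencryptionInfoProto through the PBHelperClient helper added later in this
diff (imports from org.apache.hadoop.hdfs.protocol[.proto and .protocolPB]
elided; all values illustrative):

    ReencryptionStatus status = new ReencryptionStatus();
    ReencryptionInfoProto info = PBHelperClient.convert(
        "key@0", System.currentTimeMillis(), false, 0, 0, null, null);
    status.updateZoneStatus(1L, "/zone", info);  // added in state Submitted
    Long next = status.getNextUnprocessedZone(); // -> 1L
    status.markZoneStarted(next);                // Submitted -> Processing
    status.markZoneCompleted(next);              // Processing -> Completed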
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatusIterator.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatusIterator.java
new file mode 100644
index 0000000..c8a8857
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatusIterator.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+
+import java.io.IOException;
+
+/**
+ * ReencryptionStatusIterator is a remote iterator that iterates over the
+ * reencryption status of encryption zones.
+ * It supports retrying in case of namenode failover.
+ */
+@InterfaceAudience.Private
+public class ReencryptionStatusIterator
+    extends BatchedRemoteIterator<Long, ZoneReencryptionStatus> {
+
+  private final ClientProtocol namenode;
+  private final Tracer tracer;
+
+  public ReencryptionStatusIterator(ClientProtocol namenode, Tracer tracer) {
+    super(0L);
+    this.namenode = namenode;
+    this.tracer = tracer;
+  }
+
+  @Override
+  public BatchedEntries<ZoneReencryptionStatus> makeRequest(Long prevId)
+      throws IOException {
+    try (TraceScope ignored = tracer.newScope("listReencryptionStatus")) {
+      return namenode.listReencryptionStatus(prevId);
+    }
+  }
+
+  @Override
+  public Long elementToPrevKey(ZoneReencryptionStatus entry) {
+    return entry.getId();
+  }
+}
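Client-side usage follows the standard RemoteIterator pattern; a sketch,
assuming a ClientProtocol proxy and a Tracer are already in hand:

    RemoteIterator<ZoneReencryptionStatus> it =
        new ReencryptionStatusIterator(namenode, tracer);
    while (it.hasNext()) {
      ZoneReencryptionStatus zs = it.next();
      System.out.println(zs.getZoneName() + ": " + zs.getState());
    }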
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
index 2cd838b..c079fea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
@@ -40,8 +40,8 @@
   // Private constructor, this is a utility class.
   private SystemErasureCodingPolicies() {}
 
-  // 64 KB
-  private static final int DEFAULT_CELLSIZE = 64 * 1024;
+  // 1 MB
+  private static final int DEFAULT_CELLSIZE = 1024 * 1024;
 
   public static final byte RS_6_3_POLICY_ID = 1;
   private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -68,6 +68,13 @@
       new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
           DEFAULT_CELLSIZE, RS_10_4_POLICY_ID);
 
+  // REPLICATION policy is always enabled.
+  private static final ErasureCodingPolicy REPLICATION_POLICY =
+      new ErasureCodingPolicy(ErasureCodeConstants.REPLICATION_POLICY_NAME,
+          ErasureCodeConstants.REPLICATION_1_2_SCHEMA,
+          DEFAULT_CELLSIZE,
+          ErasureCodeConstants.REPLICATION_POLICY_ID);
+
   private static final List<ErasureCodingPolicy> SYS_POLICIES =
       Collections.unmodifiableList(Arrays.asList(
           SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4,
@@ -118,4 +125,11 @@
   public static ErasureCodingPolicy getByName(String name) {
     return SYSTEM_POLICIES_BY_NAME.get(name);
   }
+
+  /**
+   * Get the special REPLICATION policy.
+   */
+  public static ErasureCodingPolicy getReplicationPolicy() {
+    return REPLICATION_POLICY;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java
new file mode 100644
index 0000000..9022b9f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java
@@ -0,0 +1,257 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
+
+/**
+ * A class representing information about re-encryption of an encryption zone.
+ * <p>
+ * FSDirectory lock is used for synchronization (except test-only methods, which
+ * are not protected).
+ */
+public class ZoneReencryptionStatus {
+  /**
+   * State of re-encryption.
+   */
+  public enum State {
+    /**
+     * Submitted for re-encryption but hasn't been picked up.
+     * This is the initial state.
+     */
+    Submitted,
+    /**
+     * Currently re-encrypting.
+     */
+    Processing,
+    /**
+     * Re-encryption completed.
+     */
+    Completed
+  }
+
+  private long id;
+  private String zoneName;
+  /**
+   * The re-encryption status of the zone. Note this is an in-memory-only
+   * variable. After a failover the state is always Submitted, or Completed
+   * if completionTime != 0.
+   */
+  private State state;
+  private String ezKeyVersionName;
+  private long submissionTime;
+  private long completionTime;
+  private boolean canceled;
+  /**
+   * Name of last file processed. It's important to record name (not inode)
+   * because we want to restore to the position even if the inode is removed.
+   */
+  private String lastCheckpointFile;
+  private long filesReencrypted;
+  private long numReencryptionFailures;
+
+  /**
+   * Builder of {@link ZoneReencryptionStatus}.
+   */
+  public static final class Builder {
+    private long id;
+    private String zoneName;
+    private State state;
+    private String ezKeyVersionName;
+    private long submissionTime;
+    private long completionTime;
+    private boolean canceled;
+    private String lastCheckpointFile;
+    private long filesReencrypted;
+    private long fileReencryptionFailures;
+
+    public Builder() {
+    }
+
+    public Builder id(final long inodeid) {
+      id = inodeid;
+      return this;
+    }
+
+    public Builder zoneName(final String ezName) {
+      zoneName = ezName;
+      return this;
+    }
+
+    public Builder state(final State st) {
+      state = st;
+      return this;
+    }
+
+    public Builder ezKeyVersionName(final String ezkvn) {
+      ezKeyVersionName = ezkvn;
+      return this;
+    }
+
+    public Builder submissionTime(final long submission) {
+      submissionTime = submission;
+      return this;
+    }
+
+    public Builder completionTime(final long completion) {
+      completionTime = completion;
+      return this;
+    }
+
+    public Builder canceled(final boolean isCanceled) {
+      canceled = isCanceled;
+      return this;
+    }
+
+    public Builder lastCheckpointFile(final String lastFile) {
+      lastCheckpointFile = lastFile;
+      return this;
+    }
+
+    public Builder filesReencrypted(final long numReencrypted) {
+      filesReencrypted = numReencrypted;
+      return this;
+    }
+
+    public Builder fileReencryptionFailures(final long numFailures) {
+      fileReencryptionFailures = numFailures;
+      return this;
+    }
+
+    public ZoneReencryptionStatus build() {
+      ZoneReencryptionStatus ret = new ZoneReencryptionStatus();
+      Preconditions.checkArgument(id != 0, "no inode id set.");
+      Preconditions.checkNotNull(state, "no state set.");
+      Preconditions.checkNotNull(ezKeyVersionName, "no keyVersionName set.");
+      Preconditions
+          .checkArgument(submissionTime != 0, "no submission time set.");
+      ret.id = this.id;
+      ret.zoneName = this.zoneName;
+      ret.state = this.state;
+      ret.ezKeyVersionName = this.ezKeyVersionName;
+      ret.submissionTime = this.submissionTime;
+      ret.completionTime = this.completionTime;
+      ret.canceled = this.canceled;
+      ret.lastCheckpointFile = this.lastCheckpointFile;
+      ret.filesReencrypted = this.filesReencrypted;
+      ret.numReencryptionFailures = this.fileReencryptionFailures;
+      return ret;
+    }
+  }
+
+  public ZoneReencryptionStatus() {
+    reset();
+  }
+
+  void resetMetrics() {
+    filesReencrypted = 0;
+    numReencryptionFailures = 0;
+  }
+
+  public long getId() {
+    return id;
+  }
+
+  public String getZoneName() {
+    return zoneName;
+  }
+
+  void setState(final State s) {
+    state = s;
+  }
+
+  public State getState() {
+    return state;
+  }
+
+  public String getEzKeyVersionName() {
+    return ezKeyVersionName;
+  }
+
+  public long getSubmissionTime() {
+    return submissionTime;
+  }
+
+  public long getCompletionTime() {
+    return completionTime;
+  }
+
+  public boolean isCanceled() {
+    return canceled;
+  }
+
+  public String getLastCheckpointFile() {
+    return lastCheckpointFile;
+  }
+
+  public long getFilesReencrypted() {
+    return filesReencrypted;
+  }
+
+  public long getNumReencryptionFailures() {
+    return numReencryptionFailures;
+  }
+
+  public void reset() {
+    state = State.Submitted;
+    ezKeyVersionName = null;
+    submissionTime = 0;
+    completionTime = 0;
+    canceled = false;
+    lastCheckpointFile = null;
+    resetMetrics();
+  }
+
+  /**
+   * Set the zone name. The zone name is resolved from inode id and set during
+   * a listReencryptionStatus call, for the crypto admin to consume.
+   */
+  public void setZoneName(final String name) {
+    Preconditions.checkNotNull(name);
+    zoneName = name;
+  }
+
+  public void cancel() {
+    canceled = true;
+  }
+
+  void markZoneCompleted(final ReencryptionInfoProto proto) {
+    state = ZoneReencryptionStatus.State.Completed;
+    completionTime = proto.getCompletionTime();
+    lastCheckpointFile = null;
+    canceled = proto.getCanceled();
+    filesReencrypted = proto.getNumReencrypted();
+    numReencryptionFailures = proto.getNumFailures();
+  }
+
+  void markZoneSubmitted(final ReencryptionInfoProto proto) {
+    reset();
+    state = ZoneReencryptionStatus.State.Submitted;
+    ezKeyVersionName = proto.getEzKeyVersionName();
+    submissionTime = proto.getSubmissionTime();
+    filesReencrypted = proto.getNumReencrypted();
+    numReencryptionFailures = proto.getNumFailures();
+  }
+
+  void updateZoneProcess(final ReencryptionInfoProto proto) {
+    lastCheckpointFile = proto.getLastFile();
+    filesReencrypted = proto.getNumReencrypted();
+    numReencryptionFailures = proto.getNumFailures();
+  }
+}
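Constructing a status with the Builder above; build() enforces a non-zero
inode id plus a state, a key version name and a submission time (values here
are illustrative):

    ZoneReencryptionStatus zs = new ZoneReencryptionStatus.Builder()
        .id(16386)
        .zoneName("/zone")
        .state(ZoneReencryptionStatus.State.Submitted)
        .ezKeyVersionName("key@0")
        .submissionTime(System.currentTimeMillis())
        .build();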
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index aed4117..ec7d93f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -26,6 +26,7 @@
 
 import com.google.common.collect.Lists;
 
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
@@ -65,6 +66,7 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -73,6 +75,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -179,6 +182,10 @@
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ZoneReencryptionStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
@@ -1544,6 +1551,39 @@
   }
 
   @Override
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+      throws IOException {
+    final ReencryptEncryptionZoneRequestProto.Builder builder =
+        ReencryptEncryptionZoneRequestProto.newBuilder();
+    builder.setZone(zone).setAction(PBHelperClient.convert(action));
+    ReencryptEncryptionZoneRequestProto req = builder.build();
+    try {
+      rpcProxy.reencryptEncryptionZone(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(long id)
+      throws IOException {
+    final ListReencryptionStatusRequestProto req =
+        ListReencryptionStatusRequestProto.newBuilder().setId(id).build();
+    try {
+      ListReencryptionStatusResponseProto response =
+          rpcProxy.listReencryptionStatus(null, req);
+      List<ZoneReencryptionStatus> elements =
+          Lists.newArrayListWithCapacity(response.getStatusesCount());
+      for (ZoneReencryptionStatusProto p : response.getStatusesList()) {
+        elements.add(PBHelperClient.convert(p));
+      }
+      return new BatchedListEntries<>(elements, response.getHasMore());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
@@ -1760,11 +1800,11 @@
   }
 
   @Override
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
     try {
       GetErasureCodingCodecsResponseProto response = rpcProxy
           .getErasureCodingCodecs(null, VOID_GET_EC_CODEC_REQUEST);
-      HashMap<String, String> ecCodecs = new HashMap<String, String>();
+      Map<String, String> ecCodecs = new HashMap<>();
       for (CodecProto codec : response.getCodecList()) {
         ecCodecs.put(codec.getCodec(), codec.getCoders());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 5b1a687..04f9686 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -79,10 +79,12 @@
 import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -99,6 +101,7 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
@@ -129,6 +132,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptionStateProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ZoneReencryptionStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddECPolicyResponseProto;
@@ -157,6 +163,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
@@ -165,6 +172,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
@@ -1678,6 +1686,17 @@
     return builder.build();
   }
 
+  public static ReencryptActionProto convert(ReencryptAction a) {
+    switch (a) {
+    case CANCEL:
+      return ReencryptActionProto.CANCEL_REENCRYPT;
+    case START:
+      return ReencryptActionProto.START_REENCRYPT;
+    default:
+      throw new IllegalArgumentException("Unexpected value: " + a);
+    }
+  }
+
   public static RollingUpgradeActionProto convert(RollingUpgradeAction a) {
     switch (a) {
     case QUERY:
@@ -2282,6 +2301,17 @@
     }
   }
 
+  public static ReencryptAction convert(ReencryptActionProto a) {
+    switch (a) {
+    case CANCEL_REENCRYPT:
+      return ReencryptAction.CANCEL;
+    case START_REENCRYPT:
+      return ReencryptAction.START;
+    default:
+      throw new IllegalArgumentException("Unexpected value: " + a);
+    }
+  }
+
   public static RollingUpgradeAction convert(RollingUpgradeActionProto a) {
     switch (a) {
     case QUERY:
@@ -2733,16 +2763,24 @@
         .build();
   }
 
-  public static HdfsProtos.ZoneEncryptionInfoProto convert(
-      CipherSuite suite, CryptoProtocolVersion version, String keyName) {
+  public static ZoneEncryptionInfoProto convert(CipherSuite suite,
+      CryptoProtocolVersion version, String keyName) {
+    return convert(suite, version, keyName, null);
+  }
+
+  public static ZoneEncryptionInfoProto convert(CipherSuite suite,
+      CryptoProtocolVersion version, String keyName,
+      ReencryptionInfoProto proto) {
     if (suite == null || version == null || keyName == null) {
       return null;
     }
-    return HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
-        .setSuite(convert(suite))
-        .setCryptoProtocolVersion(convert(version))
-        .setKeyName(keyName)
-        .build();
+    ZoneEncryptionInfoProto.Builder builder =
+        ZoneEncryptionInfoProto.newBuilder().setSuite(convert(suite))
+            .setCryptoProtocolVersion(convert(version)).setKeyName(keyName);
+    if (proto != null) {
+      builder.setReencryptionProto(proto);
+    }
+    return builder.build();
   }
 
   public static FileEncryptionInfo convert(
@@ -2759,6 +2797,91 @@
         ezKeyVersionName);
   }
 
+  public static ReencryptionInfoProto convert(String ezkvn, Long submissionTime,
+      boolean isCanceled, long numReencrypted, long numFailures,
+      Long completionTime, String lastFile) {
+    if (ezkvn == null || submissionTime == null) {
+      return null;
+    }
+    ReencryptionInfoProto.Builder builder =
+        ReencryptionInfoProto.newBuilder().setEzKeyVersionName(ezkvn)
+            .setSubmissionTime(submissionTime).setCanceled(isCanceled)
+            .setNumReencrypted(numReencrypted).setNumFailures(numFailures);
+    if (completionTime != null) {
+      builder.setCompletionTime(completionTime);
+    }
+    if (lastFile != null) {
+      builder.setLastFile(lastFile);
+    }
+    return builder.build();
+  }
+
+  public static ZoneReencryptionStatusProto convert(ZoneReencryptionStatus zs) {
+    ZoneReencryptionStatusProto.Builder builder =
+        ZoneReencryptionStatusProto.newBuilder()
+        .setId(zs.getId())
+        .setPath(zs.getZoneName())
+        .setEzKeyVersionName(zs.getEzKeyVersionName())
+        .setSubmissionTime(zs.getSubmissionTime())
+        .setCanceled(zs.isCanceled())
+        .setNumReencrypted(zs.getFilesReencrypted())
+        .setNumFailures(zs.getNumReencryptionFailures());
+    switch (zs.getState()) {
+    case Submitted:
+      builder.setState(ReencryptionStateProto.SUBMITTED);
+      break;
+    case Processing:
+      builder.setState(ReencryptionStateProto.PROCESSING);
+      break;
+    case Completed:
+      builder.setState(ReencryptionStateProto.COMPLETED);
+      break;
+    default:
+      throw new IllegalArgumentException("Unknown state " + zs.getState());
+    }
+    final long completion = zs.getCompletionTime();
+    if (completion != 0) {
+      builder.setCompletionTime(completion);
+    }
+    final String file = zs.getLastCheckpointFile();
+    if (file != null) {
+      builder.setLastFile(file);
+    }
+    return builder.build();
+  }
+
+  public static ZoneReencryptionStatus convert(
+      ZoneReencryptionStatusProto proto) {
+    ZoneReencryptionStatus.State state;
+    switch (proto.getState()) {
+    case SUBMITTED:
+      state = ZoneReencryptionStatus.State.Submitted;
+      break;
+    case PROCESSING:
+      state = ZoneReencryptionStatus.State.Processing;
+      break;
+    case COMPLETED:
+      state = ZoneReencryptionStatus.State.Completed;
+      break;
+    default:
+      throw new IllegalArgumentException("Unknown state " + proto.getState());
+    }
+    ZoneReencryptionStatus.Builder builder =
+        new ZoneReencryptionStatus.Builder()
+        .id(proto.getId()).zoneName(proto.getPath()).state(state)
+        .ezKeyVersionName(proto.getEzKeyVersionName())
+        .submissionTime(proto.getSubmissionTime()).canceled(proto.getCanceled())
+        .filesReencrypted(proto.getNumReencrypted())
+        .fileReencryptionFailures(proto.getNumFailures());
+    if (proto.hasCompletionTime()) {
+      builder.completionTime(proto.getCompletionTime());
+    }
+    if (proto.hasLastFile()) {
+      builder.lastCheckpointFile(proto.getLastFile());
+    }
+    return builder.build();
+  }
+
   public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
     List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
     DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
@@ -2803,6 +2926,16 @@
     return builder.build();
   }
 
+  public static ErasureCodingPolicyState convertECState(
+      HdfsProtos.ErasureCodingPolicyState state) {
+    return ErasureCodingPolicyState.fromValue(state.getNumber());
+  }
+
+  public static HdfsProtos.ErasureCodingPolicyState convertECState(
+      ErasureCodingPolicyState state) {
+    return HdfsProtos.ErasureCodingPolicyState.valueOf(state.getValue());
+  }
+
   public static ErasureCodingPolicy convertErasureCodingPolicy(
       ErasureCodingPolicyProto proto) {
     final byte id = (byte) (proto.getId() & 0xFF);
@@ -2816,10 +2949,12 @@
           "Missing schema field in ErasureCodingPolicy proto");
       Preconditions.checkArgument(proto.hasCellSize(),
           "Missing cellsize field in ErasureCodingPolicy proto");
+      Preconditions.checkArgument(proto.hasState(),
+          "Missing state field in ErasureCodingPolicy proto");
 
       return new ErasureCodingPolicy(proto.getName(),
           convertECSchema(proto.getSchema()),
-          proto.getCellSize(), id);
+          proto.getCellSize(), id, convertECState(proto.getState()));
     }
     return policy;
   }
@@ -2833,7 +2968,8 @@
     if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
       builder.setName(policy.getName())
           .setSchema(convertECSchema(policy.getSchema()))
-          .setCellSize(policy.getCellSize());
+          .setCellSize(policy.getCellSize())
+          .setState(convertECState(policy.getState()));
     }
     return builder.build();
   }
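The two ZoneReencryptionStatus converters above are inverses, so a status
survives a round trip through the wire format; a quick sketch, reusing the
zs built in the Builder example earlier in this diff:

    ZoneReencryptionStatusProto proto = PBHelperClient.convert(zs);
    ZoneReencryptionStatus restored = PBHelperClient.convert(proto);
    assert restored.getId() == zs.getId();
    assert restored.getState() == zs.getState();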
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 4f44c5e..3f108fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -941,6 +941,10 @@
       returns(CreateEncryptionZoneResponseProto);
   rpc listEncryptionZones(ListEncryptionZonesRequestProto)
       returns(ListEncryptionZonesResponseProto);
+  rpc reencryptEncryptionZone(ReencryptEncryptionZoneRequestProto)
+      returns(ReencryptEncryptionZoneResponseProto);
+  rpc listReencryptionStatus(ListReencryptionStatusRequestProto)
+      returns(ListReencryptionStatusResponseProto);
   rpc getEZForPath(GetEZForPathRequestProto)
       returns(GetEZForPathResponseProto);
   rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
index 68b2f3a..75d3a0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
@@ -58,6 +58,47 @@
   required bool hasMore = 2;
 }
 
+enum ReencryptActionProto {
+  CANCEL_REENCRYPT = 1;
+  START_REENCRYPT = 2;
+}
+
+message ReencryptEncryptionZoneRequestProto {
+  required ReencryptActionProto action = 1;
+  required string zone = 2;
+}
+
+message ReencryptEncryptionZoneResponseProto {
+}
+
+message ListReencryptionStatusRequestProto {
+  required int64 id = 1;
+}
+
+enum ReencryptionStateProto {
+  SUBMITTED = 1;
+  PROCESSING = 2;
+  COMPLETED = 3;
+}
+
+message ZoneReencryptionStatusProto {
+  required int64 id = 1;
+  required string path = 2;
+  required ReencryptionStateProto state = 3;
+  required string ezKeyVersionName = 4;
+  required int64 submissionTime = 5;
+  required bool canceled = 6;
+  required int64 numReencrypted = 7;
+  required int64 numFailures = 8;
+  optional int64 completionTime = 9;
+  optional string lastFile = 10;
+}
+
+message ListReencryptionStatusResponseProto {
+  repeated ZoneReencryptionStatusProto statuses = 1;
+  required bool hasMore = 2;
+}
+
 message GetEZForPathRequestProto {
     required string src = 1;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 7109980..ddb5566 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -313,6 +313,20 @@
   required CipherSuiteProto suite = 1;
   required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
   required string keyName = 3;
+  optional ReencryptionInfoProto reencryptionProto = 4;
+}
+
+/**
+ * Re-encryption information for an encryption zone
+ */
+message ReencryptionInfoProto {
+  required string ezKeyVersionName = 1;
+  required uint64 submissionTime = 2;
+  required bool canceled = 3;
+  required int64 numReencrypted = 4;
+  required int64 numFailures = 5;
+  optional uint64 completionTime = 6;
+  optional string lastFile = 7;
 }
 
 /**
@@ -359,11 +373,21 @@
   repeated ECSchemaOptionEntryProto options = 4;
 }
 
+/**
+ * EC policy state.
+ */
+enum ErasureCodingPolicyState {
+  DISABLED = 1;
+  ENABLED = 2;
+  REMOVED = 3;
+}
+
 message ErasureCodingPolicyProto {
   optional string name = 1;
   optional ECSchemaProto schema = 2;
   optional uint32 cellSize = 3;
   required uint32 id = 4; // Actually a byte - only 8 bits used
+  optional ErasureCodingPolicyState state = 5 [default = ENABLED];
 }
 
 message AddECPolicyResponseProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 553bbce..2cd8934 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -859,6 +859,7 @@
     FileStatus expectedFileStatus = expected.getFileStatus(path);
     FileStatus actualFileStatus = actual.getFileStatus(path);
     assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl());
+    // backwards compat
     assertEquals(actualFileStatus.getPermission().getAclBit(),
         expectedFileStatus.getPermission().getAclBit());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 14b62e7..aa690e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 enable_testing()
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
index 44b18e6..c1ba3ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 set(CMAKE_SKIP_RPATH TRUE)
 
 # Flatten a list into a string.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
index 08c9f8b..08fc030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 include_directories(
     ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
     ${GENERATED_JAVAH}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index a0cd042..2883585 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -15,6 +15,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 add_definitions(-DLIBHDFS_DLL_EXPORT)
 
 include_directories(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f4c383e..17cabad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -357,6 +357,9 @@
   public static final boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT =
       HdfsClientConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT;
 
+  public static final String DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE = "dfs.namenode.snapshot.skip.capture.accesstime-only-change";
+  public static final boolean DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE_DEFAULT = false;
+
   // Whether to enable datanode's stale state detection and usage for reads
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
   public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
@@ -567,7 +570,7 @@
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY =
       "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
-      "RS-6-3-64k";
+      "RS-6-3-1024k";
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int     DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";
@@ -881,6 +884,8 @@
       HdfsClientConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
   public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
+  public static final int    DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_DEFAULT = 100;
+  public static final String DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_KEY = "dfs.namenode.list.reencryption.status.num.responses";
   public static final String DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES =
       "dfs.namenode.list.openfiles.num.responses";
   public static final int    DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES_DEFAULT =
@@ -889,6 +894,16 @@
   public static final int DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_DEFAULT = 1000;
   public static final String DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY = "dfs.namenode.edekcacheloader.initial.delay.ms";
   public static final int DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_DEFAULT = 3000;
+  public static final String DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY = "dfs.namenode.reencrypt.sleep.interval";
+  public static final String DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_DEFAULT = "1m";
+  public static final String DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY = "dfs.namenode.reencrypt.batch.size";
+  public static final int DFS_NAMENODE_REENCRYPT_BATCH_SIZE_DEFAULT = 1000;
+  public static final String DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY = "dfs.namenode.reencrypt.throttle.limit.handler.ratio";
+  public static final double DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_DEFAULT = 1.0;
+  public static final String DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_KEY = "dfs.namenode.reencrypt.throttle.limit.updater.ratio";
+  public static final double DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_DEFAULT = 1.0;
+  public static final String DFS_NAMENODE_REENCRYPT_EDEK_THREADS_KEY = "dfs.namenode.reencrypt.edek.threads";
+  public static final int DFS_NAMENODE_REENCRYPT_EDEK_THREADS_DEFAULT = 10;
 
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
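The new re-encryption keys can be tuned like any other NameNode setting; a
sketch with illustrative values (the same keys can equally go in
hdfs-site.xml):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY, "30s");
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY, 500);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_EDEK_THREADS_KEY, 20);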
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 38b81c6..44d5216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -56,6 +55,7 @@
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -222,8 +222,12 @@
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListReencryptionStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.AddErasureCodingPoliciesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ReencryptEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
@@ -1484,6 +1488,37 @@
   }
 
   @Override
+  public ReencryptEncryptionZoneResponseProto reencryptEncryptionZone(
+      RpcController controller, ReencryptEncryptionZoneRequestProto req)
+      throws ServiceException {
+    try {
+      server.reencryptEncryptionZone(req.getZone(),
+          PBHelperClient.convert(req.getAction()));
+      return ReencryptEncryptionZoneResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public ListReencryptionStatusResponseProto listReencryptionStatus(
+      RpcController controller, ListReencryptionStatusRequestProto req)
+      throws ServiceException {
+    try {
+      BatchedEntries<ZoneReencryptionStatus> entries = server
+          .listReencryptionStatus(req.getId());
+      ListReencryptionStatusResponseProto.Builder builder =
+          ListReencryptionStatusResponseProto.newBuilder();
+      builder.setHasMore(entries.hasMore());
+      for (int i = 0; i < entries.size(); i++) {
+        builder.addStatuses(PBHelperClient.convert(entries.get(i)));
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public SetErasureCodingPolicyResponseProto setErasureCodingPolicy(
       RpcController controller, SetErasureCodingPolicyRequestProto req)
       throws ServiceException {
@@ -1664,7 +1699,7 @@
       RpcController controller, GetErasureCodingCodecsRequestProto request)
       throws ServiceException {
     try {
-      HashMap<String, String> codecs = server.getErasureCodingCodecs();
+      Map<String, String> codecs = server.getErasureCodingCodecs();
       GetErasureCodingCodecsResponseProto.Builder resBuilder =
           GetErasureCodingCodecsResponseProto.newBuilder();
       for (Map.Entry<String, String> codec : codecs.entrySet()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 537ba0a..0155b85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -403,20 +403,23 @@
     LOG.info("Downloaded file " + tmpEditsFile.getName() + " of size " +
         tmpEditsFile.length() + " bytes.");
 
-    final boolean moveSuccess = journal.moveTmpSegmentToCurrent(tmpEditsFile,
-        finalEditsFile, log.getEndTxId());
-    if (!moveSuccess) {
-      // If move is not successful, delete the tmpFile
-      LOG.debug("Move to current directory unsuccessful. Deleting temporary " +
-          "file: " + tmpEditsFile);
-      if (!tmpEditsFile.delete()) {
+    boolean moveSuccess = false;
+    try {
+      moveSuccess = journal.moveTmpSegmentToCurrent(tmpEditsFile,
+          finalEditsFile, log.getEndTxId());
+    } catch (IOException e) {
+      LOG.info("Could not move %s to current directory.", tmpEditsFile);
+    } finally {
+      if (tmpEditsFile.exists() && !tmpEditsFile.delete()) {
         LOG.warn("Deleting " + tmpEditsFile + " has failed");
       }
-      return false;
-    } else {
-      metrics.incrNumEditLogsSynced();
     }
-    return true;
+    if (moveSuccess) {
+      metrics.incrNumEditLogsSynced();
+      return true;
+    } else {
+      return false;
+    }
   }
 
   private static DataTransferThrottler getThrottler(Configuration conf) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 2bd4a20..d35894c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -661,7 +661,11 @@
     return erasurecodeBlocks.size();
   }
 
-  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
+  int getNumberOfReplicateBlocks() {
+    return replicateBlocks.size();
+  }
+
+  List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
     return replicateBlocks.poll(maxTransfers);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 78783ca..c75bcea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1663,21 +1663,38 @@
     }
 
     final List<DatanodeCommand> cmds = new ArrayList<>();
-    // check pending replication
-    List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
-        maxTransfers);
-    if (pendingList != null) {
-      cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-          pendingList));
-      maxTransfers -= pendingList.size();
+    // Allocate _approximately_ maxTransfers pending tasks to DataNode.
+    // NN chooses pending tasks based on the ratio between the lengths of
+    // replication and erasure-coded block queues.
+    int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks();
+    int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded();
+    int totalBlocks = totalReplicateBlocks + totalECBlocks;
+    if (totalBlocks > 0) {
+      int numReplicationTasks = (int) Math.ceil(
+          (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
+      int numECTasks = (int) Math.ceil(
+          (double) (totalECBlocks * maxTransfers) / totalBlocks);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Pending replication tasks: " + numReplicationTasks
+            + " erasure-coded tasks: " + numECTasks);
+      }
+      // check pending replication tasks
+      List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
+          numReplicationTasks);
+      if (pendingList != null && !pendingList.isEmpty()) {
+        cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+            pendingList));
+      }
+      // check pending erasure coding tasks
+      List<BlockECReconstructionInfo> pendingECList = nodeinfo
+          .getErasureCodeCommand(numECTasks);
+      if (pendingECList != null && !pendingECList.isEmpty()) {
+        cmds.add(new BlockECReconstructionCommand(
+            DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
+      }
     }
-    // check pending erasure coding tasks
-    List<BlockECReconstructionInfo> pendingECList = nodeinfo
-        .getErasureCodeCommand(maxTransfers);
-    if (pendingECList != null) {
-      cmds.add(new BlockECReconstructionCommand(
-          DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
-    }
+
     // check block invalidation
     Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
     if (blks != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 0077700..8d91f04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -284,7 +284,8 @@
       
       // check if there is a disk error
       IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
-      DataNode.LOG.warn("IOException in BlockReceiver constructor"
+      DataNode.LOG
+          .warn("IOException in BlockReceiver constructor :" + ioe.getMessage()
           + (cause == null ? "" : ". Cause is "), cause);
       if (cause != null) {
         ioe = cause;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6069487..837ac07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2554,7 +2554,8 @@
         // disk check moved to FileIoProvider
         IOException cause = DatanodeUtil.getCauseIfDiskError(ie);
         if (cause != null) { // possible disk error
-          LOG.warn("IOException in DataTransfer#run(). Cause is ", cause);
+          LOG.warn("IOException in DataTransfer#run() "+ ie.getMessage() +". "
+                  + "Cause is ", cause);
         }
       } finally {
         decrementXmitsInProgress();
@@ -2999,8 +3000,8 @@
     b.setNumBytes(visible);
 
     if (targets.length > 0) {
-      new DataTransfer(targets, targetStorageTypes, targetStorageIds, b, stage,
-          client).run();
+      new Daemon(new DataTransfer(targets, targetStorageTypes,
+          targetStorageIds, b, stage, client)).start();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 18188dd..966bcb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -606,12 +606,11 @@
     public ScanInfoPerBlockPool call() throws IOException {
       String[] bpList = volume.getBlockPoolList();
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
+      perfTimer.start();
+      throttleTimer.start();
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
 
-        perfTimer.start();
-        throttleTimer.start();
-
         try {
           result.put(bpid, volume.compileReport(bpid, report, this));
         } catch (InterruptedException ex) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
index 18520944..eeb082b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
@@ -24,13 +24,17 @@
  * Utility class to facilitate some fault injection tests for the checkpointing
  * process.
  */
-class CheckpointFaultInjector {
-  static CheckpointFaultInjector instance = new CheckpointFaultInjector();
-  
-  static CheckpointFaultInjector getInstance() {
+public class CheckpointFaultInjector {
+  public static CheckpointFaultInjector instance =
+      new CheckpointFaultInjector();
+
+  public static CheckpointFaultInjector getInstance() {
     return instance;
   }
-  
+
+  public static void set(CheckpointFaultInjector instance) {
+    CheckpointFaultInjector.instance = instance;
+  }
   public void beforeGetImageSetsHeaders() throws IOException {}
   public void afterSecondaryCallsRollEditLog() throws IOException {}
   public void duringMerge() throws IOException {}
@@ -46,4 +50,8 @@
   
   public void afterMD5Rename() throws IOException {}
   public void beforeEditsRename() throws IOException {}
+
+  public void duringUploadInProgess() throws InterruptedException, IOException {
+  }
+
 }
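
Opening up CheckpointFaultInjector (public visibility plus a set() method) completes the usual fault-injector pattern: production code calls no-op hooks on a static instance, and a test swaps in a subclass that throws at a chosen point. A self-contained sketch with illustrative names:

// Illustrative fault-injector pattern; names are not Hadoop's.
class FaultInjector {
  private static FaultInjector instance = new FaultInjector();
  static FaultInjector getInstance() { return instance; }
  static void set(FaultInjector injector) { instance = injector; }
  void beforeUpload() throws Exception {}        // no-op in production
}

public class InjectorDemo {
  static void upload() throws Exception {
    FaultInjector.getInstance().beforeUpload();  // injection point
    System.out.println("uploaded");
  }

  public static void main(String[] args) throws Exception {
    upload();                                    // normal path
    FaultInjector.set(new FaultInjector() {      // test swaps in a failure
      @Override
      void beforeUpload() throws Exception {
        throw new Exception("injected failure");
      }
    });
    try {
      upload();
    } catch (Exception e) {
      System.out.println("caught: " + e.getMessage());
    }
  }
}
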
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
index 104d8c3..e4a035e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
@@ -42,4 +42,13 @@
 
   @VisibleForTesting
   public void startFileAfterGenerateKey() throws IOException {}
+
+  @VisibleForTesting
+  public void reencryptEncryptedKeys() throws IOException {}
+
+  @VisibleForTesting
+  public void reencryptUpdaterProcessOneTask() throws IOException {}
+
+  @VisibleForTesting
+  public void reencryptUpdaterProcessCheckpoint() throws IOException {}
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 96e189b9..f4cf8f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -19,32 +19,45 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.security.AccessControlException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
 import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
     .CRYPTO_XATTR_ENCRYPTION_ZONE;
 
@@ -57,7 +70,7 @@
  */
 public class EncryptionZoneManager {
 
-  public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
+  public static final Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
       .class);
 
   /**
@@ -99,6 +112,91 @@
   private TreeMap<Long, EncryptionZoneInt> encryptionZones = null;
   private final FSDirectory dir;
   private final int maxListEncryptionZonesResponses;
+  private final int maxListReencryptionStatusResponses;
+
+  private ThreadFactory reencryptionThreadFactory;
+  private ExecutorService reencryptHandlerExecutor;
+  private ReencryptionHandler reencryptionHandler;
+  // Re-encryption status is kept here to decouple status listing (which
+  // should work as long as the NN is up) from the actual handler (which
+  // only exists if a key provider is configured).
+  private final ReencryptionStatus reencryptionStatus;
+
+  public static final BatchedListEntries<ZoneReencryptionStatus> EMPTY_LIST =
+      new BatchedListEntries<>(new ArrayList<ZoneReencryptionStatus>(), false);
+
+  @VisibleForTesting
+  public void pauseReencryptForTesting() {
+    reencryptionHandler.pauseForTesting();
+  }
+
+  @VisibleForTesting
+  public void resumeReencryptForTesting() {
+    reencryptionHandler.resumeForTesting();
+  }
+
+  @VisibleForTesting
+  public void pauseForTestingAfterNthSubmission(final int count) {
+    reencryptionHandler.pauseForTestingAfterNthSubmission(count);
+  }
+
+  @VisibleForTesting
+  public void pauseReencryptUpdaterForTesting() {
+    reencryptionHandler.pauseUpdaterForTesting();
+  }
+
+  @VisibleForTesting
+  public void resumeReencryptUpdaterForTesting() {
+    reencryptionHandler.resumeUpdaterForTesting();
+  }
+
+  @VisibleForTesting
+  public void pauseForTestingAfterNthCheckpoint(final String zone,
+      final int count) throws IOException {
+    INodesInPath iip;
+    dir.readLock();
+    try {
+      iip = dir.resolvePath(dir.getPermissionChecker(), zone, DirOp.READ);
+    } finally {
+      dir.readUnlock();
+    }
+    reencryptionHandler
+        .pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count);
+  }
+
+  @VisibleForTesting
+  public void resetMetricsForTesting() {
+    reencryptionStatus.resetMetrics();
+  }
+
+  @VisibleForTesting
+  public ReencryptionStatus getReencryptionStatus() {
+    return reencryptionStatus;
+  }
+
+  @VisibleForTesting
+  public ZoneReencryptionStatus getZoneStatus(final String zone)
+      throws IOException {
+    final FSPermissionChecker pc = dir.getPermissionChecker();
+    final INode inode;
+    dir.getFSNamesystem().readLock();
+    dir.readLock();
+    try {
+      final INodesInPath iip = dir.resolvePath(pc, zone, DirOp.READ);
+      inode = iip.getLastINode();
+      if (inode == null) {
+        return null;
+      }
+      return getReencryptionStatus().getZoneStatus(inode.getId());
+    } finally {
+      dir.readUnlock();
+      dir.getFSNamesystem().readUnlock();
+    }
+  }
+
+  FSDirectory getFSDirectory() {
+    return dir;
+  }
 
   /**
    * Construct a new EncryptionZoneManager.
@@ -115,6 +213,50 @@
         DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES + " " +
             "must be a positive integer."
     );
+    if (getProvider() != null) {
+      reencryptionHandler = new ReencryptionHandler(this, conf);
+      reencryptionThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
+          .setNameFormat("reencryptionHandlerThread #%d").build();
+    }
+    maxListReencryptionStatusResponses =
+        conf.getInt(DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_KEY,
+            DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_DEFAULT);
+    Preconditions.checkArgument(maxListReencryptionStatusResponses >= 0,
+        DFS_NAMENODE_LIST_REENCRYPTION_STATUS_NUM_RESPONSES_KEY +
+            " must be a non-negative integer."
+    );
+    reencryptionStatus = new ReencryptionStatus();
+  }
+
+  KeyProviderCryptoExtension getProvider() {
+    return dir.getProvider();
+  }
+
+  void startReencryptThreads() {
+    if (getProvider() == null) {
+      return;
+    }
+    Preconditions.checkNotNull(reencryptionHandler);
+    reencryptHandlerExecutor =
+        Executors.newSingleThreadExecutor(reencryptionThreadFactory);
+    reencryptHandlerExecutor.execute(reencryptionHandler);
+    reencryptionHandler.startUpdaterThread();
+  }
+
+  void stopReencryptThread() {
+    if (getProvider() == null || reencryptionHandler == null) {
+      return;
+    }
+    dir.writeLock();
+    try {
+      reencryptionHandler.stopThreads();
+    } finally {
+      dir.writeUnlock();
+    }
+    if (reencryptHandlerExecutor != null) {
+      reencryptHandlerExecutor.shutdownNow();
+      reencryptHandlerExecutor = null;
+    }
   }
 
   /**
@@ -157,7 +299,13 @@
   void removeEncryptionZone(Long inodeId) {
     assert dir.hasWriteLock();
     if (hasCreatedEncryptionZone()) {
-      encryptionZones.remove(inodeId);
+      if (encryptionZones.remove(inodeId) == null
+          || !getReencryptionStatus().hasRunningZone(inodeId)) {
+        return;
+      }
+      if (reencryptionHandler != null) {
+        reencryptionHandler.removeZone(inodeId);
+      }
     }
   }
 
@@ -173,13 +321,17 @@
   }
 
   /**
-   * Returns the path of the EncryptionZoneInt.
+   * Returns the full path from an INode id.
    * <p/>
    * Called while holding the FSDirectory lock.
    */
-  private String getFullPathName(EncryptionZoneInt ezi) {
+  String getFullPathName(Long nodeId) {
     assert dir.hasReadLock();
-    return dir.getInode(ezi.getINodeId()).getFullPathName();
+    INode inode = dir.getInode(nodeId);
+    if (inode == null) {
+      return null;
+    }
+    return inode.getFullPathName();
   }
 
   /**
@@ -247,7 +399,8 @@
     if (ezi == null) {
       return null;
     } else {
-      return new EncryptionZone(ezi.getINodeId(), getFullPathName(ezi),
+      return new EncryptionZone(ezi.getINodeId(),
+          getFullPathName(ezi.getINodeId()),
           ezi.getSuite(), ezi.getVersion(), ezi.getKeyName());
     }
   }
@@ -284,8 +437,8 @@
 
     if (srcInEZ) {
       if (srcParentEZI != dstParentEZI) {
-        final String srcEZPath = getFullPathName(srcParentEZI);
-        final String dstEZPath = getFullPathName(dstParentEZI);
+        final String srcEZPath = getFullPathName(srcParentEZI.getINodeId());
+        final String dstEZPath = getFullPathName(dstParentEZI.getINodeId());
         final StringBuilder sb = new StringBuilder(srcIIP.getPath());
         sb.append(" can't be moved from encryption zone ");
         sb.append(srcEZPath);
@@ -294,6 +447,24 @@
         sb.append(".");
         throw new IOException(sb.toString());
       }
+      checkMoveValidityForReencryption(srcIIP.getPath(),
+          srcParentEZI.getINodeId());
+    } else if (dstInEZ) {
+      checkMoveValidityForReencryption(dstIIP.getPath(),
+          dstParentEZI.getINodeId());
+    }
+  }
+
+  private void checkMoveValidityForReencryption(final String pathName,
+      final long zoneId) throws IOException {
+    assert dir.hasReadLock();
+    final ZoneReencryptionStatus zs = reencryptionStatus.getZoneStatus(zoneId);
+    if (zs != null && zs.getState() != ZoneReencryptionStatus.State.Completed) {
+      final StringBuilder sb = new StringBuilder(pathName);
+      sb.append(" can't be moved because encryption zone ");
+      sb.append(getFullPathName(zoneId));
+      sb.append(" is currently under re-encryption");
+      throw new IOException(sb.toString());
     }
   }
 
@@ -364,19 +535,13 @@
       /*
        Skip EZs that are only present in snapshots. Re-resolve the path to 
        see if the path's current inode ID matches EZ map's INode ID.
-       
+
        INode#getFullPathName simply calls getParent recursively, so will return
-       the INode's parents at the time it was snapshotted. It will not 
+       the INode's parents at the time it was snapshotted. It will not
        contain a reference INode.
       */
-      final String pathName = getFullPathName(ezi);
-      INode inode = dir.getInode(ezi.getINodeId());
-      INode lastINode = null;
-      if (inode.getParent() != null || inode.isRoot()) {
-        INodesInPath iip = dir.getINodesInPath(pathName, DirOp.READ_LINK);
-        lastINode = iip.getLastINode();
-      }
-      if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
+      final String pathName = getFullPathName(ezi.getINodeId());
+      if (!pathResolvesToId(ezi.getINodeId(), pathName)) {
         continue;
       }
       // Add the EZ to the result list
@@ -392,6 +557,156 @@
   }
 
   /**
+   * Resolves the path to an inode id, then checks whether it matches the
+   * inode id passed in. This is necessary to filter out zones that exist
+   * only in snapshots.
+   * @param zoneId the inode id of the encryption zone root
+   * @param zonePath the full path of the encryption zone
+   * @return true if the path resolves to the id, false if not.
+   * @throws UnresolvedLinkException
+   */
+  private boolean pathResolvesToId(final long zoneId, final String zonePath)
+      throws UnresolvedLinkException, AccessControlException,
+      ParentNotDirectoryException {
+    assert dir.hasReadLock();
+    INode inode = dir.getInode(zoneId);
+    if (inode == null) {
+      return false;
+    }
+    INode lastINode = null;
+    if (INode.isValidAbsolutePath(zonePath)) {
+      INodesInPath iip = dir.getINodesInPath(zonePath, DirOp.READ_LINK);
+      lastINode = iip.getLastINode();
+    }
+    if (lastINode == null || lastINode.getId() != zoneId) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Re-encrypts the given encryption zone path. If the given path is not the
+   * root of an encryption zone, an exception is thrown.
+   */
+  XAttr reencryptEncryptionZone(final INodesInPath zoneIIP,
+      final String keyVersionName) throws IOException {
+    assert dir.hasWriteLock();
+    if (reencryptionHandler == null) {
+      throw new IOException("No key provider configured, re-encryption "
+          + "operation is rejected");
+    }
+    final INode inode = zoneIIP.getLastINode();
+    final String zoneName = zoneIIP.getPath();
+    checkEncryptionZoneRoot(inode, zoneName);
+    if (getReencryptionStatus().hasRunningZone(inode.getId())) {
+      throw new IOException("Zone " + zoneName
+          + " is already submitted for re-encryption.");
+    }
+    LOG.info("Zone {}({}) is submitted for re-encryption.", zoneName,
+        inode.getId());
+    XAttr ret = FSDirEncryptionZoneOp
+        .updateReencryptionSubmitted(dir, zoneIIP, keyVersionName);
+    reencryptionHandler.notifyNewSubmission();
+    return ret;
+  }
+
+  /**
+   * Cancels the currently-running re-encryption of the given encryption zone.
+   * If the given path is not the root of an encryption zone, an exception
+   * is thrown.
+   */
+  List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP)
+      throws IOException {
+    assert dir.hasWriteLock();
+    if (reencryptionHandler == null) {
+      throw new IOException("No key provider configured, re-encryption "
+          + "operation is rejected");
+    }
+    final long zoneId = zoneIIP.getLastINode().getId();
+    final String zoneName = zoneIIP.getPath();
+    checkEncryptionZoneRoot(zoneIIP.getLastINode(), zoneName);
+    reencryptionHandler.cancelZone(zoneId, zoneName);
+    LOG.info("Cancelled zone {}({}) for re-encryption.", zoneName, zoneId);
+    return FSDirEncryptionZoneOp.updateReencryptionFinish(dir, zoneIIP,
+        reencryptionStatus.getZoneStatus(zoneId));
+  }
+
+  /**
+   * Cursor-based listing of zone re-encryption status.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      final long prevId) throws IOException {
+    assert dir.hasReadLock();
+    if (!hasCreatedEncryptionZone()) {
+      return ReencryptionStatus.EMPTY_LIST;
+    }
+
+    NavigableMap<Long, ZoneReencryptionStatus> stats =
+        reencryptionStatus.getZoneStatuses();
+
+    if (stats.isEmpty()) {
+      return EMPTY_LIST;
+    }
+
+    NavigableMap<Long, ZoneReencryptionStatus> tailMap =
+        stats.tailMap(prevId, false);
+    final int numResp =
+        Math.min(maxListReencryptionStatusResponses, tailMap.size());
+    final List<ZoneReencryptionStatus> ret =
+        Lists.newArrayListWithExpectedSize(numResp);
+    int count = 0;
+    for (ZoneReencryptionStatus zs : tailMap.values()) {
+      final String name = getFullPathName(zs.getId());
+      if (name == null || !pathResolvesToId(zs.getId(), name)) {
+        continue;
+      }
+      zs.setZoneName(name);
+      ret.add(zs);
+      ++count;
+      if (count >= numResp) {
+        break;
+      }
+    }
+    final boolean hasMore = (numResp < tailMap.size());
+    return new BatchedListEntries<>(ret, hasMore);
+  }
+
+  /**
+   * Return whether an INode is an encryption zone root.
+   */
+  boolean isEncryptionZoneRoot(final INode inode, final String name)
+      throws FileNotFoundException {
+    assert dir.hasReadLock();
+    if (inode == null) {
+      throw new FileNotFoundException("INode does not exist for " + name);
+    }
+    if (!inode.isDirectory()) {
+      return false;
+    }
+    if (!hasCreatedEncryptionZone()
+        || !encryptionZones.containsKey(inode.getId())) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Check that an INode is an encryption zone root.
+   *
+   * @param inode the zone inode
+   * @throws IOException if the inode is not a directory,
+   *                     or is a directory but not the root of an EZ.
+   */
+  void checkEncryptionZoneRoot(final INode inode, final String name)
+      throws IOException {
+    if (!isEncryptionZoneRoot(inode, name)) {
+      throw new IOException("Path " + name + " is not the root of an"
+          + " encryption zone.");
+    }
+  }
+
+  /**
    * @return number of encryption zones.
    */
   public int getNumEncryptionZones() {
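
listReencryptionStatus above is cursor-based batching over a NavigableMap: the client passes the last zone id it saw, and the server resumes strictly after it via tailMap(prevId, false). A runnable sketch of the same cursor walk; the zone data and batch size are made up:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

// Cursor-based batched listing over a TreeMap, mirroring the tailMap walk
// in listReencryptionStatus. Data and batch size are illustrative.
public class CursorListingDemo {
  public static void main(String[] args) {
    NavigableMap<Long, String> zones = new TreeMap<>();
    zones.put(16L, "/ez/a");
    zones.put(42L, "/ez/b");
    zones.put(99L, "/ez/c");

    long prevId = 16L;            // cursor: last id the client already has
    int maxResponses = 1;

    NavigableMap<Long, String> tail = zones.tailMap(prevId, false);
    int numResp = Math.min(maxResponses, tail.size());
    List<String> batch = new ArrayList<>(numResp);
    for (Map.Entry<Long, String> e : tail.entrySet()) {
      batch.add(e.getValue());
      if (batch.size() >= numResp) {
        break;
      }
    }
    boolean hasMore = numResp < tail.size();
    System.out.println(batch + ", hasMore=" + hasMore);  // [/ez/b], true
  }
}
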
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 18b8e8a..74b5ebf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -17,24 +17,27 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 /**
  * This manages erasure coding policies predefined and activated in the system.
@@ -48,7 +51,6 @@
 
   public static Logger LOG = LoggerFactory.getLogger(
       ErasureCodingPolicyManager.class);
-  private static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
   private int maxCellSize =
       DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT;
 
@@ -60,25 +62,32 @@
           HdfsConstants.ALLSSD_STORAGE_POLICY_ID};
 
   /**
-   * All user defined policies sorted by name for fast querying.
+   * All policies sorted by name for fast querying, including built-in,
+   * user-defined, and removed policies.
    */
-  private Map<String, ErasureCodingPolicy> userPoliciesByName;
+  private Map<String, ErasureCodingPolicy> policiesByName;
 
   /**
-   * All user defined policies sorted by ID for fast querying.
+   * All policies sorted by ID for fast querying, including built-in,
+   * user-defined, and removed policies.
    */
-  private Map<Byte, ErasureCodingPolicy> userPoliciesByID;
+  private Map<Byte, ErasureCodingPolicy> policiesByID;
 
   /**
-   * All removed policies sorted by name.
+   * For better performance when querying all policies.
    */
-  private Map<String, ErasureCodingPolicy> removedPoliciesByName;
+  private ErasureCodingPolicy[] allPolicies;
 
   /**
-   * All enabled policies maintained in NN memory for fast querying,
-   * identified and sorted by its name.
+   * All enabled policies sorted by name for fast querying, including
+   * built-in and user-defined policies.
    */
   private Map<String, ErasureCodingPolicy> enabledPoliciesByName;
+  /**
+   * For better performance when querying all enabled policies.
+   */
+  private ErasureCodingPolicy[] enabledPolicies;
+
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -101,46 +110,51 @@
             DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
     final String[] policyNames =
             (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName);
-    this.userPoliciesByID = new TreeMap<>();
-    this.userPoliciesByName = new TreeMap<>();
-    this.removedPoliciesByName = new TreeMap<>();
+    this.policiesByName = new TreeMap<>();
+    this.policiesByID = new TreeMap<>();
     this.enabledPoliciesByName = new TreeMap<>();
+
+    /*
+     * TODO: HDFS-7859 - load user-defined EC policies from the fsImage.
+     * Loading persistent policies from the image and edit log is done only
+     * once during NameNode startup. This can be done here or in a separate
+     * method.
+     */
+
+    /*
+     * Add all System built-in policies into policy map
+     */
+    for (ErasureCodingPolicy policy :
+        SystemErasureCodingPolicies.getPolicies()) {
+      policiesByName.put(policy.getName(), policy);
+      policiesByID.put(policy.getId(), policy);
+    }
+
     for (String policyName : policyNames) {
       if (policyName.trim().isEmpty()) {
         continue;
       }
-      ErasureCodingPolicy ecPolicy =
-          SystemErasureCodingPolicies.getByName(policyName);
+      ErasureCodingPolicy ecPolicy = policiesByName.get(policyName);
       if (ecPolicy == null) {
-        ecPolicy = userPoliciesByName.get(policyName);
-        if (ecPolicy == null) {
-          String allPolicies = SystemErasureCodingPolicies.getPolicies()
-              .stream().map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", ")) + ", " +
-              userPoliciesByName.values().stream()
-              .map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", "));
-          String msg = String.format("EC policy '%s' specified at %s is not a "
-              + "valid policy. Please choose from list of available "
-              + "policies: [%s]",
-              policyName,
-              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-              allPolicies);
-          throw new IllegalArgumentException(msg);
-        }
+        String names = policiesByName.values()
+            .stream().map(ErasureCodingPolicy::getName)
+            .collect(Collectors.joining(", "));
+        String msg = String.format("EC policy '%s' specified at %s is not a "
+                + "valid policy. Please choose from list of available "
+                + "policies: [%s]",
+            policyName,
+            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+            names);
+        throw new IllegalArgumentException(msg);
       }
       enabledPoliciesByName.put(ecPolicy.getName(), ecPolicy);
     }
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
 
     maxCellSize = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT);
-
-    /**
-     * TODO: HDFS-7859 persist into NameNode
-     * load persistent policies from image and editlog, which is done only once
-     * during NameNode startup. This can be done here or in a separate method.
-     */
   }
 
   /**
@@ -148,16 +162,20 @@
    * @return all policies
    */
   public ErasureCodingPolicy[] getEnabledPolicies() {
-    ErasureCodingPolicy[] results =
-        new ErasureCodingPolicy[enabledPoliciesByName.size()];
-    return enabledPoliciesByName.values().toArray(results);
+    return enabledPolicies;
   }
 
   /**
    * Get enabled policy by policy name.
    */
   public ErasureCodingPolicy getEnabledPolicyByName(String name) {
-    return enabledPoliciesByName.get(name);
+    ErasureCodingPolicy ecPolicy = enabledPoliciesByName.get(name);
+    if (ecPolicy == null) {
+      if (name.equalsIgnoreCase(ErasureCodeConstants.REPLICATION_POLICY_NAME)) {
+        ecPolicy = SystemErasureCodingPolicies.getReplicationPolicy();
+      }
+    }
+    return ecPolicy;
   }
 
   /**
@@ -181,9 +199,7 @@
    * @return all policies
    */
   public ErasureCodingPolicy[] getPolicies() {
-    return Stream.concat(SystemErasureCodingPolicies.getPolicies().stream(),
-        userPoliciesByName.values().stream())
-        .toArray(ErasureCodingPolicy[]::new);
+    return allPolicies;
   }
 
   /**
@@ -191,11 +207,7 @@
    * @return ecPolicy, or null if not found
    */
   public ErasureCodingPolicy getByID(byte id) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
-    if (policy == null) {
-      return this.userPoliciesByID.get(id);
-    }
-    return policy;
+    return this.policiesByID.get(id);
   }
 
   /**
@@ -203,11 +215,7 @@
    * @return ecPolicy, or null if not found
    */
   public ErasureCodingPolicy getByName(String name) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByName(name);
-    if (policy == null) {
-      return this.userPoliciesByName.get(name);
-    }
-    return policy;
+    return this.policiesByName.get(name);
   }
 
   /**
@@ -224,6 +232,9 @@
    */
   public synchronized ErasureCodingPolicy addPolicy(ErasureCodingPolicy policy)
       throws IllegalECPolicyException {
+    // Set the policy state to DISABLED when it is first added.
+    policy.setState(ErasureCodingPolicyState.DISABLED);
+
     if (!CodecUtil.hasCodec(policy.getCodecName())) {
       throw new IllegalECPolicyException("Codec name "
           + policy.getCodecName() + " is not supported");
@@ -250,14 +261,17 @@
     }
     policy.setName(assignedNewName);
     policy.setId(getNextAvailablePolicyID());
-    this.userPoliciesByName.put(policy.getName(), policy);
-    this.userPoliciesByID.put(policy.getId(), policy);
+    this.policiesByName.put(policy.getName(), policy);
+    this.policiesByID.put(policy.getId(), policy);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
     return policy;
   }
 
   private byte getNextAvailablePolicyID() {
-    byte currentId = this.userPoliciesByID.keySet().stream()
-        .max(Byte::compareTo).orElse(USER_DEFINED_POLICY_START_ID);
+    byte currentId = this.policiesByID.keySet().stream()
+        .max(Byte::compareTo)
+        .filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
+        .orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
     return (byte) (currentId + 1);
   }
 
@@ -265,69 +279,71 @@
   * Remove a user erasure coding policy by policyName.
    */
   public synchronized void removePolicy(String name) {
-    if (SystemErasureCodingPolicies.getByName(name) != null) {
-      throw new IllegalArgumentException("System erasure coding policy " +
-          name + " cannot be removed");
-    }
-    ErasureCodingPolicy policy = userPoliciesByName.get(name);
-    if (policy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
-    enabledPoliciesByName.remove(name);
-    removedPoliciesByName.put(name, policy);
+
+    if (ecPolicy.isSystemPolicy()) {
+      throw new IllegalArgumentException("System erasure coding policy " +
+          name + " cannot be removed");
+    }
+
+    if (enabledPoliciesByName.containsKey(name)) {
+      enabledPoliciesByName.remove(name);
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    }
+    ecPolicy.setState(ErasureCodingPolicyState.REMOVED);
+    LOG.info("Remove erasure coding policy " + name);
   }
 
+  @VisibleForTesting
   public List<ErasureCodingPolicy> getRemovedPolicies() {
-    return removedPoliciesByName.values().stream().collect(Collectors.toList());
+    ArrayList<ErasureCodingPolicy> removedPolicies =
+        new ArrayList<ErasureCodingPolicy>();
+    for (ErasureCodingPolicy ecPolicy : policiesByName.values()) {
+      if (ecPolicy.isRemoved()) {
+        removedPolicies.add(ecPolicy);
+      }
+    }
+    return removedPolicies;
   }
 
   /**
    * Disable an erasure coding policy by policyName.
    */
   public synchronized void disablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Disable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
 
-    if(sysEcPolicy != null){
+    if (enabledPoliciesByName.containsKey(name)) {
       enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, sysEcPolicy);
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
     }
-    if(userEcPolicy != null){
-      enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, userEcPolicy);
-    }
+    ecPolicy.setState(ErasureCodingPolicyState.DISABLED);
+    LOG.info("Disable the erasure coding policy " + name);
   }
 
   /**
    * Enable an erasure coding policy by policyName.
    */
   public synchronized void enablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Enable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
 
-    if(sysEcPolicy != null){
-      enabledPoliciesByName.put(name, sysEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
-    if(userEcPolicy != null) {
-      enabledPoliciesByName.put(name, userEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
+    enabledPoliciesByName.put(name, ecPolicy);
+    ecPolicy.setState(ErasureCodingPolicyState.ENABLED);
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    LOG.info("Enable the erasure coding policy " + name);
   }
-
 }
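
A recurring move in the ErasureCodingPolicyManager rewrite is caching a snapshot array (enabledPolicies, allPolicies) beside the TreeMap and rebuilding it only on mutation, so getPolicies() and getEnabledPolicies() return without copying. A minimal sketch of that write-time-rebuild pattern with generic names:

import java.util.Map;
import java.util.TreeMap;

// Write-time snapshot array beside a map: reads are allocation-free, and
// mutations pay the one-off cost of rebuilding the array. Generic names.
public class SnapshotCacheDemo {
  private final Map<String, String> byName = new TreeMap<>();
  private volatile String[] all = new String[0];

  public synchronized void put(String name, String value) {
    byName.put(name, value);
    all = byName.values().toArray(new String[0]);  // rebuilt on write only
  }

  public String[] getAll() {
    return all;                                    // cheap, lock-free read
  }

  public static void main(String[] args) {
    SnapshotCacheDemo cache = new SnapshotCacheDemo();
    cache.put("RS-6-3-1024k", "enabled");
    cache.put("XOR-2-1-1024k", "disabled");
    System.out.println(cache.getAll().length);     // 2
  }
}

As in the patch, the returned array is shared, so callers must treat it as read-only.
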
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index d4b24f5..0dfaa8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -494,7 +494,9 @@
     // then no need to store access time
     if (atime != -1 && (status || force
         || atime > inode.getAccessTime() + fsd.getAccessTimePrecision())) {
-      inode.setAccessTime(atime, latest);
+      inode.setAccessTime(atime, latest,
+          fsd.getFSNamesystem().getSnapshotManager().
+          getSkipCaptureAccessTimeOnlyChange());
       status = true;
     }
     return status;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 22039d1..2552cf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -19,6 +19,7 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.GeneralSecurityException;
 import java.security.PrivilegedExceptionAction;
@@ -31,6 +32,7 @@
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
@@ -42,15 +44,22 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
 import org.apache.hadoop.security.SecurityUtil;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.util.Time;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 /**
@@ -216,18 +225,206 @@
     }
   }
 
+  static void reencryptEncryptionZone(final FSDirectory fsd,
+      final String zone, final String keyVersionName,
+      final boolean logRetryCache) throws IOException {
+    final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+    final FSPermissionChecker pc = fsd.getPermissionChecker();
+    fsd.writeLock();
+    try {
+      final INodesInPath iip = fsd.resolvePath(pc, zone, DirOp.WRITE);
+      final XAttr xattr = fsd.ezManager
+          .reencryptEncryptionZone(iip, keyVersionName);
+      xAttrs.add(xattr);
+    } finally {
+      fsd.writeUnlock();
+    }
+    fsd.getEditLog().logSetXAttrs(zone, xAttrs, logRetryCache);
+  }
+
+  static void cancelReencryptEncryptionZone(final FSDirectory fsd,
+      final String zone, final boolean logRetryCache) throws IOException {
+    final List<XAttr> xattrs;
+    final FSPermissionChecker pc = fsd.getPermissionChecker();
+    fsd.writeLock();
+    try {
+      final INodesInPath iip = fsd.resolvePath(pc, zone, DirOp.WRITE);
+      xattrs = fsd.ezManager.cancelReencryptEncryptionZone(iip);
+    } finally {
+      fsd.writeUnlock();
+    }
+    if (xattrs != null && !xattrs.isEmpty()) {
+      fsd.getEditLog().logSetXAttrs(zone, xattrs, logRetryCache);
+    }
+  }
+
+  static BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      final FSDirectory fsd, final long prevId)
+      throws IOException {
+    fsd.readLock();
+    try {
+      return fsd.ezManager.listReencryptionStatus(prevId);
+    } finally {
+      fsd.readUnlock();
+    }
+  }
+
+  /**
+   * Update re-encryption progress (submitted). Caller should
+   * logSync after calling this, outside of the FSN lock.
+   * <p>
+   * The reencryption status is updated during SetXAttrs.
+   */
+  static XAttr updateReencryptionSubmitted(final FSDirectory fsd,
+      final INodesInPath iip, final String ezKeyVersionName)
+      throws IOException {
+    assert fsd.hasWriteLock();
+    Preconditions.checkNotNull(ezKeyVersionName, "ezKeyVersionName is null.");
+    final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
+    Preconditions.checkNotNull(zoneProto, "ZoneEncryptionInfoProto is null.");
+
+    final ReencryptionInfoProto newProto = PBHelperClient
+        .convert(ezKeyVersionName, Time.now(), false, 0, 0, null, null);
+    final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
+        .convert(PBHelperClient.convert(zoneProto.getSuite()),
+            PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
+            zoneProto.getKeyName(), newProto);
+
+    final XAttr xattr = XAttrHelper
+        .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(xattr);
+    FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xattrs,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    return xattr;
+  }
+
+  /**
+   * Update re-encryption progress (start, checkpoint). Caller should
+   * logSync after calling this, outside of the FSN lock.
+   * <p>
+   * The reencryption status is updated during SetXAttrs.
+   * Original reencryption status is passed in to get existing information
+   * such as ezkeyVersionName and submissionTime.
+   */
+  static XAttr updateReencryptionProgress(final FSDirectory fsd,
+      final INode zoneNode, final ZoneReencryptionStatus origStatus,
+      final String lastFile, final long numReencrypted, final long numFailures)
+      throws IOException {
+    assert fsd.hasWriteLock();
+    Preconditions.checkNotNull(zoneNode, "Zone node is null");
+    INodesInPath iip = INodesInPath.fromINode(zoneNode);
+    final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
+    Preconditions.checkNotNull(zoneProto, "ZoneEncryptionInfoProto is null.");
+    Preconditions.checkNotNull(origStatus, "Null status for " + iip.getPath());
+
+    final ReencryptionInfoProto newProto = PBHelperClient
+        .convert(origStatus.getEzKeyVersionName(),
+            origStatus.getSubmissionTime(), false,
+            origStatus.getFilesReencrypted() + numReencrypted,
+            origStatus.getNumReencryptionFailures() + numFailures, null,
+            lastFile);
+
+    final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
+        .convert(PBHelperClient.convert(zoneProto.getSuite()),
+            PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
+            zoneProto.getKeyName(), newProto);
+
+    final XAttr xattr = XAttrHelper
+        .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(xattr);
+    FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xattrs,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    return xattr;
+  }
+
+  /**
+   * Log re-encrypt complete (cancel, or 100% re-encrypt) to edits.
+   * Caller should logSync after calling this, outside of the FSN lock.
+   * <p>
+   * Original reencryption status is passed in to get existing information,
+   * this should include whether it is finished due to cancellation.
+   * The reencryption status is updated during SetXAttrs for completion time.
+   */
+  static List<XAttr> updateReencryptionFinish(final FSDirectory fsd,
+      final INodesInPath zoneIIP, final ZoneReencryptionStatus origStatus)
+      throws IOException {
+    assert origStatus != null;
+    assert fsd.hasWriteLock();
+    fsd.ezManager.getReencryptionStatus()
+        .markZoneCompleted(zoneIIP.getLastINode().getId());
+    final XAttr xattr =
+        generateNewXAttrForReencryptionFinish(zoneIIP, origStatus);
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(xattr);
+    FSDirXAttrOp.unprotectedSetXAttrs(fsd, zoneIIP, xattrs,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    return xattrs;
+  }
+
+  static XAttr generateNewXAttrForReencryptionFinish(final INodesInPath iip,
+      final ZoneReencryptionStatus status) throws IOException {
+    final ZoneEncryptionInfoProto zoneProto = getZoneEncryptionInfoProto(iip);
+    final ReencryptionInfoProto newRiProto = PBHelperClient
+        .convert(status.getEzKeyVersionName(), status.getSubmissionTime(),
+            status.isCanceled(), status.getFilesReencrypted(),
+            status.getNumReencryptionFailures(), Time.now(), null);
+
+    final ZoneEncryptionInfoProto newZoneProto = PBHelperClient
+        .convert(PBHelperClient.convert(zoneProto.getSuite()),
+            PBHelperClient.convert(zoneProto.getCryptoProtocolVersion()),
+            zoneProto.getKeyName(), newRiProto);
+
+    final XAttr xattr = XAttrHelper
+        .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, newZoneProto.toByteArray());
+    return xattr;
+  }
+
+  private static ZoneEncryptionInfoProto getZoneEncryptionInfoProto(
+      final INodesInPath iip) throws IOException {
+    final XAttr fileXAttr = FSDirXAttrOp
+        .unprotectedGetXAttrByPrefixedName(iip, CRYPTO_XATTR_ENCRYPTION_ZONE);
+    if (fileXAttr == null) {
+      throw new IOException(
+          "Could not find reencryption XAttr for file " + iip.getPath());
+    }
+    try {
+      return ZoneEncryptionInfoProto.parseFrom(fileXAttr.getValue());
+    } catch (InvalidProtocolBufferException e) {
+      throw new IOException(
+          "Could not parse file encryption info for " + "inode " + iip
+              .getPath(), e);
+    }
+  }
+
+  /**
+   * Save the batch's edeks to file xattrs.
+   */
+  static void saveFileXAttrsForBatch(FSDirectory fsd,
+      List<FileEdekInfo> batch) {
+    assert fsd.getFSNamesystem().hasWriteLock();
+    if (batch != null && !batch.isEmpty()) {
+      for (FileEdekInfo entry : batch) {
+        final INode inode = fsd.getInode(entry.getInodeId());
+        Preconditions.checkNotNull(inode);
+        fsd.getEditLog().logSetXAttrs(inode.getFullPathName(),
+            inode.getXAttrFeature().getXAttrs(), false);
+      }
+    }
+  }
+
   /**
    * Set the FileEncryptionInfo for an INode.
    *
    * @param fsd fsdirectory
-   * @param src the path of a directory which will be the root of the
-   *            encryption zone.
    * @param info file encryption information
+   * @param flag action when setting xattr. Either CREATE or REPLACE.
    * @throws IOException
    */
   static void setFileEncryptionInfo(final FSDirectory fsd,
-      final INodesInPath iip, final FileEncryptionInfo info)
-          throws IOException {
+      final INodesInPath iip, final FileEncryptionInfo info,
+      final XAttrSetFlag flag) throws IOException {
     // Make the PB for the xattr
     final HdfsProtos.PerFileEncryptionInfoProto proto =
         PBHelperClient.convertPerFileEncInfo(info);
@@ -238,8 +435,7 @@
     xAttrs.add(fileEncryptionAttr);
     fsd.writeLock();
     try {
-      FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xAttrs,
-                                        EnumSet.of(XAttrSetFlag.CREATE));
+      FSDirXAttrOp.unprotectedSetXAttrs(fsd, iip, xAttrs, EnumSet.of(flag));
     } finally {
       fsd.writeUnlock();
     }
@@ -500,4 +696,34 @@
       this.edek = edek;
     }
   }
+
+  /**
+   * Get the latest key version for the given EZ. This contacts the KMS for
+   * the current key, which is its latest version.
+   * @param dir the FSDirectory
+   * @param zone the encryption zone
+   * @param pc the permission checker
+   * @return the current (latest) key version of the zone's key.
+   * @throws IOException
+   */
+  static KeyVersion getLatestKeyVersion(final FSDirectory dir,
+      final String zone, final FSPermissionChecker pc) throws IOException {
+    final EncryptionZone ez;
+    assert dir.getProvider() != null;
+    dir.readLock();
+    try {
+      final INodesInPath iip = dir.resolvePath(pc, zone, DirOp.READ);
+      if (iip.getLastINode() == null) {
+        throw new FileNotFoundException(zone + " does not exist.");
+      }
+      dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), iip.getPath());
+      ez = FSDirEncryptionZoneOp.getEZForPath(dir, iip);
+    } finally {
+      dir.readUnlock();
+    }
+    // Contact KMS out of locks.
+    KeyVersion currKv = dir.getProvider().getCurrentKey(ez.getKeyName());
+    Preconditions.checkNotNull(currKv,
+        "No current key versions for key name " + ez.getKeyName());
+    return currKv;
+  }
 }
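
getLatestKeyVersion above follows a lock discipline used throughout this patch: resolve and validate state under the directory lock, release it, and only then make the slow KMS call. A generic sketch of that split; the ReentrantReadWriteLock and the fake KMS call are stand-ins for the FSDirectory lock and the key provider:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Resolve under the lock, call the remote service outside it, so a slow
// KMS round trip never stalls other lock holders. Names are illustrative.
public class LockThenRemoteDemo {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private String zoneKeyName = "ezKey1";

  public String fetchCurrentKeyVersion() {
    final String keyName;
    lock.readLock().lock();
    try {
      keyName = zoneKeyName;        // snapshot needed state under the lock
    } finally {
      lock.readLock().unlock();
    }
    return contactKms(keyName);     // slow RPC happens out of locks
  }

  private String contactKms(String keyName) {
    return keyName + "@v2";         // stand-in for a real KMS round trip
  }

  public static void main(String[] args) {
    System.out.println(new LockThenRemoteDemo().fetchCurrentKeyVersion());
  }
}
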
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 486503c..4f4befe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -17,6 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.erasurecode.CodecRegistry;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -25,28 +42,10 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.CodecRegistry;
-import org.apache.hadoop.security.AccessControlException;
-
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
 
 /**
@@ -62,7 +61,7 @@
 
   /**
    * Check if the ecPolicyName is valid and enabled, return the corresponding
-   * EC policy if is.
+   * EC policy if it is, including the REPLICATION EC policy.
    * @param fsn namespace
    * @param ecPolicyName name of EC policy to be checked
    * @return an erasure coding policy if ecPolicyName is valid and enabled
@@ -295,7 +294,12 @@
     if (iip.getLastINode() == null) {
       throw new FileNotFoundException("Path not found: " + iip.getPath());
     }
-    return getErasureCodingPolicyForPath(fsd, iip);
+
+    ErasureCodingPolicy ecPolicy = getErasureCodingPolicyForPath(fsd, iip);
+    if (ecPolicy != null && ecPolicy.isReplicationPolicy()) {
+      ecPolicy = null;
+    }
+    return ecPolicy;
   }
 
   /**
@@ -312,7 +316,8 @@
   }
 
   /**
-   * Get the erasure coding policy. This does not do any permission checking.
+   * Get the erasure coding policy, including the REPLICATION policy. This does
+   * not do any permission checking.
    *
    * @param fsn namespace
    * @param iip inodes in the path containing the file
@@ -335,7 +340,7 @@
   static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();
-    return fsn.getErasureCodingPolicyManager().getEnabledPolicies();
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
   /**
@@ -344,12 +349,13 @@
    * @param fsn namespace
    * @return a {@link java.util.Map} of codec name to coder names
    */
-  static HashMap<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
+  static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();
     return CodecRegistry.getInstance().getCodec2CoderCompactMap();
   }
 
+  // Return the erasure coding policy for a path, including the REPLICATION
+  // policy.
   private static ErasureCodingPolicy getErasureCodingPolicyForPath(
       FSDirectory fsd, INodesInPath iip) throws IOException {
     Preconditions.checkNotNull(iip, "INodes cannot be null");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index a62cddd..012e916 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -397,7 +398,8 @@
         newNode.getFileUnderConstructionFeature().getClientName(),
         newNode.getId());
     if (feInfo != null) {
-      FSDirEncryptionZoneOp.setFileEncryptionInfo(fsd, iip, feInfo);
+      FSDirEncryptionZoneOp.setFileEncryptionInfo(fsd, iip, feInfo,
+          XAttrSetFlag.CREATE);
     }
     setNewINodeStoragePolicy(fsd.getBlockManager(), iip, isLazyPersist);
     fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
@@ -541,7 +543,7 @@
           ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
               fsd.getFSNamesystem(), existing);
         }
-        if (ecPolicy != null) {
+        if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
           isStriped = true;
         }
       }
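
The isStriped change above treats the REPLICATION policy as a sentinel: it resolves like any other EC policy but must never switch a new file to the striped block layout. A small sketch of the sentinel check; EcPolicy is an illustrative stand-in, not Hadoop's ErasureCodingPolicy:

// Sentinel check: a "replication" policy resolves like an EC policy but
// never selects the striped layout.
class EcPolicy {
  final String name;
  EcPolicy(String name) { this.name = name; }
  boolean isReplicationPolicy() { return "replication".equalsIgnoreCase(name); }
}

public class StripedCheckDemo {
  public static void main(String[] args) {
    EcPolicy[] resolved = {new EcPolicy("RS-6-3-1024k"),
        new EcPolicy("replication"), null};
    for (EcPolicy p : resolved) {
      boolean isStriped = p != null && !p.isReplicationPolicy();
      System.out.println((p == null ? "none" : p.name)
          + " -> striped=" + isStriped);
    }
  }
}
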
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ddc088c..acdade7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.security.AccessControlException;
@@ -275,6 +276,12 @@
             PBHelperClient.convert(ezProto.getSuite()),
             PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
             ezProto.getKeyName());
+
+        if (ezProto.hasReencryptionProto()) {
+          ReencryptionInfoProto reProto = ezProto.getReencryptionProto();
+          fsd.ezManager.getReencryptionStatus()
+              .updateZoneStatus(inode.getId(), iip.getPath(), reProto);
+        }
       }
 
       if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 87b1156..e6aa533 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -1358,12 +1359,18 @@
     }
     try {
       final HdfsProtos.ZoneEncryptionInfoProto ezProto =
-          HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
-              xattr.getValue());
+          HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
       ezManager.unprotectedAddEncryptionZone(inode.getId(),
           PBHelperClient.convert(ezProto.getSuite()),
           PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
           ezProto.getKeyName());
+      if (ezProto.hasReencryptionProto()) {
+        final ReencryptionInfoProto reProto = ezProto.getReencryptionProto();
+        // The inode's parents may not be loaded yet if this is done during
+        // fsimage loading, so the full path cannot be set now. Pass in null
+        // to indicate that.
+        ezManager.getReencryptionStatus()
+            .updateZoneStatus(inode.getId(), null, reProto);
+      }
     } catch (InvalidProtocolBufferException e) {
       NameNode.LOG.warn("Error parsing protocol buffer of " +
           "EZ XAttr " + xattr.getName() + " dir:" + inode.getFullPathName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 060bd59..bc62a7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -413,7 +413,7 @@
       // update the block list.
       
       // Update the salient file attributes.
-      newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
+      newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID, false);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       ErasureCodingPolicy ecPolicy =
           FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
@@ -436,7 +436,7 @@
       final INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
 
       // Update the salient file attributes.
-      file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
+      file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID, false);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       ErasureCodingPolicy ecPolicy =
           FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b1639b2..12d96d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -89,9 +89,11 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
 
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.util.Time.now;
@@ -199,6 +201,7 @@
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
@@ -1230,6 +1233,8 @@
       dir.updateCountForQuota();
       // Enable quota checks.
       dir.enableQuotaChecks();
+      dir.ezManager.startReencryptThreads();
+
       if (haEnabled) {
         // Renew all of the leases before becoming active.
         // This is because, while we were in standby mode,
@@ -1321,6 +1326,9 @@
         // so that the tailer starts from the right spot.
         getFSImage().updateLastAppliedTxIdFromWritten();
       }
+      if (dir != null) {
+        dir.ezManager.stopReencryptThread();
+      }
       if (cacheManager != null) {
         cacheManager.stopMonitorThread();
         cacheManager.clearDirectiveStats();
@@ -7031,6 +7039,84 @@
     }
   }
 
+  void reencryptEncryptionZone(final String zone, final ReencryptAction action,
+      final boolean logRetryCache) throws IOException {
+    boolean success = false;
+    try {
+      Preconditions.checkNotNull(zone, "zone is null.");
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("NameNode in safemode, cannot " + action
+          + " re-encryption on zone " + zone);
+      reencryptEncryptionZoneInt(zone, action, logRetryCache);
+      success = true;
+    } finally {
+      logAuditEvent(success, action + " re-encryption", zone, null, null);
+    }
+  }
+
+  BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      final long prevId) throws IOException {
+    final String operationName = "listReencryptionStatus";
+    boolean success = false;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.READ);
+      final BatchedListEntries<ZoneReencryptionStatus> ret =
+          FSDirEncryptionZoneOp.listReencryptionStatus(dir, prevId);
+      success = true;
+      return ret;
+    } finally {
+      readUnlock(operationName);
+      logAuditEvent(success, operationName, null);
+    }
+  }
+
+  private void reencryptEncryptionZoneInt(final String zone,
+      final ReencryptAction action, final boolean logRetryCache)
+      throws IOException {
+    if (getProvider() == null) {
+      throw new IOException("No key provider configured, re-encryption "
+          + "operation is rejected");
+    }
+    FSPermissionChecker pc = getPermissionChecker();
+    // get keyVersionName out of the lock. This keyVersionName will be used
+    // as the target keyVersion for the entire re-encryption.
+    // This means all edek's keyVersion will be compared with this one, and
+    // kms is only contacted if the edek's keyVersion is different.
+    final KeyVersion kv =
+        FSDirEncryptionZoneOp.getLatestKeyVersion(dir, zone, pc);
+    provider.invalidateCache(kv.getName());
+    writeLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode(
+          "NameNode in safemode, cannot " + action + " re-encryption on zone "
+              + zone);
+      switch (action) {
+      case START:
+        FSDirEncryptionZoneOp
+            .reencryptEncryptionZone(dir, zone, kv.getVersionName(),
+                logRetryCache);
+        break;
+      case CANCEL:
+        FSDirEncryptionZoneOp
+            .cancelReencryptEncryptionZone(dir, zone, logRetryCache);
+        break;
+      default:
+        throw new IOException(
+            "Re-encryption action " + action + " is not supported");
+      }
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+  }
+
   /**
    * Set an erasure coding policy on the given path.
    * @param srcArg  The path of the target directory.
@@ -7081,6 +7167,8 @@
       checkOperation(OperationCategory.WRITE);
       for (ErasureCodingPolicy policy : policies) {
         try {
+          checkOperation(OperationCategory.WRITE);
+          checkNameNodeSafeMode("Cannot add erasure coding policy");
           ErasureCodingPolicy newPolicy =
               FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
           addECPolicyName = newPolicy.getName();
@@ -7111,6 +7199,9 @@
     boolean success = false;
     writeLock();
     try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot remove erasure coding policy "
+          + ecPolicyName);
       FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
       success = true;
     } finally {
@@ -7250,14 +7341,14 @@
   /**
    * Get available erasure coding codecs and corresponding coders.
    */
-  HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  Map<String, String> getErasureCodingCodecs() throws IOException {
     final String operationName = "getErasureCodingCodecs";
     boolean success = false;
     checkOperation(OperationCategory.READ);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      final HashMap<String, String> ret =
+      final Map<String, String> ret =
           FSDirErasureCodingOp.getErasureCodingCodecs(this);
       success = true;
       return ret;
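
For context, here is a minimal client-side sketch of driving these new
operations. It assumes the HdfsAdmin wrapper methods added alongside this
changeset (reencryptEncryptionZone and listReencryptionStatus) and the
ZoneReencryptionStatus accessors shown; treat the exact names as
illustrative rather than the final API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
    import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;

    public class ReencryptZoneExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        // Kick off re-encryption; this reaches the START branch above.
        admin.reencryptEncryptionZone(new Path("/zone"), ReencryptAction.START);
        // Poll progress; the iterator pages through batched RPC results.
        RemoteIterator<ZoneReencryptionStatus> it =
            admin.listReencryptionStatus();
        while (it.hasNext()) {
          ZoneReencryptionStatus zs = it.next();
          System.out.println(zs.getZoneName() + ": " + zs.getState());
        }
      }
    }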
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index d768e08..34bfe10a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -724,8 +724,11 @@
   /**
    * Set last access time of inode.
    */
-  public final INode setAccessTime(long accessTime, int latestSnapshotId) {
-    recordModification(latestSnapshotId);
+  public final INode setAccessTime(long accessTime, int latestSnapshotId,
+      boolean skipCaptureAccessTimeOnlyChangeInSnapshot) {
+    if (!skipCaptureAccessTimeOnlyChangeInSnapshot) {
+      recordModification(latestSnapshotId);
+    }
     setAccessTime(accessTime);
     return this;
   }
@@ -780,8 +783,17 @@
     return StringUtils.split(path, Path.SEPARATOR_CHAR);
   }
 
+  /**
+   * Verifies if the path informed is a valid absolute path.
+   * @param path the absolute path to validate.
+   * @return true if the path is valid.
+   */
+  static boolean isValidAbsolutePath(final String path){
+    return path != null && path.startsWith(Path.SEPARATOR);
+  }
+
   private static void checkAbsolutePath(final String path) {
-    if (path == null || !path.startsWith(Path.SEPARATOR)) {
+    if (!isValidAbsolutePath(path)) {
       throw new AssertionError("Absolute path required, but got '"
           + path + "'");
     }
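
A small illustration of the two INode changes above (the call sites and
values are hypothetical). With the new boolean flag, an access-time-only
update can skip recordModification, so a pure read does not force a
copy-on-write into the latest snapshot; the extracted isValidAbsolutePath
helper keeps the same check the assertion performs:

    // Previous behavior: record the change in the latest snapshot first.
    inode.setAccessTime(now, latestSnapshotId, false);
    // Access-time-only update: skip snapshot capture, just set the atime.
    inode.setAccessTime(now, latestSnapshotId, true);

    assert INode.isValidAbsolutePath("/a/b");  // absolute: valid
    assert !INode.isValidAbsolutePath("a/b");  // relative: invalid
    assert !INode.isValidAbsolutePath(null);   // null: invalid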
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d304d3d..3fbb7bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -37,9 +37,9 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
 
@@ -105,6 +105,7 @@
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -116,6 +117,7 @@
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.BlocksStats;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -2052,6 +2054,31 @@
   }
 
   @Override // ClientProtocol
+  public void reencryptEncryptionZone(final String zone,
+      final ReencryptAction action) throws IOException {
+    checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
+    final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return;
+    }
+    boolean success = false;
+    try {
+      namesystem.reencryptEncryptionZone(zone, action, cacheEntry != null);
+      success = true;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+
+  @Override // ClientProtocol
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      final long prevId) throws IOException {
+    checkNNStartup();
+    return namesystem.listReencryptionStatus(prevId);
+  }
+
+  @Override // ClientProtocol
   public void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException {
     checkNNStartup();
@@ -2278,7 +2305,7 @@
   }
 
   @Override // ClientProtocol
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
     checkNNStartup();
     return namesystem.getErasureCodingCodecs();
   }
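
The prevId argument makes the listReencryptionStatus RPC added above a
cursor-style batched listing, like other batched NN RPCs. A hedged sketch of
the consuming loop, assuming a ClientProtocol proxy named namenode and a
getId() accessor on ZoneReencryptionStatus as the cursor:

    long prevId = 0;
    BatchedEntries<ZoneReencryptionStatus> batch;
    do {
      batch = namenode.listReencryptionStatus(prevId);
      for (int i = 0; i < batch.size(); i++) {
        ZoneReencryptionStatus zs = batch.get(i);
        prevId = zs.getId();  // assumed accessor: the zone inode id
        System.out.println(zs.getZoneName() + " " + zs.getState());
      }
    } while (batch.hasMore());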
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
new file mode 100644
index 0000000..729b894
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -0,0 +1,940 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus.State;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ReencryptionTask;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ZoneSubmissionTracker;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StopWatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_EDEK_THREADS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_EDEK_THREADS_KEY;
+
+/**
+ * Class for handling re-encrypt EDEK operations.
+ * <p>
+ * For each EZ, ReencryptionHandler walks the tree in a depth-first order,
+ * and submits batches of (files + existing edeks) as re-encryption tasks
+ * to a thread pool. Each thread in the pool then contacts the KMS to
+ * re-encrypt the edeks. ReencryptionUpdater tracks the tasks and updates
+ * file xattrs with the new edeks.
+ * <p>
+ * File renames are disabled in the EZ that's being re-encrypted. Newly created
+ * files will have new edeks, because the edek cache is drained upon the
+ * submission of a re-encryption command.
+ * <p>
+ * It is assumed only 1 ReencryptionHandler will be running, because:
+ *   1. The bottleneck of the entire re-encryption appears to be on the KMS.
+ *   2. Even with multiple handlers, the performance gain would be limited,
+ * since the updater requires the write lock and is single-threaded.
+ * <p>
+ * This class uses the FSDirectory lock for synchronization.
+ */
+@InterfaceAudience.Private
+public class ReencryptionHandler implements Runnable {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ReencryptionHandler.class);
+
+  // 2000 is based on buffer size = 512 * 1024, and SetXAttr op size is
+  // 100 - 200 bytes (depending on the xattr value).
+  // The buffer size is hard-coded, see outputBufferCapacity from QJM.
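+  // Rough arithmetic: 512 * 1024 / 200 is roughly 2600 ops per buffer in the
+  // worst case, so a 2000-op batch keeps one batch's edits within a single
+  // buffer flush.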
+  private static final int MAX_BATCH_SIZE_WITHOUT_FLOODING = 2000;
+
+  private final EncryptionZoneManager ezManager;
+  private final FSDirectory dir;
+  private final long interval;
+  private final int reencryptBatchSize;
+  private double throttleLimitHandlerRatio;
+  private final int reencryptThreadPoolSize;
+  // stopwatches for throttling
+  private final StopWatch throttleTimerAll = new StopWatch();
+  private final StopWatch throttleTimerLocked = new StopWatch();
+
+  private ExecutorCompletionService<ReencryptionTask> batchService;
+  private BlockingQueue<Runnable> taskQueue;
+  // protected by ReencryptionHandler object lock
+  private final Map<Long, ZoneSubmissionTracker> submissions =
+      new ConcurrentHashMap<>();
+
+  // The current batch that the handler is working on. Handler is designed to
+  // be single-threaded, see class javadoc for more details.
+  private ReencryptionBatch currentBatch;
+
+  private final ReencryptionUpdater reencryptionUpdater;
+  private ExecutorService updaterExecutor;
+
+  // Vars for unit tests.
+  private volatile boolean shouldPauseForTesting = false;
+  private volatile int pauseAfterNthSubmission = 0;
+
+  /**
+   * Stop the re-encryption updater thread, as well as all EDEK re-encryption
+   * tasks submitted.
+   */
+  void stopThreads() {
+    assert dir.hasWriteLock();
+    for (ZoneSubmissionTracker zst : submissions.values()) {
+      zst.cancelAllTasks();
+    }
+    if (updaterExecutor != null) {
+      updaterExecutor.shutdownNow();
+    }
+  }
+
+  /**
+   * Start the re-encryption updater thread.
+   */
+  void startUpdaterThread() {
+    updaterExecutor = Executors.newSingleThreadExecutor(
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("reencryptionUpdaterThread #%d").build());
+    updaterExecutor.execute(reencryptionUpdater);
+  }
+
+  @VisibleForTesting
+  synchronized void pauseForTesting() {
+    shouldPauseForTesting = true;
+    LOG.info("Pausing re-encrypt handler for testing.");
+    notify();
+  }
+
+  @VisibleForTesting
+  synchronized void resumeForTesting() {
+    shouldPauseForTesting = false;
+    LOG.info("Resuming re-encrypt handler for testing.");
+    notify();
+  }
+
+  @VisibleForTesting
+  void pauseForTestingAfterNthSubmission(final int count) {
+    assert pauseAfterNthSubmission == 0;
+    pauseAfterNthSubmission = count;
+  }
+
+  @VisibleForTesting
+  void pauseUpdaterForTesting() {
+    reencryptionUpdater.pauseForTesting();
+  }
+
+  @VisibleForTesting
+  void resumeUpdaterForTesting() {
+    reencryptionUpdater.resumeForTesting();
+  }
+
+  @VisibleForTesting
+  void pauseForTestingAfterNthCheckpoint(final long zoneId, final int count) {
+    reencryptionUpdater.pauseForTestingAfterNthCheckpoint(zoneId, count);
+  }
+
+  private synchronized void checkPauseForTesting() throws InterruptedException {
+    assert !dir.hasReadLock();
+    assert !dir.getFSNamesystem().hasReadLock();
+    while (shouldPauseForTesting) {
+      LOG.info("Sleeping in the re-encrypt handler for unit test.");
+      wait();
+      LOG.info("Continuing re-encrypt handler after pausing.");
+    }
+  }
+
+  ReencryptionHandler(final EncryptionZoneManager ezMgr,
+      final Configuration conf) {
+    this.ezManager = ezMgr;
+    Preconditions.checkNotNull(ezManager.getProvider(),
+        "No provider set, cannot re-encrypt");
+    this.dir = ezMgr.getFSDirectory();
+    this.interval =
+        conf.getTimeDuration(DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY,
+            DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_DEFAULT,
+            TimeUnit.MILLISECONDS);
+    Preconditions.checkArgument(interval > 0,
+        DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY + " is not positive.");
+    this.reencryptBatchSize = conf.getInt(DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY,
+        DFS_NAMENODE_REENCRYPT_BATCH_SIZE_DEFAULT);
+    Preconditions.checkArgument(reencryptBatchSize > 0,
+        DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY + " is not positive.");
+    if (reencryptBatchSize > MAX_BATCH_SIZE_WITHOUT_FLOODING) {
+      LOG.warn("Re-encryption batch size is {}. It could cause edit log buffer "
+          + "to be full and trigger a logSync within the writelock, greatly "
+          + "impacting namenode throughput.", reencryptBatchSize);
+    }
+    this.throttleLimitHandlerRatio =
+        conf.getDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY,
+            DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_DEFAULT);
+    LOG.info("Configured throttleLimitHandlerRatio={} for re-encryption",
+        throttleLimitHandlerRatio);
+    Preconditions.checkArgument(throttleLimitHandlerRatio > 0.0f,
+        DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY
+            + " is not positive.");
+    this.reencryptThreadPoolSize =
+        conf.getInt(DFS_NAMENODE_REENCRYPT_EDEK_THREADS_KEY,
+            DFS_NAMENODE_REENCRYPT_EDEK_THREADS_DEFAULT);
+
+    taskQueue = new LinkedBlockingQueue<>();
+    ThreadPoolExecutor threadPool =
+        new ThreadPoolExecutor(reencryptThreadPoolSize, reencryptThreadPoolSize,
+            60, TimeUnit.SECONDS, taskQueue, new Daemon.DaemonFactory() {
+              private final AtomicInteger ind = new AtomicInteger(0);
+
+              @Override
+              public Thread newThread(Runnable r) {
+                Thread t = super.newThread(r);
+                t.setName("reencryption edek Thread-" + ind.getAndIncrement());
+                return t;
+              }
+            }, new ThreadPoolExecutor.CallerRunsPolicy() {
+
+              @Override
+              public void rejectedExecution(Runnable runnable,
+                  ThreadPoolExecutor e) {
+                LOG.info("Execution rejected, executing in current thread");
+                super.rejectedExecution(runnable, e);
+              }
+            });
+
+    threadPool.allowCoreThreadTimeOut(true);
+    this.batchService = new ExecutorCompletionService<>(threadPool);
+    reencryptionUpdater =
+        new ReencryptionUpdater(dir, batchService, this, conf);
+    currentBatch = new ReencryptionBatch(reencryptBatchSize);
+  }
+
+  ReencryptionStatus getReencryptionStatus() {
+    return ezManager.getReencryptionStatus();
+  }
+
+  void cancelZone(final long zoneId, final String zoneName) throws IOException {
+    assert dir.hasWriteLock();
+    final ZoneReencryptionStatus zs =
+        getReencryptionStatus().getZoneStatus(zoneId);
+    if (zs == null || zs.getState() == State.Completed) {
+      throw new IOException("Zone " + zoneName + " is not under re-encryption");
+    }
+    zs.cancel();
+    ZoneSubmissionTracker zst = submissions.get(zoneId);
+    if (zst != null) {
+      zst.cancelAllTasks();
+    }
+  }
+
+  void removeZone(final long zoneId) {
+    assert dir.hasWriteLock();
+    LOG.info("Removing zone {} from re-encryption.", zoneId);
+    ZoneSubmissionTracker zst = submissions.get(zoneId);
+    if (zst != null) {
+      zst.cancelAllTasks();
+    }
+    submissions.remove(zoneId);
+    getReencryptionStatus().removeZone(zoneId);
+  }
+
+  ZoneSubmissionTracker getTracker(final long zoneId) {
+    assert dir.hasReadLock();
+    return unprotectedGetTracker(zoneId);
+  }
+
+  /**
+   * Get the tracker without holding the FSDirectory lock. This is only used
+   * for testing, when the updater checks whether to pause.
+   */
+  ZoneSubmissionTracker unprotectedGetTracker(final long zoneId) {
+    return submissions.get(zoneId);
+  }
+
+  /**
+   * Add a dummy tracker (with 1 task that has 0 files to re-encrypt)
+   * for the zone. This is necessary to complete the re-encryption in case
+   * no file in the entire zone needs re-encryption at all. We cannot simply
+   * update zone status and set zone xattrs, because in the handler we only hold
+   * readlock, and setting xattrs requires upgrading to a writelock.
+   *
+   * @param zoneId Id of the EZ inode.
+   */
+  void addDummyTracker(final long zoneId) {
+    assert dir.hasReadLock();
+    assert !submissions.containsKey(zoneId);
+    final ZoneSubmissionTracker zst = new ZoneSubmissionTracker();
+    zst.setSubmissionDone();
+
+    Future future = batchService.submit(
+        new EDEKReencryptCallable(zoneId, new ReencryptionBatch(), this));
+    zst.addTask(future);
+    submissions.put(zoneId, zst);
+  }
+
+  /**
+   * Main loop. It takes at most 1 zone per scan, and executes until the zone
+   * is completed.
+   * See {@link #reencryptEncryptionZone(long)}.
+   */
+  @Override
+  public void run() {
+    LOG.info("Starting up re-encrypt thread with interval={} millisecond.",
+        interval);
+    while (true) {
+      try {
+        synchronized (this) {
+          wait(interval);
+        }
+        checkPauseForTesting();
+      } catch (InterruptedException ie) {
+        LOG.info("Re-encrypt handler interrupted. Exiting");
+        Thread.currentThread().interrupt();
+        return;
+      }
+
+      final Long zoneId;
+      dir.readLock();
+      try {
+        zoneId = getReencryptionStatus().getNextUnprocessedZone();
+        if (zoneId == null) {
+          // empty queue.
+          continue;
+        }
+        LOG.info("Executing re-encrypt commands on zone {}. Current zones:{}",
+            zoneId, getReencryptionStatus());
+      } finally {
+        dir.readUnlock();
+      }
+
+      try {
+        reencryptEncryptionZone(zoneId);
+      } catch (RetriableException | SafeModeException re) {
+        LOG.info("Re-encryption caught exception, will retry", re);
+        getReencryptionStatus().markZoneForRetry(zoneId);
+      } catch (IOException ioe) {
+        LOG.warn("IOException caught when re-encrypting zone {}", zoneId, ioe);
+      } catch (InterruptedException ie) {
+        LOG.info("Re-encrypt handler interrupted. Exiting.");
+        Thread.currentThread().interrupt();
+        return;
+      } catch (Throwable t) {
+        LOG.error("Re-encrypt handler thread exiting. Exception caught when"
+            + " re-encrypting zone {}.", zoneId, t);
+        return;
+      }
+    }
+  }
+
+  /**
+   * Re-encrypts a zone by recursively iterating all paths inside the zone,
+   * in lexicographic order.
+   * Files are re-encrypted, and subdirs are processed during iteration.
+   *
+   * @param zoneId the Zone's id.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  void reencryptEncryptionZone(final long zoneId)
+      throws IOException, InterruptedException {
+    throttleTimerAll.reset().start();
+    throttleTimerLocked.reset();
+    final INode zoneNode;
+    final ZoneReencryptionStatus zs;
+
+    readLock();
+    try {
+      getReencryptionStatus().markZoneStarted(zoneId);
+      zoneNode = dir.getInode(zoneId);
+      // start re-encrypting the zone from the beginning
+      if (zoneNode == null) {
+        LOG.info("Directory with id {} removed during re-encrypt, skipping",
+            zoneId);
+        return;
+      }
+      if (!zoneNode.isDirectory()) {
+        LOG.info("Cannot re-encrypt directory with id {} because it's not a"
+            + " directory.", zoneId);
+        return;
+      }
+
+      zs = getReencryptionStatus().getZoneStatus(zoneId);
+      assert zs != null;
+      // Log the costly getFullPathName() only once here; use the id elsewhere.
+      LOG.info("Re-encrypting zone {}(id={})", zoneNode.getFullPathName(),
+          zoneId);
+      if (zs.getLastCheckpointFile() == null) {
+        // new re-encryption
+        reencryptDir(zoneNode.asDirectory(), zoneId, HdfsFileStatus.EMPTY_NAME,
+            zs.getEzKeyVersionName());
+      } else {
+        // resuming from a past re-encryption
+        restoreFromLastProcessedFile(zoneId, zs);
+      }
+      // save the last batch and mark complete
+      submitCurrentBatch(zoneId);
+      LOG.info("Submission completed of zone {} for re-encryption.", zoneId);
+      reencryptionUpdater.markZoneSubmissionDone(zoneId);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  List<XAttr> completeReencryption(final INode zoneNode) throws IOException {
+    assert dir.hasWriteLock();
+    assert dir.getFSNamesystem().hasWriteLock();
+    final Long zoneId = zoneNode.getId();
+    ZoneReencryptionStatus zs = getReencryptionStatus().getZoneStatus(zoneId);
+    assert zs != null;
+    LOG.info("Re-encryption completed on zone {}. Re-encrypted {} files,"
+            + " failures encountered: {}.", zoneNode.getFullPathName(),
+        zs.getFilesReencrypted(), zs.getNumReencryptionFailures());
+    // Remove the zone's pending submissions; the zone's status is finalized
+    // by updateReencryptionFinish below.
+    submissions.remove(zoneId);
+    return FSDirEncryptionZoneOp
+        .updateReencryptionFinish(dir, INodesInPath.fromINode(zoneNode), zs);
+  }
+
+  /**
+   * Restore the re-encryption from the progress inside ReencryptionStatus.
+   * This means starting from exactly the lastProcessedFile (LPF), skipping
+   * all earlier paths in lexicographic order. Lexicographically later
+   * directories on the LPF's parent paths are picked up as the traversal
+   * resumes.
+   */
+  private void restoreFromLastProcessedFile(final long zoneId,
+      final ZoneReencryptionStatus zs)
+      throws IOException, InterruptedException {
+    final INodeDirectory parent;
+    final byte[] startAfter;
+    final INodesInPath lpfIIP =
+        dir.getINodesInPath(zs.getLastCheckpointFile(), FSDirectory.DirOp.READ);
+    parent = lpfIIP.getLastINode().getParent();
+    startAfter = lpfIIP.getLastINode().getLocalNameBytes();
+    reencryptDir(parent, zoneId, startAfter, zs.getEzKeyVersionName());
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a position.
+   * <p>
+   * Each batch is then sent to the threadpool, where the KMS is contacted
+   * and the edeks re-encrypted. {@link ReencryptionUpdater} handles the
+   * tasks completed from the threadpool.
+   * <p>
+   * The iteration of the inode tree is done in a depth-first fashion. But
+   * instead of holding all INodeDirectory's in memory on the fly, only the
+   * path components down to the current inode are held. This is to reduce
+   * memory consumption.
+   *
+   * @param parent     The parent directory to iterate through
+   * @param zoneId     Id of the EZ inode
+   * @param startAfter Name of the file that re-encryption should start after
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void reencryptDir(final INodeDirectory parent, final long zoneId,
+      byte[] startAfter, final String ezKeyVerName)
+      throws IOException, InterruptedException {
+    List<byte[]> startAfters = new ArrayList<>();
+    if (parent == null) {
+      return;
+    }
+    INode curr = parent;
+    // construct startAfters all the way up to the zone inode.
+    startAfters.add(startAfter);
+    while (curr.getId() != zoneId) {
+      startAfters.add(0, curr.getLocalNameBytes());
+      curr = curr.getParent();
+    }
+    curr = reencryptDirInt(zoneId, parent, startAfters, ezKeyVerName);
+    while (!startAfters.isEmpty()) {
+      if (curr == null) {
+        // lock was reacquired, re-resolve path.
+        curr = resolvePaths(zoneId, startAfters);
+      }
+      curr = reencryptDirInt(zoneId, curr, startAfters, ezKeyVerName);
+    }
+  }
+
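+  // Worked example (hypothetical paths): when resuming at the last processed
+  // file /zone/a/b/f, reencryptDir is called with parent=/zone/a/b and
+  // startAfter="f". The loop above builds startAfters=["a","b","f"], i.e.
+  // the local names from the zone's child down to the cursor file, so that
+  // if the lock is dropped and a middle directory disappears, resolvePaths()
+  // can trim the stale tail and resume from the deepest surviving ancestor.
+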
+  /**
+   * Resolve the cursor of re-encryption to an inode.
+   * <p>
+   * The parent of the lowest-level startAfter is returned. If something in
+   * the middle of startAfters has changed, the parent of the lowest unchanged
+   * level is returned.
+   *
+   * @param zoneId      Id of the EZ inode.
+   * @param startAfters the cursor, represented by a list of path bytes.
+   * @return the parent inode corresponding to the startAfters, or null if
+   * the EZ node (furthest parent) is deleted.
+   */
+  private INode resolvePaths(final long zoneId, List<byte[]> startAfters)
+      throws IOException {
+    // If the readlock was reacquired, we need to resolve the paths again
+    // in case things have changed. If our cursor file/dir is changed,
+    // continue from the next one.
+    INode zoneNode = dir.getInode(zoneId);
+    if (zoneNode == null) {
+      throw new FileNotFoundException("Zone " + zoneId + " is deleted.");
+    }
+    INodeDirectory parent = zoneNode.asDirectory();
+    for (int i = 0; i < startAfters.size(); ++i) {
+      if (i == startAfters.size() - 1) {
+        // last startAfter does not need to be resolved, since search for
+        // nextChild will cover that automatically.
+        break;
+      }
+      INode curr =
+          parent.getChild(startAfters.get(i), Snapshot.CURRENT_STATE_ID);
+      if (curr == null) {
+        // inode at this level has changed. Update startAfters to point to
+        // the next dir at the parent level (and dropping any startAfters
+        // at lower levels).
+        for (; i < startAfters.size(); ++i) {
+          startAfters.remove(startAfters.size() - 1);
+        }
+        break;
+      }
+      parent = curr.asDirectory();
+    }
+    return parent;
+  }
+
+  /**
+   * Submit the current batch to the thread pool.
+   *
+   * @param zoneId Id of the EZ INode
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void submitCurrentBatch(final long zoneId)
+      throws IOException, InterruptedException {
+    assert dir.hasReadLock();
+    if (currentBatch.isEmpty()) {
+      return;
+    }
+    ZoneSubmissionTracker zst = submissions.get(zoneId);
+    if (zst == null) {
+      zst = new ZoneSubmissionTracker();
+      submissions.put(zoneId, zst);
+    }
+    Future future = batchService
+        .submit(new EDEKReencryptCallable(zoneId, currentBatch, this));
+    zst.addTask(future);
+    LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
+        currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
+    currentBatch = new ReencryptionBatch(reencryptBatchSize);
+    // flip the pause flag if this is the nth submission.
+    // The actual pause needs to happen outside of the lock.
+    if (pauseAfterNthSubmission > 0) {
+      if (--pauseAfterNthSubmission == 0) {
+        shouldPauseForTesting = true;
+      }
+    }
+  }
+
+  final class ReencryptionBatch {
+    // First file's path, for logging purpose.
+    private String firstFilePath;
+    private final List<FileEdekInfo> batch;
+
+    ReencryptionBatch() {
+      this(reencryptBatchSize);
+    }
+
+    ReencryptionBatch(int initialCapacity) {
+      batch = new ArrayList<>(initialCapacity);
+    }
+
+    void add(final INodeFile inode) throws IOException {
+      assert dir.hasReadLock();
+      Preconditions.checkNotNull(inode, "INodeFile is null");
+      if (batch.isEmpty()) {
+        firstFilePath = inode.getFullPathName();
+      }
+      batch.add(new FileEdekInfo(dir, inode));
+    }
+
+    String getFirstFilePath() {
+      return firstFilePath;
+    }
+
+    boolean isEmpty() {
+      return batch.isEmpty();
+    }
+
+    int size() {
+      return batch.size();
+    }
+
+    void clear() {
+      batch.clear();
+    }
+
+    List<FileEdekInfo> getBatch() {
+      return batch;
+    }
+  }
+
+  /**
+   * Simply contacts the KMS for re-encryption. No NN locks held.
+   */
+  private static class EDEKReencryptCallable
+      implements Callable<ReencryptionTask> {
+    private final long zoneNodeId;
+    private final ReencryptionBatch batch;
+    private final ReencryptionHandler handler;
+
+    EDEKReencryptCallable(final long zoneId,
+        final ReencryptionBatch currentBatch, final ReencryptionHandler rh) {
+      zoneNodeId = zoneId;
+      batch = currentBatch;
+      handler = rh;
+    }
+
+    @Override
+    public ReencryptionTask call() {
+      LOG.info("Processing batched re-encryption for zone {}, batch size {},"
+          + " start:{}", zoneNodeId, batch.size(), batch.getFirstFilePath());
+      if (batch.isEmpty()) {
+        return new ReencryptionTask(zoneNodeId, 0, batch);
+      }
+      final Stopwatch kmsSW = new Stopwatch().start();
+
+      int numFailures = 0;
+      String result = "Completed";
+      if (!reencryptEdeks()) {
+        numFailures += batch.size();
+        result = "Failed to";
+      }
+      LOG.info("{} re-encrypting one batch of {} edeks from KMS,"
+              + " time consumed: {}, start: {}.", result,
+          batch.size(), kmsSW.stop(), batch.getFirstFilePath());
+      return new ReencryptionTask(zoneNodeId, numFailures, batch);
+    }
+
+    private boolean reencryptEdeks() {
+      // communicate with the kms out of lock
+      final List<EncryptedKeyVersion> edeks = new ArrayList<>(batch.size());
+      for (FileEdekInfo entry : batch.getBatch()) {
+        edeks.add(entry.getExistingEdek());
+      }
+      // provider already has LoadBalancingKMSClientProvider's retries. If
+      // that fails, just fail this callable.
+      try {
+        handler.ezManager.getProvider().reencryptEncryptedKeys(edeks);
+        EncryptionFaultInjector.getInstance().reencryptEncryptedKeys();
+      } catch (GeneralSecurityException | IOException ex) {
+        LOG.warn("Failed to re-encrypt one batch of {} edeks, start:{}",
+            batch.size(), batch.getFirstFilePath(), ex);
+        return false;
+      }
+      int i = 0;
+      for (FileEdekInfo entry : batch.getBatch()) {
+        assert i < edeks.size();
+        entry.setEdek(edeks.get(i++));
+      }
+      return true;
+    }
+  }
+
+  /**
+   * Iterates the parent directory, and adds direct child files to the
+   * current batch. If the batch size meets the configured threshold, a
+   * Callable is created and sent to the thread pool, which will communicate
+   * with the KMS to get new edeks.
+   * <p>
+   * Locks could be released and reacquired when a Callable is created.
+   *
+   * @param zoneId Id of the EZ INode
+   * @return The inode which was just processed, if the lock was held for the
+   * entire process. Null if the lock was released.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private INode reencryptDirInt(final long zoneId, INode curr,
+      List<byte[]> startAfters, final String ezKeyVerName)
+      throws IOException, InterruptedException {
+    assert dir.hasReadLock();
+    assert dir.getFSNamesystem().hasReadLock();
+    Preconditions.checkNotNull(curr, "Current inode can't be null");
+    checkZoneReady(zoneId);
+    final INodeDirectory parent =
+        curr.isDirectory() ? curr.asDirectory() : curr.getParent();
+    ReadOnlyList<INode> children =
+        parent.getChildrenList(Snapshot.CURRENT_STATE_ID);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Re-encrypting directory {}", parent.getFullPathName());
+    }
+
+    final byte[] startAfter = startAfters.get(startAfters.size() - 1);
+    boolean lockReleased = false;
+    for (int i = INodeDirectory.nextChild(children, startAfter);
+         i < children.size(); ++i) {
+      final INode inode = children.get(i);
+      if (!reencryptINode(inode, ezKeyVerName)) {
+        // inode wasn't added for re-encryption. Recurse down if it's a dir,
+        // skip otherwise.
+        if (!inode.isDirectory()) {
+          continue;
+        }
+        if (ezManager.isEncryptionZoneRoot(inode, inode.getFullPathName())) {
+          // nested EZ, ignore.
+          LOG.info("{}({}) is a nested EZ, skipping for re-encryption",
+              inode.getFullPathName(), inode.getId());
+          continue;
+        }
+        // add 1 level to the depth-first search.
+        curr = inode;
+        if (!startAfters.isEmpty()) {
+          startAfters.remove(startAfters.size() - 1);
+          startAfters.add(curr.getLocalNameBytes());
+        }
+        startAfters.add(HdfsFileStatus.EMPTY_NAME);
+        return lockReleased ? null : curr;
+      }
+      if (currentBatch.size() >= reencryptBatchSize) {
+        final byte[] currentStartAfter = inode.getLocalNameBytes();
+        final String parentPath = parent.getFullPathName();
+        submitCurrentBatch(zoneId);
+        lockReleased = true;
+        readUnlock();
+        try {
+          throttle();
+          checkPauseForTesting();
+        } finally {
+          readLock();
+        }
+        checkZoneReady(zoneId);
+
+        // Things could have changed when the lock was released.
+        // Re-resolve the parent inode.
+        FSPermissionChecker pc = dir.getPermissionChecker();
+        INode newParent =
+            dir.resolvePath(pc, parentPath, FSDirectory.DirOp.READ)
+                .getLastINode();
+        if (newParent == null || !newParent.equals(parent)) {
+          // parent dir is deleted or recreated. We're done.
+          return null;
+        }
+        children = parent.getChildrenList(Snapshot.CURRENT_STATE_ID);
+        // -1 to counter the ++ on the for loop
+        i = INodeDirectory.nextChild(children, currentStartAfter) - 1;
+      }
+    }
+    // Successfully finished this dir, adjust pointers to 1 level up, and
+    // startAfter this dir.
+    startAfters.remove(startAfters.size() - 1);
+    if (!startAfters.isEmpty()) {
+      startAfters.remove(startAfters.size() - 1);
+      startAfters.add(curr.getLocalNameBytes());
+    }
+    curr = curr.getParent();
+    return lockReleased ? null : curr;
+  }
+
+  private void readLock() {
+    dir.getFSNamesystem().readLock();
+    dir.readLock();
+    throttleTimerLocked.start();
+  }
+
+  private void readUnlock() {
+    dir.readUnlock();
+    dir.getFSNamesystem().readUnlock("reencryptHandler");
+    throttleTimerLocked.stop();
+  }
+
+  /**
+   * Throttles the ReencryptionHandler in 3 aspects:
+   * 1. Prevents generating more Callables than the CPU could possibly handle.
+   * 2. Prevents generating more Callables than the ReencryptionUpdater can
+   *   handle, under its own throttling.
+   * 3. Prevents contending for FSN/FSD read locks. This is done based on the
+   *   DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY configuration.
+   * <p>
+   * Items 1 and 2 are to control NN heap usage.
+   *
+   * @throws InterruptedException
+   */
+  @VisibleForTesting
+  void throttle() throws InterruptedException {
+    // 1.
+    final int numCores = Runtime.getRuntime().availableProcessors();
+    if (taskQueue.size() >= numCores) {
+      LOG.debug("Re-encryption handler throttling because queue size {} is"
+          + "larger than number of cores {}", taskQueue.size(), numCores);
+      while (taskQueue.size() >= numCores) {
+        Thread.sleep(100);
+      }
+    }
+
+    // 2. if tasks are piling up on the updater, don't create new callables
+    // until the queue size goes down.
+    final int maxTasksPiled = Runtime.getRuntime().availableProcessors() * 2;
+    int totalTasks = 0;
+    for (ZoneSubmissionTracker zst : submissions.values()) {
+      totalTasks += zst.getTasks().size();
+    }
+    if (totalTasks >= maxTasksPiled) {
+      LOG.debug("Re-encryption handler throttling because total tasks pending"
+          + " re-encryption updater is {}", totalTasks);
+      while (totalTasks >= maxTasksPiled) {
+        Thread.sleep(500);
+        totalTasks = 0;
+        for (ZoneSubmissionTracker zst : submissions.values()) {
+          totalTasks += zst.getTasks().size();
+        }
+      }
+    }
+
+    // 3.
+    if (throttleLimitHandlerRatio >= 1.0) {
+      return;
+    }
+    final long expect = (long) (throttleTimerAll.now(TimeUnit.MILLISECONDS)
+        * throttleLimitHandlerRatio);
+    final long actual = throttleTimerLocked.now(TimeUnit.MILLISECONDS);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Re-encryption handler throttling expect: {}, actual: {},"
+              + " throttleTimerAll:{}", expect, actual,
+          throttleTimerAll.now(TimeUnit.MILLISECONDS));
+    }
+    if (expect - actual < 0) {
+      // in case throttleLimitHandlerRatio is very small, expect will be 0.
+      // So sleepMs should not be calculated from expect, in order to really
+      // meet the ratio. E.g. if ratio is 0.001, expect = 0 and actual = 1,
+      // sleepMs should be 1000 - throttleTimerAll.now().
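+      // A worked case with larger numbers: ratio = 0.7, elapsed = 1000 and
+      // locked = 900 give expect = 700 < actual = 900, so sleepMs =
+      // 900 / 0.7 - 1000, about 286ms; afterwards locked/elapsed is roughly
+      // 900 / 1286, i.e. back at the 0.7 target.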
+      final long sleepMs =
+          (long) (actual / throttleLimitHandlerRatio) - throttleTimerAll
+              .now(TimeUnit.MILLISECONDS);
+      LOG.debug("Throttling re-encryption, sleeping for {} ms", sleepMs);
+      Thread.sleep(sleepMs);
+    }
+    throttleTimerAll.reset().start();
+    throttleTimerLocked.reset();
+  }
+
+  /**
+   * Process an Inode for re-encryption. Add to current batch if it's a file,
+   * no-op otherwise.
+   *
+   * @param inode the inode
+   * @return true if the inode is added to currentBatch and should be
+   * re-encrypted; false otherwise, e.g. the inode is not a file, is not
+   * encrypted, or its edek's key version is unchanged.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private boolean reencryptINode(final INode inode, final String ezKeyVerName)
+      throws IOException, InterruptedException {
+    assert dir.hasReadLock();
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Processing {} for re-encryption", inode.getFullPathName());
+    }
+    if (!inode.isFile()) {
+      return false;
+    }
+    FileEncryptionInfo feInfo = FSDirEncryptionZoneOp
+        .getFileEncryptionInfo(dir, INodesInPath.fromINode(inode));
+    if (feInfo == null) {
+      LOG.warn("File {} skipped re-encryption because it is not encrypted! "
+          + "This is very likely a bug.", inode.getId());
+      return false;
+    }
+    if (ezKeyVerName.equals(feInfo.getEzKeyVersionName())) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("File {} skipped re-encryption because edek's key version"
+            + " name is not changed.", inode.getFullPathName());
+      }
+      return false;
+    }
+    currentBatch.add(inode.asFile());
+    return true;
+  }
+
+  /**
+   * Check whether the zone is ready for re-encryption. Throws an IOE if it's
+   * not, i.e. when:
+   * 1. the EZ is deleted;
+   * 2. the re-encryption is canceled;
+   * 3. the NN is not active or is in safe mode.
+   *
+   * @throws IOException if the zone does not exist / is cancelled, or if the
+   *                     NN is not ready for write.
+   */
+  void checkZoneReady(final long zoneId)
+      throws RetriableException, SafeModeException, IOException {
+    final ZoneReencryptionStatus zs =
+        getReencryptionStatus().getZoneStatus(zoneId);
+    if (zs == null) {
+      throw new IOException("Zone " + zoneId + " status cannot be found.");
+    }
+    if (zs.isCanceled()) {
+      throw new IOException("Re-encryption is canceled for zone " + zoneId);
+    }
+    dir.getFSNamesystem()
+        .checkNameNodeSafeMode("NN is in safe mode, cannot re-encrypt.");
+    // re-encryption should be cancelled when NN goes to standby. Just
+    // double checking for sanity.
+    dir.getFSNamesystem().checkOperation(NameNode.OperationCategory.WRITE);
+  }
+
+  /**
+   * Called when a new zone is submitted for re-encryption. This will interrupt
+   * the background thread if it's waiting for the next
+   * DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY.
+   */
+  synchronized void notifyNewSubmission() {
+    LOG.debug("Notifying handler for new re-encryption command.");
+    this.notify();
+  }
+}
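
The handler/updater split is a plain producer-consumer arrangement over an
ExecutorCompletionService: the handler submits EDEKReencryptCallable batches,
and the updater consumes completed tasks from the same completion service. A
self-contained sketch of just that wiring, with trivial stand-in tasks (the
real classes additionally manage NN locks and xattrs):

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class CompletionServiceSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<String> svc =
            new ExecutorCompletionService<>(pool);
        // "Handler" side: submit batches. Completion order is not submission
        // order, which is why the consumer reads from the completion queue.
        for (int i = 0; i < 3; i++) {
          final int batch = i;
          svc.submit(() -> "re-encrypted batch " + batch);
        }
        // "Updater" side: take() hands back tasks as they complete.
        for (int i = 0; i < 3; i++) {
          Future<String> done = svc.take();
          System.out.println(done.get());
        }
        pool.shutdown();
      }
    }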
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
new file mode 100644
index 0000000..690a0e9f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -0,0 +1,523 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionHandler.ReencryptionBatch;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.util.StopWatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_KEY;
+
+/**
+ * Class for finalizing re-encrypt EDEK operations by updating file xattrs
+ * with the edeks returned from re-encryption.
+ * <p>
+ * The tasks are submitted by ReencryptionHandler.
+ * <p>
+ * It is assumed only 1 Updater will be running, since updating file xattrs
+ * requires namespace write lock, and performance gain from multi-threading
+ * is limited.
+ */
+@InterfaceAudience.Private
+public final class ReencryptionUpdater implements Runnable {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(ReencryptionUpdater.class);
+
+  private volatile boolean shouldPauseForTesting = false;
+  private volatile int pauseAfterNthCheckpoint = 0;
+  private volatile long pauseZoneId = 0;
+
+  private double throttleLimitRatio;
+  private final StopWatch throttleTimerAll = new StopWatch();
+  private final StopWatch throttleTimerLocked = new StopWatch();
+
+  private volatile long faultRetryInterval = 60000;
+
+  /**
+   * Class to track re-encryption submissions of a single zone. It contains
+   * all the submitted futures, and statistics about how far the futures are
+   * processed.
+   */
+  static final class ZoneSubmissionTracker {
+    private boolean submissionDone;
+    private LinkedList<Future> tasks;
+    private int numCheckpointed;
+    private int numFutureDone;
+
+    ZoneSubmissionTracker() {
+      submissionDone = false;
+      tasks = new LinkedList<>();
+      numCheckpointed = 0;
+      numFutureDone = 0;
+    }
+
+    LinkedList<Future> getTasks() {
+      return tasks;
+    }
+
+    void cancelAllTasks() {
+      if (!tasks.isEmpty()) {
+        LOG.info("Cancelling {} re-encryption tasks", tasks.size());
+        for (Future f : tasks) {
+          f.cancel(true);
+        }
+      }
+    }
+
+    void addTask(final Future task) {
+      tasks.add(task);
+    }
+
+    private boolean isCompleted() {
+      return submissionDone && tasks.isEmpty();
+    }
+
+    void setSubmissionDone() {
+      submissionDone = true;
+    }
+  }
+
+  /**
+   * Class representing the task for one batch of a re-encryption command. It
+   * also contains statistics about how far this single batch has been executed.
+   */
+  static final class ReencryptionTask {
+    private final long zoneId;
+    private boolean processed = false;
+    private int numFilesUpdated = 0;
+    private int numFailures = 0;
+    private String lastFile = null;
+    private final ReencryptionBatch batch;
+
+    ReencryptionTask(final long id, final int failures,
+        final ReencryptionBatch theBatch) {
+      zoneId = id;
+      numFailures = failures;
+      batch = theBatch;
+    }
+  }
+
+  /**
+   * Class that encapsulates re-encryption details of a file. It contains the
+   * file inode, stores the initial edek of the file, and the new edek
+   * after re-encryption.
+   * <p>
+   * Assumptions are the object initialization happens when dir lock is held,
+   * and inode is valid and is encrypted during initialization.
+   * <p>
+   * Namespace changes may happen during re-encryption, and if the inode is
+   * changed its re-encryption is skipped.
+   */
+  static final class FileEdekInfo {
+    private final long inodeId;
+    private final EncryptedKeyVersion existingEdek;
+    private EncryptedKeyVersion edek = null;
+
+    FileEdekInfo(FSDirectory dir, INodeFile inode) throws IOException {
+      assert dir.hasReadLock();
+      Preconditions.checkNotNull(inode, "INodeFile is null");
+      inodeId = inode.getId();
+      final FileEncryptionInfo fei = FSDirEncryptionZoneOp
+          .getFileEncryptionInfo(dir, INodesInPath.fromINode(inode));
+      Preconditions.checkNotNull(fei,
+          "FileEncryptionInfo is null for " + inodeId);
+      existingEdek = EncryptedKeyVersion
+          .createForDecryption(fei.getKeyName(), fei.getEzKeyVersionName(),
+              fei.getIV(), fei.getEncryptedDataEncryptionKey());
+    }
+
+    long getInodeId() {
+      return inodeId;
+    }
+
+    EncryptedKeyVersion getExistingEdek() {
+      return existingEdek;
+    }
+
+    void setEdek(final EncryptedKeyVersion ekv) {
+      assert ekv != null;
+      edek = ekv;
+    }
+  }
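+
+  // Lifecycle (for illustration): the handler constructs a FileEdekInfo under
+  // the FSDirectory read lock, capturing the file's current edek; an
+  // EDEKReencryptCallable later fills in the new edek via setEdek(); finally
+  // processTaskEntries() re-checks the file's on-disk edek against
+  // existingEdek before swapping in the new FileEncryptionInfo.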
+
+  @VisibleForTesting
+  synchronized void pauseForTesting() {
+    shouldPauseForTesting = true;
+    LOG.info("Pausing re-encrypt updater for testing.");
+    notify();
+  }
+
+  @VisibleForTesting
+  synchronized void resumeForTesting() {
+    shouldPauseForTesting = false;
+    LOG.info("Resuming re-encrypt updater for testing.");
+    notify();
+  }
+
+  @VisibleForTesting
+  void pauseForTestingAfterNthCheckpoint(final long zoneId, final int count) {
+    assert pauseAfterNthCheckpoint == 0;
+    pauseAfterNthCheckpoint = count;
+    pauseZoneId = zoneId;
+  }
+
+  private final FSDirectory dir;
+  private final CompletionService<ReencryptionTask> batchService;
+  private final ReencryptionHandler handler;
+
+  ReencryptionUpdater(final FSDirectory fsd,
+      final CompletionService<ReencryptionTask> service,
+      final ReencryptionHandler rh, final Configuration conf) {
+    dir = fsd;
+    batchService = service;
+    handler = rh;
+    this.throttleLimitRatio =
+        conf.getDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_KEY,
+            DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_DEFAULT);
+    Preconditions.checkArgument(throttleLimitRatio > 0.0f,
+        DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_UPDATER_RATIO_KEY
+            + " is not positive.");
+  }
+
+  /**
+   * Called by the submission thread to indicate all tasks have been submitted.
+   * If this is called but no tasks have been submitted, the re-encryption is
+   * considered complete.
+   *
+   * @param zoneId Id of the zone inode.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  void markZoneSubmissionDone(final long zoneId)
+      throws IOException, InterruptedException {
+    final ZoneSubmissionTracker tracker = handler.getTracker(zoneId);
+    if (tracker != null) {
+      tracker.submissionDone = true;
+    } else {
+      // Caller thinks submission is done, but no tasks submitted - meaning
+      // no files in the EZ need to be re-encrypted. Complete directly.
+      handler.addDummyTracker(zoneId);
+    }
+  }
+
+  @Override
+  public void run() {
+    throttleTimerAll.start();
+    while (true) {
+      try {
+        // Assuming single-threaded updater.
+        takeAndProcessTasks();
+      } catch (InterruptedException ie) {
+        LOG.warn("Re-encryption updater thread interrupted. Exiting.");
+        Thread.currentThread().interrupt();
+        return;
+      } catch (IOException ioe) {
+        LOG.warn("Re-encryption updater thread exception.", ioe);
+      } catch (Throwable t) {
+        LOG.error("Re-encryption updater thread exiting.", t);
+        return;
+      }
+    }
+  }
+
+  /**
+   * Process a completed ReencryptionTask. Each inode id is resolved to an INode
+   * object, skip if the inode is deleted.
+   * <p>
+   * Only file xattr is updated by this method. Re-encryption progress is not
+   * updated.
+   *
+   * @param zoneNodePath full path of the EZ inode.
+   * @param task     the completed task.
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void processTaskEntries(final String zoneNodePath,
+      final ReencryptionTask task) throws IOException, InterruptedException {
+    assert dir.hasWriteLock();
+    if (!task.batch.isEmpty() && task.numFailures == 0) {
+      LOG.debug(
+          "Updating file xattrs for re-encrypting zone {}," + " starting at {}",
+          zoneNodePath, task.batch.getFirstFilePath());
+      for (Iterator<FileEdekInfo> it = task.batch.getBatch().iterator();
+           it.hasNext();) {
+        FileEdekInfo entry = it.next();
+        // resolve the inode again, and skip if it doesn't exist
+        LOG.trace("Updating {} for re-encryption.", entry.getInodeId());
+        final INode inode = dir.getInode(entry.getInodeId());
+        if (inode == null) {
+          LOG.debug("INode {} doesn't exist, skipping re-encrypt.",
+              entry.getInodeId());
+          // also remove from batch so later it's not saved.
+          it.remove();
+          continue;
+        }
+
+        // Cautiously check file encryption info, and only update if we're sure
+        // it's still using the same edek.
+        Preconditions.checkNotNull(entry.edek);
+        final FileEncryptionInfo fei = FSDirEncryptionZoneOp
+            .getFileEncryptionInfo(dir, INodesInPath.fromINode(inode));
+        if (!fei.getKeyName().equals(entry.edek.getEncryptionKeyName())) {
+          LOG.debug("Inode {} EZ key changed, skipping re-encryption.",
+              entry.getInodeId());
+          it.remove();
+          continue;
+        }
+        if (fei.getEzKeyVersionName()
+            .equals(entry.edek.getEncryptionKeyVersionName())) {
+          LOG.debug(
+              "Inode {} EZ key version unchanged, skipping re-encryption.",
+              entry.getInodeId());
+          it.remove();
+          continue;
+        }
+        if (!Arrays.equals(fei.getEncryptedDataEncryptionKey(),
+            entry.existingEdek.getEncryptedKeyVersion().getMaterial())) {
+          LOG.debug("Inode {} existing edek changed, skipping re-encryption",
+              entry.getInodeId());
+          it.remove();
+          continue;
+        }
+        FileEncryptionInfo newFei = new FileEncryptionInfo(fei.getCipherSuite(),
+            fei.getCryptoProtocolVersion(),
+            entry.edek.getEncryptedKeyVersion().getMaterial(),
+            entry.edek.getEncryptedKeyIv(), fei.getKeyName(),
+            entry.edek.getEncryptionKeyVersionName());
+        final INodesInPath iip = INodesInPath.fromINode(inode);
+        FSDirEncryptionZoneOp
+            .setFileEncryptionInfo(dir, iip, newFei, XAttrSetFlag.REPLACE);
+        task.lastFile = iip.getPath();
+        ++task.numFilesUpdated;
+      }
+
+      LOG.info("Updated xattrs on {}({}) files in zone {} for re-encryption,"
+              + " starting:{}.", task.numFilesUpdated, task.batch.size(),
+          zoneNodePath, task.batch.getFirstFilePath());
+    }
+    task.processed = true;
+  }
+
+  /**
+   * Iterate over the tasks for the given zone and update progress accordingly.
+   * The checkpoint indicates that all files before it have finished
+   * re-encryption, so it is advanced to the position where all preceding
+   * tasks have completed.
+   *
+   * @param zoneNode the EZ inode.
+   * @param tracker  the zone submission tracker.
+   * @return the list containing the last checkpointed xattr. Empty if
+   *   no checkpoint happened.
+   * @throws ExecutionException
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private List<XAttr> processCheckpoints(final INode zoneNode,
+      final ZoneSubmissionTracker tracker)
+      throws ExecutionException, IOException, InterruptedException {
+    assert dir.hasWriteLock();
+    final long zoneId = zoneNode.getId();
+    final String zonePath = zoneNode.getFullPathName();
+    final ZoneReencryptionStatus status =
+        handler.getReencryptionStatus().getZoneStatus(zoneId);
+    assert status != null;
+    // always start from the beginning, because the checkpoint means all files
+    // before it are re-encrypted.
+    final LinkedList<Future> tasks = tracker.getTasks();
+    final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+    ListIterator<Future> iter = tasks.listIterator();
+    while (iter.hasNext()) {
+      Future<ReencryptionTask> curr = iter.next();
+      if (!curr.isDone() || !curr.get().processed) {
+        // still has earlier tasks not completed, skip here.
+        break;
+      }
+      ReencryptionTask task = curr.get();
+      LOG.debug("Updating re-encryption checkpoint with completed task."
+          + " last: {} size:{}.", task.lastFile, task.batch.size());
+      assert zoneId == task.zoneId;
+      try {
+        final XAttr xattr = FSDirEncryptionZoneOp
+            .updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
+                task.numFilesUpdated, task.numFailures);
+        xAttrs.clear();
+        xAttrs.add(xattr);
+      } catch (IOException ie) {
+        LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
+            zonePath, ie);
+        ++task.numFailures;
+      }
+      ++tracker.numCheckpointed;
+      iter.remove();
+    }
+    if (tracker.isCompleted()) {
+      LOG.debug("Removed re-encryption tracker for zone {} because it completed"
+              + " with {} tasks.", zonePath, tracker.numCheckpointed);
+      return handler.completeReencryption(zoneNode);
+    }
+    return xAttrs;
+  }
+
+  private void takeAndProcessTasks() throws Exception {
+    final Future<ReencryptionTask> completed = batchService.take();
+    throttle();
+    checkPauseForTesting();
+    if (completed.isCancelled()) {
+      // Check cancellation before get(), which would throw
+      // CancellationException for a canceled task and kill the updater.
+      LOG.debug("Skipped canceled re-encryption task.");
+      return;
+    }
+    final ReencryptionTask task = completed.get();
+
+    boolean shouldRetry;
+    do {
+      dir.getFSNamesystem().writeLock();
+      try {
+        throttleTimerLocked.start();
+        processTask(task);
+        shouldRetry = false;
+      } catch (RetriableException | SafeModeException re) {
+        // Keep retrying until succeed.
+        LOG.info("Exception when processing re-encryption task for zone {}, "
+                + "retrying...", task.zoneId, re);
+        shouldRetry = true;
+        Thread.sleep(faultRetryInterval);
+      } catch (IOException ioe) {
+        LOG.warn("Failure processing re-encryption task for zone {}",
+            task.zoneId, ioe);
+        ++task.numFailures;
+        task.processed = true;
+        shouldRetry = false;
+      } finally {
+        dir.getFSNamesystem().writeUnlock("reencryptUpdater");
+        throttleTimerLocked.stop();
+      }
+      // logSync regardless, to prevent edit log buffer overflow triggering
+      // logSync inside FSN writelock.
+      dir.getEditLog().logSync();
+    } while (shouldRetry);
+  }
+
+  private void processTask(ReencryptionTask task)
+      throws InterruptedException, ExecutionException, IOException {
+    final List<XAttr> xAttrs;
+    final String zonePath;
+    dir.writeLock();
+    try {
+      handler.checkZoneReady(task.zoneId);
+      final INode zoneNode = dir.getInode(task.zoneId);
+      if (zoneNode == null) {
+        // ez removed.
+        return;
+      }
+      zonePath = zoneNode.getFullPathName();
+      LOG.info("Processing returned re-encryption task for zone {}({}), "
+              + "batch size {}, start:{}", zonePath, task.zoneId,
+          task.batch.size(), task.batch.getFirstFilePath());
+      final ZoneSubmissionTracker tracker =
+          handler.getTracker(zoneNode.getId());
+      Preconditions.checkNotNull(tracker, "zone tracker not found " + zonePath);
+      tracker.numFutureDone++;
+      EncryptionFaultInjector.getInstance().reencryptUpdaterProcessOneTask();
+      processTaskEntries(zonePath, task);
+      EncryptionFaultInjector.getInstance().reencryptUpdaterProcessCheckpoint();
+      xAttrs = processCheckpoints(zoneNode, tracker);
+    } finally {
+      dir.writeUnlock();
+    }
+    FSDirEncryptionZoneOp.saveFileXAttrsForBatch(dir, task.batch.getBatch());
+    if (!xAttrs.isEmpty()) {
+      dir.getEditLog().logSetXAttrs(zonePath, xAttrs, false);
+    }
+  }
+
+  private synchronized void checkPauseForTesting() throws InterruptedException {
+    assert !dir.hasWriteLock();
+    assert !dir.getFSNamesystem().hasWriteLock();
+    if (pauseAfterNthCheckpoint != 0) {
+      ZoneSubmissionTracker tracker =
+          handler.unprotectedGetTracker(pauseZoneId);
+      if (tracker != null) {
+        if (tracker.numFutureDone == pauseAfterNthCheckpoint) {
+          shouldPauseForTesting = true;
+          pauseAfterNthCheckpoint = 0;
+        }
+      }
+    }
+    while (shouldPauseForTesting) {
+      LOG.info("Sleeping in the re-encryption updater for unit test.");
+      wait();
+      LOG.info("Continuing re-encryption updater after pausing.");
+    }
+  }
+
+  /**
+   * Throttles the ReencryptionUpdater to prevent it from contending for the
+   * FSN/FSD write locks. The limit is controlled by the
+   * dfs.namenode.reencrypt.throttle.limit.updater.ratio configuration.
+   */
+  private void throttle() throws InterruptedException {
+    if (throttleLimitRatio >= 1.0) {
+      return;
+    }
+
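+    // Worked example (illustrative numbers, not from the source): with a
+    // ratio of 0.5, if the updater has held the lock for 200ms out of 300ms
+    // total elapsed, expect = 150ms < actual = 200ms, so the thread sleeps
+    // for 200 / 0.5 - 300 = 100ms before taking the next task.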
+    final long expect = (long) (throttleTimerAll.now(TimeUnit.MILLISECONDS)
+        * throttleLimitRatio);
+    final long actual = throttleTimerLocked.now(TimeUnit.MILLISECONDS);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Re-encryption updater throttling expect: {}, actual: {},"
+              + " throttleTimerAll:{}", expect, actual,
+          throttleTimerAll.now(TimeUnit.MILLISECONDS));
+    }
+    if (expect - actual < 0) {
+      // In case throttleLimitRatio is very small, expect will be 0, so
+      // sleepMs should not be calculated from expect in order to really meet
+      // the ratio. E.g. if ratio is 0.001, expect = 0 and actual = 1, sleepMs
+      // should be 1000 - throttleTimerAll.now()
+      final long sleepMs =
+          (long) (actual / throttleLimitRatio) - throttleTimerAll
+              .now(TimeUnit.MILLISECONDS);
+      LOG.debug("Throttling re-encryption, sleeping for {} ms", sleepMs);
+      Thread.sleep(sleepMs);
+    }
+    throttleTimerAll.reset().start();
+    throttleTimerLocked.reset();
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 753447b..789ed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointConf;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
@@ -228,7 +229,9 @@
       Future<TransferFsImage.TransferResult> upload =
           executor.submit(new Callable<TransferFsImage.TransferResult>() {
             @Override
-            public TransferFsImage.TransferResult call() throws IOException {
+            public TransferFsImage.TransferResult call()
+                throws IOException, InterruptedException {
+              CheckpointFaultInjector.getInstance().duringUploadInProgess();
               return TransferFsImage.uploadImageFromStorage(activeNNAddress, conf, namesystem
                   .getFSImage().getStorage(), imageType, txid, canceler);
             }
@@ -258,11 +261,12 @@
         break;
       }
     }
-    lastUploadTime = monotonicNow();
-
-    // we are primary if we successfully updated the ANN
-    this.isPrimaryCheckPointer = success;
-
+    if (ie == null && ioe == null) {
+      // Update only when we got a response from the remote NN about the
+      // upload success or failure.
+      lastUploadTime = monotonicNow();
+      // we are primary if we successfully updated the ANN
+      this.isPrimaryCheckPointer = success;
+    }
     // cleaner than copying code for multiple catch statements and better than catching all
     // exceptions, so we just handle the ones we expect.
     if (ie != null || ioe != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 4b479e0..2fef235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -19,6 +19,8 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE_DEFAULT;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -33,6 +35,8 @@
 
 import javax.management.ObjectName;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -67,8 +71,19 @@
  * if necessary.
  */
 public class SnapshotManager implements SnapshotStatsMXBean {
+  public static final Log LOG = LogFactory.getLog(SnapshotManager.class);
+
   private final FSDirectory fsdir;
   private final boolean captureOpenFiles;
+  /**
+   * If skipCaptureAccessTimeOnlyChange is set to true and the accessTime
+   * of a file changed but no other modification was made to the file, the
+   * change will not be captured in the next snapshot. However, if another
+   * modification was made to the file, the last access time will be
+   * captured together with the modification in the next snapshot.
+   */
+  private boolean skipCaptureAccessTimeOnlyChange = false;
+
   private final AtomicInteger numSnapshots = new AtomicInteger();
   private static final int SNAPSHOT_ID_BIT_WIDTH = 24;
 
@@ -84,6 +99,19 @@
     this.captureOpenFiles = conf.getBoolean(
         DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
         DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT);
+    this.skipCaptureAccessTimeOnlyChange = conf.getBoolean(
+        DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE,
+        DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE_DEFAULT);
+    LOG.info("Loaded config captureOpenFiles: " + captureOpenFiles
+        + "skipCaptureAccessTimeOnlyChange: " +
+        skipCaptureAccessTimeOnlyChange);
+  }
+
+  /**
+   * @return skipCaptureAccessTimeOnlyChange
+   */
+  public boolean getSkipCaptureAccessTimeOnlyChange() {
+    return skipCaptureAccessTimeOnlyChange;
   }
 
   /** Used in tests only */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index 14abf6e..4b0b083 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
@@ -32,8 +32,11 @@
 import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -286,10 +289,139 @@
     }
   }
 
+  private static class ReencryptZoneCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-reencryptZone";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " <action> -path <zone>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      final TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<action>",
+          "The re-encrypt action to perform. Must be -start or -cancel.");
+      listing.addRow("<zone>", "The path to the zone to be re-encrypted.");
+      return getShortUsage() + "\n" + "Issue a re-encryption command for"
+          + " an encryption zone. Requires superuser permissions.\n\n"
+          + listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      final boolean start = StringUtils.popOption("-start", args);
+      final boolean cancel = StringUtils.popOption("-cancel", args);
+
+      if (!args.isEmpty()) {
+        System.err.println("Can't understand argument: " + args.get(0));
+        getLongUsage();
+        return 1;
+      }
+      if (!(start ^ cancel)) {
+        System.err.println("You must specify either [-start] or [-cancel]. ");
+        getLongUsage();
+        return 2;
+      }
+      if (path == null) {
+        System.err.println("You must specify a zone directory with [-path]. ");
+        getLongUsage();
+        return 3;
+      }
+      ReencryptAction action = ReencryptAction.START;
+      if (cancel) {
+        action = ReencryptAction.CANCEL;
+      }
+
+      final HdfsAdmin admin =
+          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      try {
+        admin.reencryptEncryptionZone(new Path(path), action);
+        System.out.println("re-encrypt command successfully submitted for "
+            + "zone: " + path + " action: " + action);
+      } catch (IOException e) {
+        System.err.println(prettifyException(e));
+        return 4;
+      }
+      return 0;
+    }
+  }
+
+  private static class ListReencryptionStatusCommand
+      implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-listReencryptionStatus";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName()+ "]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      return getShortUsage() + "\n" +
+          "List re-encryption statuses of encryption zones. "
+          + "Requires superuser permissions.\n\n";
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      try {
+        final TableListing listing =
+            new TableListing.Builder().addField("Zone Name").addField("Status")
+                .addField("EZKey Version Name").addField("Submission Time")
+                .addField("Is Canceled?").addField("Completion Time")
+                .addField("Number of files re-encrypted")
+                .addField("Number of failures")
+                .addField("Last File Checkpointed")
+                .wrapWidth(AdminHelper.MAX_LINE_WIDTH).showHeaders().build();
+        final RemoteIterator<ZoneReencryptionStatus> it =
+            admin.listReencryptionStatus();
+        boolean failuresMet = false;
+        while (it.hasNext()) {
+          ZoneReencryptionStatus zs = it.next();
+          final long completion = zs.getCompletionTime();
+          listing.addRow(zs.getZoneName(), zs.getState().toString(),
+              zs.getEzKeyVersionName(), Time.formatTime(zs.getSubmissionTime()),
+              Boolean.toString(zs.isCanceled()),
+              completion == 0 ? "N/A" : Time.formatTime(completion),
+              Long.toString(zs.getFilesReencrypted()),
+              Long.toString(zs.getNumReencryptionFailures()),
+              zs.getLastCheckpointFile());
+          if (zs.getNumReencryptionFailures() > 0) {
+            failuresMet = true;
+          }
+        }
+        System.out.println(listing.toString());
+        if (failuresMet) {
+          System.out.println("There are re-encryption failures. Files that are"
+              + " failed to re-encrypt are still using the old EDEKs. "
+              + "Please check NameNode log to see which files failed,"
+              + " then either fix the error and re-encrypt again,"
+              + " or manually copy the failed files to use new EDEKs.");
+        }
+      } catch (IOException e) {
+        System.err.println(prettifyException(e));
+        return 2;
+      }
+
+      return 0;
+    }
+  }
+
   private static final AdminHelper.Command[] COMMANDS = {
       new CreateZoneCommand(),
       new ListZonesCommand(),
       new ProvisionTrashCommand(),
-      new GetFileEncryptionInfoCommand()
+      new GetFileEncryptionInfoCommand(),
+      new ReencryptZoneCommand(),
+      new ListReencryptionStatusCommand()
   };
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1fb1d5f..88aafe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -66,11 +66,13 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeVolumeInfo;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -532,16 +534,30 @@
      * minutes. Use "-metaSave" to list of all such blocks and accurate 
      * counts.
      */
-    System.out.println("Under replicated blocks: " + 
-                       dfs.getLowRedundancyBlocksCount());
-    System.out.println("Blocks with corrupt replicas: " + 
-                       dfs.getCorruptBlocksCount());
-    System.out.println("Missing blocks: " + 
-                       dfs.getMissingBlocksCount());
-    System.out.println("Missing blocks (with replication factor 1): " +
-                      dfs.getMissingReplOneBlocksCount());
-    System.out.println("Pending deletion blocks: " +
-        dfs.getPendingDeletionBlocksCount());
+    BlocksStats blocksStats = dfs.getClient().getNamenode().getBlocksStats();
+    System.out.println("Replicated Blocks:");
+    System.out.println("\tUnder replicated blocks: " +
+        blocksStats.getLowRedundancyBlocksStat());
+    System.out.println("\tBlocks with corrupt replicas: " +
+        blocksStats.getCorruptBlocksStat());
+    System.out.println("\tMissing blocks: " +
+        blocksStats.getMissingReplicaBlocksStat());
+    System.out.println("\tMissing blocks (with replication factor 1): " +
+        blocksStats.getMissingReplicationOneBlocksStat());
+    System.out.println("\tPending deletion blocks: " +
+        blocksStats.getPendingDeletionBlocksStat());
+
+    ECBlockGroupsStats ecBlockGroupsStats =
+        dfs.getClient().getNamenode().getECBlockGroupsStats();
+    System.out.println("Erasure Coded Block Groups: ");
+    System.out.println("\tLow redundancy block groups: " +
+        ecBlockGroupsStats.getLowRedundancyBlockGroupsStat());
+    System.out.println("\tBlock groups with corrupt internal blocks: " +
+        ecBlockGroupsStats.getCorruptBlockGroupsStat());
+    System.out.println("\tMissing block groups: " +
+        ecBlockGroupsStats.getMissingBlockGroupsStat());
+    System.out.println("\tPending deletion block groups: " +
+        ecBlockGroupsStats.getPendingDeletionBlockGroupsStat());
 
     System.out.println();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 46600a0..0b4e0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -20,11 +20,11 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.util.ECPolicyLoader;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -33,7 +33,6 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -100,9 +99,7 @@
     @Override
     public String getLongUsage() {
       return getShortUsage() + "\n" +
-          "Get the list of enabled erasure coding policies.\n" +
-          "Policies can be enabled on the NameNode via `" +
-          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "`.\n";
+          "Get the list of all erasure coding policies.\n";
     }
 
     @Override
@@ -117,17 +114,13 @@
         Collection<ErasureCodingPolicy> policies =
             dfs.getAllErasureCodingPolicies();
         if (policies.isEmpty()) {
-          System.out.println("No erasure coding policies are enabled on the " +
+          System.out.println("There is no erasure coding policies in the " +
               "cluster.");
-          System.out.println("The set of enabled policies can be " +
-              "configured at '" +
-              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "' on the " +
-              "NameNode.");
         } else {
           System.out.println("Erasure Coding Policies:");
           for (ErasureCodingPolicy policy : policies) {
             if (policy != null) {
-              System.out.println("\t" + policy.getName());
+              System.out.println(policy.toString());
             }
           }
         }
@@ -310,7 +303,8 @@
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " -path <path> -policy <policy>]\n";
+      return "[" + getName() +
+          " -path <path> [-policy <policy>] [-replicate]]\n";
     }
 
     @Override
@@ -319,9 +313,13 @@
       listing.addRow("<path>", "The path of the file/directory to set " +
           "the erasure coding policy");
       listing.addRow("<policy>", "The name of the erasure coding policy");
+      listing.addRow("-replicate",
+          "force 3x replication scheme on the directory");
       return getShortUsage() + "\n" +
           "Set the erasure coding policy for a file/directory.\n\n" +
-          listing.toString();
+          listing.toString() + "\n" +
+          "-replicate and -policy are optional arguments. They cannot been " +
+          "used at the same time";
     }
 
     @Override
@@ -333,14 +331,24 @@
         return 1;
       }
 
-      final String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
+      String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
           args);
+      final boolean replicate = StringUtils.popOption("-replicate", args);
 
       if (args.size() > 0) {
         System.err.println(getName() + ": Too many arguments");
         return 1;
       }
 
+      if (replicate) {
+        if (ecPolicyName != null) {
+          System.err.println(getName() +
+              ": -replicate and -policy cannot been used at the same time");
+          return 2;
+        }
+        ecPolicyName = ErasureCodeConstants.REPLICATION_POLICY_NAME;
+      }
+
       final Path p = new Path(path);
       final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
       try {
@@ -354,7 +362,7 @@
         }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
-        return 2;
+        return 3;
       }
       return 0;
     }
@@ -441,7 +449,7 @@
 
       final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
       try {
-        HashMap<String, String> codecs =
+        Map<String, String> codecs =
             dfs.getAllErasureCodingCodecs();
         if (codecs.isEmpty()) {
           System.out.println("No erasure coding codecs are supported on the " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 03becc9..bbe2eca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2800,6 +2800,15 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.list.reencryption.status.num.responses</name>
+  <value>100</value>
+  <description>When listing re-encryption status, the maximum number of zones
+    that will be returned in a batch. Fetching the list incrementally in
+    batches improves namenode performance.
+  </description>
+</property>
+
   <property>
     <name>dfs.namenode.list.openfiles.num.responses</name>
     <value>1000</value>
@@ -2829,6 +2838,49 @@
 </property>
 
 <property>
+  <name>dfs.namenode.reencrypt.sleep.interval</name>
+  <value>1m</value>
+  <description>The interval that the re-encrypt EDEK thread sleeps in its
+    main loop. The interval accepts time units. If none is given,
+    milliseconds are assumed.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.reencrypt.batch.size</name>
+  <value>1000</value>
+  <description>The number of EDEKs that the re-encrypt thread processes in
+    one batch.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.reencrypt.throttle.limit.handler.ratio</name>
+  <value>1.0</value>
+  <description>Throttling ratio for re-encryption, indicating what fraction
+    of time the re-encrypt handler thread should work under the NN read lock.
+    Values larger than 1.0 are interpreted as 1.0. Negative values or 0 are
+    invalid and will fail NN startup.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.reencrypt.throttle.limit.updater.ratio</name>
+  <value>1.0</value>
+  <description>Throttling ratio for re-encryption, indicating what fraction
+    of time the re-encrypt updater thread should work under the NN write lock.
+    Values larger than 1.0 are interpreted as 1.0. Negative values or 0 are
+    invalid and will fail NN startup.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.reencrypt.edek.threads</name>
+  <value>10</value>
+  <description>Maximum number of re-encrypt threads that contact the KMS
+    and re-encrypt the EDEKs.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.inotify.max.events.per.rpc</name>
   <value>1000</value>
   <description>Maximum number of events that will be sent to an inotify client
@@ -2977,7 +3029,7 @@
 
 <property>
   <name>dfs.namenode.ec.system.default.policy</name>
-  <value>RS-6-3-64k</value>
+  <value>RS-6-3-1024k</value>
   <description>The default erasure coding policy name will be used
     on the path if no policy name is passed.
   </description>
@@ -4207,22 +4259,34 @@
   </description>
 </property>
 
-  <property>
-    <name>dfs.namenode.snapshot.capture.openfiles</name>
-    <value>false</value>
-    <description>
-      If true, snapshots taken will have an immutable shared copy of
-      the open files that have valid leases. Even after the open files
-      grow or shrink in size, snapshot will always have the previous
-      point-in-time version of the open files, just like all other
-      closed files. Default is false.
-      Note: The file length captured for open files in snapshot is
-      whats recorded in NameNode at the time of snapshot and it may
-      be shorter than what the client has written till then. In order
-      to capture the latest length, the client can call hflush/hsync
-      with the flag SyncFlag.UPDATE_LENGTH on the open files handles.
-    </description>
-  </property>
+<property>
+  <name>dfs.namenode.snapshot.capture.openfiles</name>
+  <value>false</value>
+  <description>
+    If true, snapshots taken will have an immutable shared copy of
+    the open files that have valid leases. Even after the open files
+    grow or shrink in size, snapshot will always have the previous
+    point-in-time version of the open files, just like all other
+    closed files. Default is false.
+    Note: The file length captured for open files in a snapshot is what is
+    recorded in the NameNode at the time of the snapshot, and it may be
+    shorter than what the client has written until then. In order to
+    capture the latest length, the client can call hflush/hsync with the
+    flag SyncFlag.UPDATE_LENGTH on the open file handles.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.snapshot.skip.capture.accesstime-only-change</name>
+  <value>false</value>
+  <description>
+    If set to true and the accessTime of a file/directory changed but no
+    other modification was made to the file/directory, the changed access
+    time will not be captured in the next snapshot. However, if another
+    modification was made to the file/directory, the latest access time
+    will be captured together with the modification in the next snapshot.
+  </description>
+</property>
 
 <property>
   <name>dfs.pipeline.ecn</name>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 3e276a9..dae3519 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -370,6 +370,12 @@
 
     var b = function() { browse_directory($('#directory').val()); };
     $('#btn-nav-directory').click(b);
+    //Also navigate to the directory when a user presses enter.
+    $('#directory').on('keyup', function (e) {
+      if (e.which == 13) {
+        browse_directory($('#directory').val());
+      }
+    });
     var dir = window.location.hash.slice(1);
     if(dir == "") {
       window.location.hash = "/";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 4a48c2a..5bd7c6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -65,9 +65,11 @@
 
       2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
 
-    Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, five built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, and `XOR-2-1-64k`.
+    Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, six built-in policies are supported: `RS-3-2-1024k`, `RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k` and `REPLICATION`.
 
-    By default, all built-in erasure coding policies are disabled.
+    `REPLICATION` is a special policy. It can only be set on a directory, and forces the directory to adopt the 3x replication scheme instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave a 3x replication directory with erasure coding directories.
+
+    The `REPLICATION` policy is always enabled. The other built-in policies are disabled by default, unless they are configured in the `dfs.namenode.ec.policies.enabled` property.
 
     Similar to HDFS storage policies, erasure coding policies are set on a directory. When a file is created, it inherits the EC policy of its nearest ancestor directory.
 
@@ -112,15 +114,15 @@
   what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies.
 
   By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them
-  in the `dfs .namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
-  for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might
-  be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as
+  in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
+  for a cluster with 9 racks, a policy like `RS-10-4-1024k` will not preserve rack-level fault-tolerance, and `RS-6-3-1024k` or `RS-3-2-1024k` might
+  be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-1024k` would still be appropriate as long as
   there are at least 14 DataNodes in the cluster.
 
   A system default EC policy can be configured via 'dfs.namenode.ec.system.default.policy' configuration. With this configuration,
   the default EC policy will be used when no policy name is passed as an argument in the '-setPolicy' command.
 
-  By default, the 'dfs.namenode.ec.system.default.policy' is "RS-6-3-64k".
+  By default, the 'dfs.namenode.ec.system.default.policy' is "RS-6-3-1024k".
 
   The codec implementations for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys:
   `io.erasurecode.codec.rs.rawcoders` for the default RS codec,
@@ -153,7 +155,7 @@
   HDFS provides an `ec` subcommand to perform administrative commands related to erasure coding.
 
        hdfs ec [generic options]
-         [-setPolicy -policy <policyName> -path <path>]
+         [-setPolicy -path <path> [-policy <policyName>] [-replicate]]
          [-getPolicy -path <path>]
          [-unsetPolicy -path <path>]
          [-listPolicies]
@@ -165,7 +167,7 @@
 
 Below are the details about each command.
 
- *  `[-setPolicy -policy <policyName> -path <path>]`
+ *  `[-setPolicy -path <path> [-policy <policyName>] [-replicate]]`
 
     Sets an erasure coding policy on a directory at the specified path.
 
@@ -175,6 +177,10 @@
       This parameter can be omitted if a 'dfs.namenode.ec.system.default.policy' configuration is set.
       The EC policy of the path will be set with the default value in configuration.
 
+      `-replicate` applies the special `REPLICATION` policy on the directory, forcing the directory to adopt the 3x replication scheme.
+
+      `-replicate` and `-policy <policyName>` are optional arguments. They cannot be specified at the same time.
+
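+      As an illustrative sketch (paths are hypothetical), the same effect can be achieved programmatically through `DistributedFileSystem#setErasureCodingPolicy`; `-replicate` simply passes the `REPLICATION` policy name from `ErasureCodeConstants`:
+
+          import org.apache.hadoop.conf.Configuration;
+          import org.apache.hadoop.fs.Path;
+          import org.apache.hadoop.hdfs.DistributedFileSystem;
+          import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+
+          Configuration conf = new Configuration();
+          // Assumes fs.defaultFS points at an HDFS cluster.
+          DistributedFileSystem dfs =
+              (DistributedFileSystem) new Path("/").getFileSystem(conf);
+          // Equivalent to: hdfs ec -setPolicy -path /data/cold -policy RS-6-3-1024k
+          dfs.setErasureCodingPolicy(new Path("/data/cold"), "RS-6-3-1024k");
+          // Equivalent to: hdfs ec -setPolicy -path /data/hot -replicate
+          dfs.setErasureCodingPolicy(new Path("/data/hot"),
+              ErasureCodeConstants.REPLICATION_POLICY_NAME);
+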
 
  *  `[-getPolicy -path <path>]`
 
@@ -186,7 +192,7 @@
 
  *  `[-listPolicies]`
 
-     Lists the set of enabled erasure coding policies. These names are suitable for use with the `setPolicy` command.
+     Lists all (enabled, disabled and removed) erasure coding policies registered in HDFS. Only the enabled policies are suitable for use with the `setPolicy` command.
 
  *  `[-addPolicies -policyFile <file>]`
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index 3f9fbf0..3454265 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -177,6 +177,33 @@
 |:---- |:---- |
 | *path* | The path of the file to get encryption information. |
 
+### <a name="reencryptZone"></a>reencryptZone
+
+Usage: `[-reencryptZone <action> -path <zone>]`
+
+Re-encrypts an encryption zone by iterating through it and calling the KeyProvider's reencryptEncryptedKeys interface to batch-re-encrypt all files' EDEKs with the latest version of the encryption zone key in the key provider. Requires superuser permissions.
+
+Note that re-encryption does not apply to snapshots, due to snapshots' immutable nature.
+
+| | |
+|:---- |:---- |
+| *action* | The re-encrypt action to perform. Must be either `-start` or `-cancel`. |
+| *path* | The path to the root of the encryption zone. |
+
+Re-encryption is a NameNode-only operation in HDFS, so it can potentially put an intensive load on the NameNode. The following configurations can be changed to control the stress on the NameNode, depending on the acceptable throughput impact to the cluster.
+
+| | |
+|:---- |:---- |
+| *dfs.namenode.reencrypt.batch.size* | The number of EDEKs in a batch to be sent to the KMS for re-encryption. Each batch is processed when holding the name system read/write lock, with throttling happening between batches. See configs below. |
+| *dfs.namenode.reencrypt.throttle.limit.handler.ratio* | Ratio of read locks to be held during re-encryption. 1.0 means no throttling. 0.5 means re-encryption can hold the read lock at most 50% of its total processing time. Negative values or 0 are invalid. |
+| *dfs.namenode.reencrypt.throttle.limit.updater.ratio* | Ratio of write locks to be held during re-encryption. 1.0 means no throttling. 0.5 means re-encryption can hold the write lock at most 50% of its total processing time. Negative values or 0 are invalid. |
+
+### <a name="listReencryptionStatus"></a>listReencryptionStatus
+
+Usage: `[-listReencryptionStatus]`
+
+Lists re-encryption information for all encryption zones. Requires superuser permissions.
+
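+As an illustrative sketch (the zone path `/zone` is hypothetical), the `crypto` subcommands above map to the following `HdfsAdmin` calls:
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+    import org.apache.hadoop.fs.RemoteIterator;
+    import org.apache.hadoop.hdfs.client.HdfsAdmin;
+    import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
+    import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
+
+    Configuration conf = new Configuration();
+    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    // Equivalent to: hdfs crypto -reencryptZone -start -path /zone
+    admin.reencryptEncryptionZone(new Path("/zone"), ReencryptAction.START);
+    // Equivalent to: hdfs crypto -listReencryptionStatus
+    RemoteIterator<ZoneReencryptionStatus> it = admin.listReencryptionStatus();
+    while (it.hasNext()) {
+      ZoneReencryptionStatus zs = it.next();
+      System.out.println(zs.getZoneName() + ": " + zs.getState()
+          + ", files re-encrypted: " + zs.getFilesReencrypted());
+    }
+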
 <a name="Example_usage"></a>Example usage
 -------------
 
@@ -282,4 +309,20 @@
 
 ### <a name="Rogue_user_exploits"></a>Rogue user exploits
 
-A rogue user can collect keys of files they have access to, and use them later to decrypt the encrypted data of those files. As the user had access to those files, they already had access to the file contents. This can be mitigated through periodic key rolling policies.
+A rogue user can collect keys of files they have access to, and use them later to decrypt the encrypted data of those files. As the user had access to those files, they already had access to the file contents. This can be mitigated through periodic key rolling policies. The [reencryptZone](#reencryptZone) command is usually required after key rolling, to make sure the EDEKs on existing files use the new version of the key.
+
+The manual steps for a complete key rolling and re-encryption are listed below. These instructions assume that you are running as the key admin or HDFS superuser, as appropriate.
+
+    # As the key admin, roll the key to a new version
+    hadoop key roll exposedKey
+
+    # As the super user, re-encrypt the encryption zone. Possibly list zones first.
+    hdfs crypto -listZones
+    hdfs crypto -reencryptZone -start -path /zone
+
+    # As the super user, periodically check the status of re-encryption
+    hdfs crypto -listReencryptionStatus
+
+    # As the super user, get encryption information from the file and double-check its encryption key version
+    hdfs crypto -getFileEncryptionInfo -path /zone/helloWorld
+    # console output: {cipherSuite: {name: AES/CTR/NoPadding, algorithmBlockSize: 16}, cryptoProtocolVersion: CryptoProtocolVersion{description='Encryption zones', version=2, unknownValue=null}, edek: 2010d301afbd43b58f10737ce4e93b39, iv: ade2293db2bab1a2e337f91361304cb3, keyName: exposedKey, ezKeyVersionName: exposedKey@1}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 7181968..60f4f56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -46,7 +46,7 @@
     super.setUp();
 
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        "RS-6-3-64k,RS-3-2-64k,XOR-2-1-64k");
+        "RS-6-3-1024k,RS-3-2-1024k,XOR-2-1-1024k");
 
     dfsCluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_OF_DATANODES).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 0212c4e..3f8c7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -38,6 +38,9 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -706,4 +709,48 @@
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUpdatePipeLineAfterDNReg() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+      cluster.waitActive();
+      FileSystem fileSys = cluster.getFileSystem();
+
+      Path file = new Path("/testUpdatePipeLineAfterDNReg");
+      FSDataOutputStream out = fileSys.create(file);
+      out.write(1);
+      out.hflush();
+      //Get the first DN, disable its heartbeats, and then mark it dead
+      DataNode dn1 = cluster.getDataNodes().get(0);
+      dn1.setHeartbeatsDisabledForTests(true);
+      DatanodeDescriptor dn1Desc = cluster.getNamesystem(0).getBlockManager()
+          .getDatanodeManager().getDatanode(dn1.getDatanodeId());
+      cluster.setDataNodeDead(dn1Desc);
+      //Re-register the DeadNode
+      DatanodeProtocolClientSideTranslatorPB dnp = new DatanodeProtocolClientSideTranslatorPB(
+          cluster.getNameNode().getNameNodeAddress(), conf);
+      dnp.registerDatanode(
+          dn1.getDNRegistrationForBP(cluster.getNamesystem().getBlockPoolId()));
+      DFSOutputStream dfsO = (DFSOutputStream) out.getWrappedStream();
+      String clientName = ((DistributedFileSystem) fileSys).getClient()
+          .getClientName();
+      NamenodeProtocols namenode = cluster.getNameNodeRpc();
+      //Update the genstamp and call updatePipeline
+      LocatedBlock newBlock = namenode
+          .updateBlockForPipeline(dfsO.getBlock(), clientName);
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+      newBlock = namenode.updateBlockForPipeline(dfsO.getBlock(), clientName);
+      //Should not throw any error; the pipeline update should succeed
+      dfsO.getStreamer()
+          .updatePipeline(newBlock.getBlock().getGenerationStamp());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index 2012258..eb4f124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -33,6 +33,9 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
+
 import org.junit.Assume;
 import org.junit.Test;
 
@@ -135,4 +138,42 @@
       cluster.shutdown();
     }
   }
+
+  @Test(timeout=60000)
+  public void testOpenInfo() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt(Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY, 0);
+    MiniDFSCluster cluster =
+            new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+
+      Path file = new Path("/testfile");
+      try(FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+      }
+
+      DfsClientConf dcconf = new DfsClientConf(conf);
+      int retryTimesForGetLastBlockLength =
+              dcconf.getRetryTimesForGetLastBlockLength();
+      assertEquals(0, retryTimesForGetLastBlockLength);
+
+      try(DFSInputStream fin = fs.dfs.open("/testfile")) {
+        long flen = fin.getFileLength();
+        assertEquals(chunkSize, flen);
+
+        long lastBlockBeingWrittenLength =
+                fin.getlastBlockBeingWrittenLengthForTesting();
+        assertEquals(0, lastBlockBeingWrittenLength);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 27d41b4..9ae49aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2189,7 +2189,7 @@
       assertTrue(xattrs.isEmpty());
       List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptop
       Path target2 = new Path(hdfsTestDir, "targetfile2");
@@ -2208,7 +2208,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(target2).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptopx
       Path target3 = new Path(hdfsTestDir, "targetfile3");
@@ -2229,7 +2229,7 @@
       assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = dfs.getAclStatus(target3).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptopa
       Path target4 = new Path(hdfsTestDir, "targetfile4");
@@ -2248,7 +2248,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(target4).getEntries();
       assertFalse(acls.isEmpty());
-      assertTrue(targetPerm.getAclBit());
+      assertTrue(targetStatus.hasAcl());
       assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target4));
 
       // -ptoa (verify -pa option will preserve permissions also)
@@ -2268,7 +2268,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(target5).getEntries();
       assertFalse(acls.isEmpty());
-      assertTrue(targetPerm.getAclBit());
+      assertTrue(targetStatus.hasAcl());
       assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target5));
     } finally {
       if (null != shell) {
@@ -2480,7 +2480,7 @@
       assertTrue(xattrs.isEmpty());
       List<AclEntry> acls = dfs.getAclStatus(targetDir1).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptop
       Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
@@ -2499,7 +2499,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(targetDir2).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptopx
       Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
@@ -2520,7 +2520,7 @@
       assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = dfs.getAclStatus(targetDir3).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptopa
       Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
@@ -2539,7 +2539,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(targetDir4).getEntries();
       assertFalse(acls.isEmpty());
-      assertTrue(targetPerm.getAclBit());
+      assertTrue(targetStatus.hasAcl());
       assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4));
 
       // -ptoa (verify -pa option will preserve permissions also)
@@ -2559,7 +2559,7 @@
       assertTrue(xattrs.isEmpty());
       acls = dfs.getAclStatus(targetDir5).getEntries();
       assertFalse(acls.isEmpty());
-      assertTrue(targetPerm.getAclBit());
+      assertTrue(targetStatus.hasAcl());
       assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5));
     } finally {
       if (shell != null) {
@@ -2615,7 +2615,7 @@
       assertTrue(perm.equals(targetPerm));
       List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
       assertTrue(acls.isEmpty());
-      assertFalse(targetPerm.getAclBit());
+      assertFalse(targetStatus.hasAcl());
 
       // -ptopa preserves both sticky bit and ACL
       Path target2 = new Path(hdfsTestDir, "targetfile2");
@@ -2632,7 +2632,7 @@
       assertTrue(perm.equals(targetPerm));
       acls = dfs.getAclStatus(target2).getEntries();
       assertFalse(acls.isEmpty());
-      assertTrue(targetPerm.getAclBit());
+      assertTrue(targetStatus.hasAcl());
       assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target2));
     } finally {
       if (null != shell) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9525609..bf2b002 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -1608,8 +1608,6 @@
       assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
           getByName(policyName).getName());
       fs.disableErasureCodingPolicy(policyName);
-      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
-          getRemovedPolicies().get(0).getName());
       fs.enableErasureCodingPolicy(policyName);
       assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
           getByName(policyName).getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index bf02db3..870023b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -1867,4 +1867,51 @@
     // Read them back in and compare byte-by-byte
     verifyFilesEqual(fs, baseFile, encFile1, len);
   }
+
+  /**
+   * Test listing encryption zones after the zones have been deleted,
+   * but still exist under snapshots. The test first moves the EZs to
+   * the trash folder, so that an INodeReference is created for each EZ,
+   * then removes the EZs from the trash folder to emulate the condition
+   * where the EZ inode is no longer complete.
+   */
+  @Test
+  public void testListEncryptionZonesWithSnapshots() throws Exception {
+    final Path snapshottable = new Path("/zones");
+    final Path zoneDirectChild = new Path(snapshottable, "zone1");
+    final Path snapshottableChild = new Path(snapshottable, "child");
+    final Path zoneSubChild = new Path(snapshottableChild, "zone2");
+    fsWrapper.mkdir(zoneDirectChild, FsPermission.getDirDefault(),
+        true);
+    fsWrapper.mkdir(zoneSubChild, FsPermission.getDirDefault(),
+        true);
+    dfsAdmin.allowSnapshot(snapshottable);
+    dfsAdmin.createEncryptionZone(zoneDirectChild, TEST_KEY, NO_TRASH);
+    dfsAdmin.createEncryptionZone(zoneSubChild, TEST_KEY, NO_TRASH);
+    final Path snap1 = fs.createSnapshot(snapshottable, "snap1");
+    Configuration clientConf = new Configuration(conf);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
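+    // A non-zero fs.trash.interval (in minutes) makes shell deletes move
+    // paths into trash rather than removing them; renaming the zone into
+    // trash while a snapshot exists is what creates the INodeReference.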
+    FsShell shell = new FsShell(clientConf);
+    //will "trash" the zone under subfolder of snapshottable directory
+    verifyShellDeleteWithTrash(shell, snapshottableChild);
+    // Permanently remove the trashed zone under the subfolder of the
+    // snapshottable directory
+    fsWrapper.delete(shell.getCurrentTrashDir(snapshottableChild),
+        true);
+    final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
+    boolean match = false;
+    while (it.hasNext()) {
+      EncryptionZone ez = it.next();
+      assertNotEquals("EncryptionZone " + zoneSubChild.toString() +
+          " should not be listed.",
+          ez.getPath(), zoneSubChild.toString());
+    }
+    //will "trash" the zone direct child of snapshottable directory
+    verifyShellDeleteWithTrash(shell, zoneDirectChild);
+    // Permanently remove the trashed zone that was a direct child of the
+    // snapshottable directory
+    fsWrapper.delete(shell.getCurrentTrashDir(zoneDirectChild), true);
+    assertFalse("listEncryptionZones should not return anything, " +
+            "since both EZs were deleted.",
+        dfsAdmin.listEncryptionZones().hasNext());
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 06edb1a..0c545be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -50,8 +50,8 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.*;
@@ -211,7 +211,8 @@
 
     // Only default policy should be enabled after restart
     Assert.assertEquals("Only default policy should be enabled after restart",
-        1, fs.getAllErasureCodingPolicies().size());
+        1,
+        ErasureCodingPolicyManager.getInstance().getEnabledPolicies().length);
 
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
@@ -383,6 +384,18 @@
         .getAllErasureCodingPolicies();
     assertTrue("All system policies should be enabled",
         allECPolicies.containsAll(SystemErasureCodingPolicies.getPolicies()));
+
+    // Query again after adding a new policy
+    ECSchema toAddSchema = new ECSchema("rs", 5, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+    ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
+    fs.addErasureCodingPolicies(policyArray);
+    allECPolicies = fs.getAllErasureCodingPolicies();
+    assertEquals("Should return new added policy",
+        SystemErasureCodingPolicies.getPolicies().size() + 1,
+        allECPolicies.size());
+
   }
 
   @Test
@@ -600,7 +613,17 @@
     fs.mkdirs(dirPath);
     fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
 
-    final String ecPolicyName = "RS-10-4-64k";
+    String ecPolicyName = null;
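+    // Pick any policy other than the one already set on the directory;
+    // hard-coding a policy name here could fail if it is not enabled.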
+    Collection<ErasureCodingPolicy> allPolicies =
+        fs.getAllErasureCodingPolicies();
+    for (ErasureCodingPolicy policy : allPolicies) {
+      if (!ecPolicy.equals(policy)) {
+        ecPolicyName = policy.getName();
+        break;
+      }
+    }
+    assertNotNull(ecPolicyName);
+
     fs.createFile(filePath).build().close();
     assertEquals(ecPolicy, fs.getErasureCodingPolicy(filePath));
     fs.delete(filePath, true);
@@ -647,7 +670,7 @@
 
   @Test
   public void testGetAllErasureCodingCodecs() throws Exception {
-    HashMap<String, String> allECCodecs = fs
+    Map<String, String> allECCodecs = fs
         .getAllErasureCodingCodecs();
     assertTrue("At least 3 system codecs should be enabled",
         allECCodecs.size() >= 3);
@@ -704,7 +727,7 @@
 
     // Test add policy successfully
     newPolicy =
-        new ErasureCodingPolicy(toAddSchema, 1 * 1024 * 1024);
+        new ErasureCodingPolicy(toAddSchema, 4 * 1024 * 1024);
     policyArray  = new ErasureCodingPolicy[]{newPolicy};
     responses = fs.addErasureCodingPolicies(policyArray);
     assertEquals(1, responses.length);
@@ -732,4 +755,85 @@
       }
     });
   }
+
+  @Test
+  public void testReplicationPolicy() throws Exception {
+    ErasureCodingPolicy replicaPolicy =
+        SystemErasureCodingPolicies.getReplicationPolicy();
+
+    final Path rootDir = new Path("/striped");
+    final Path replicaDir = new Path(rootDir, "replica");
+    final Path subReplicaDir = new Path(replicaDir, "replica");
+    final Path replicaFile = new Path(replicaDir, "file");
+    final Path subReplicaFile = new Path(subReplicaDir, "file");
+
+    fs.mkdirs(rootDir);
+    fs.setErasureCodingPolicy(rootDir, ecPolicy.getName());
+
+    // 1. At first, the child directory inherits its parent's EC policy
+    fs.mkdirs(replicaDir);
+    fs.createFile(replicaFile).build().close();
+    HdfsFileStatus fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
+    assertEquals("File should inherit EC policy.", ecPolicy, fileStatus
+        .getErasureCodingPolicy());
+    assertEquals("File should be a EC file.", true, fileStatus
+        .isErasureCoded());
+    assertEquals("File should have the same EC policy as its ancestor.",
+        ecPolicy, fs.getErasureCodingPolicy(replicaFile));
+    fs.delete(replicaFile, false);
+
+    // 2. Set replication policy on child directory, then get back the policy
+    fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName());
+    ErasureCodingPolicy temp = fs.getErasureCodingPolicy(replicaDir);
+    assertEquals("Directory should hide replication EC policy.",
+        null, temp);
+
+    // 3. A new file will be a replication file. Note that the replication
+    //    policy is only set on the directory, not on the file
+    fs.createFile(replicaFile).build().close();
+    assertEquals("Replication file should have default replication factor.",
+        fs.getDefaultReplication(),
+        fs.getFileStatus(replicaFile).getReplication());
+    fs.setReplication(replicaFile, (short) 2);
+    assertEquals("File should have replication factor as expected.",
+        2, fs.getFileStatus(replicaFile).getReplication());
+    fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
+    assertEquals("File should not have EC policy.", null, fileStatus
+        .getErasureCodingPolicy());
+    assertEquals("File should not be a EC file.", false,
+        fileStatus.isErasureCoded());
+    ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(replicaFile);
+    assertEquals("File should not have EC policy.", null, ecPolicyOnFile);
+    fs.delete(replicaFile, false);
+
+    // 4. A new directory under the replication directory is also a
+    // replication directory
+    fs.mkdirs(subReplicaDir);
+    assertEquals("Directory should inherit hiding replication EC policy.",
+        null, fs.getErasureCodingPolicy(subReplicaDir));
+    fs.createFile(subReplicaFile).build().close();
+    assertEquals("File should have default replication factor.",
+        fs.getDefaultReplication(),
+        fs.getFileStatus(subReplicaFile).getReplication());
+    fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
+    assertEquals("File should not have EC policy.", null,
+        fileStatus.getErasureCodingPolicy());
+    assertEquals("File should not be a EC file.", false,
+        fileStatus.isErasureCoded());
+    assertEquals("File should not have EC policy.", null,
+        fs.getErasureCodingPolicy(subReplicaFile));
+    fs.delete(subReplicaFile, false);
+
+    // 5. After unsetting the replication policy, a new file is an EC file
+    fs.unsetErasureCodingPolicy(replicaDir);
+    fs.createFile(subReplicaFile).build().close();
+    fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
+    assertEquals("File should inherit EC policy.", ecPolicy,
+        fileStatus.getErasureCodingPolicy());
+    assertEquals("File should be a EC file.", true,
+        fileStatus.isErasureCoded());
+    assertEquals("File should have the same EC policy as its ancestor",
+        ecPolicy, fs.getErasureCodingPolicy(subReplicaFile));
+    fs.delete(subReplicaFile, false);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index bcb02b3..0834d30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -626,7 +626,7 @@
    */
   @Test
   public void testPreadFailureWithChangedBlockLocations() throws Exception {
-    doPreadTestWithChangedLocations();
+    doPreadTestWithChangedLocations(1);
   }
 
   /**
@@ -639,21 +639,36 @@
    * 7. Consider next calls to getBlockLocations() always returns DN3 as last
    * location.<br>
    */
-  @Test
+  @Test(timeout = 60000)
   public void testPreadHedgedFailureWithChangedBlockLocations()
       throws Exception {
     isHedgedRead = true;
-    doPreadTestWithChangedLocations();
+    DFSClientFaultInjector old = DFSClientFaultInjector.get();
+    try {
+      DFSClientFaultInjector.set(new DFSClientFaultInjector() {
+        @Override
+        public void sleepBeforeHedgedGet() {
+          try {
+            Thread.sleep(500);
+          } catch (InterruptedException e) {
+            // Ignore; waking early only makes the hedged read fire sooner.
+          }
+        }
+      });
+      doPreadTestWithChangedLocations(2);
+    } finally {
+      DFSClientFaultInjector.set(old);
+    }
   }
 
-  private void doPreadTestWithChangedLocations()
+  private void doPreadTestWithChangedLocations(int maxFailures)
       throws IOException, TimeoutException, InterruptedException {
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.DEBUG);
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     if (isHedgedRead) {
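+      // Rough mechanism of hedged reads: if the first read does not return
+      // within the threshold (100ms here), a second "hedged" read for the
+      // same data is issued to another datanode via the dedicated thread
+      // pool, and whichever response arrives first is used.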
+      conf.setInt(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, 100);
       conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, 2);
+      conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1000);
     }
     try (MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
@@ -747,6 +762,9 @@
       int n = din.read(0, buf, 0, data.length());
       assertEquals(data.length(), n);
       assertEquals("Data should be read", data, new String(buf, 0, n));
+      assertTrue("Read should complete with maximum " + maxFailures
+              + " failures, but completed with " + din.failures,
+          din.failures <= maxFailures);
       DFSClient.LOG.info("Read completed");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index b356fb9..0545b04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 
 import javax.management.AttributeNotFoundException;
@@ -46,6 +47,7 @@
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.CheckpointFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
@@ -568,6 +570,52 @@
     testCheckpoint(3);
   }
 
+  @Test(timeout = 60000)
+  public void testRollBackImage() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2);
+    MiniQJMHACluster cluster = null;
+    CheckpointFaultInjector old = CheckpointFaultInjector.getInstance();
+    try {
+      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2).build();
+      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
+      dfsCluster.waitActive();
+      dfsCluster.transitionToActive(0);
+      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
+      for (int i = 0; i <= 10; i++) {
+        Path foo = new Path("/foo" + i);
+        dfs.mkdirs(foo);
+      }
+      cluster.getDfsCluster().getNameNodeRpc(0).rollEdits();
+      CountDownLatch ruEdit = new CountDownLatch(1);
+      CheckpointFaultInjector.set(new CheckpointFaultInjector() {
+        @Override
+        public void duringUploadInProgess()
+            throws IOException, InterruptedException {
+          if (ruEdit.getCount() == 1) {
+            ruEdit.countDown();
+            Thread.sleep(180000);
+          }
+        }
+      });
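+      // The injected 180s sleep stalls the standby's image upload
+      // mid-flight, so the PREPARE below runs while an upload is still in
+      // progress; the NN must nevertheless keep a rollback fsimage.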
+      ruEdit.await();
+      RollingUpgradeInfo info = dfs
+          .rollingUpgrade(RollingUpgradeAction.PREPARE);
+      Assert.assertTrue(info.isStarted());
+      FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();
+      queryForPreparation(dfs);
+      // The NN should have a copy of the fsimage in case of rollbacks.
+      Assert.assertTrue(fsimage.hasRollbackFSImage());
+    } finally {
+      CheckpointFaultInjector.set(old);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index f03b440..f25d28f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -48,6 +49,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -466,6 +468,29 @@
       // expected
     }
 
+    ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+    ErasureCodingPolicy[] policyArray =
+        new ErasureCodingPolicy[]{newPolicy};
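+    // Adding or removing an erasure coding policy mutates the namespace,
+    // so both operations are expected to be rejected in safe mode.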
+    try {
+      dfs.addErasureCodingPolicies(policyArray);
+      fail("AddErasureCodingPolicies should have failed.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot add erasure coding policy", ioe);
+      // expected
+    }
+
+    try {
+      dfs.removeErasureCodingPolicy("testECName");
+      fail("RemoveErasureCodingPolicy should have failed.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot remove erasure coding policy", ioe);
+      // expected
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
index 2964f05..09ef3a575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
@@ -20,11 +20,13 @@
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -54,10 +56,11 @@
   private MiniQJMHACluster qjmhaCluster;
   private MiniDFSCluster dfsCluster;
   private MiniJournalCluster jCluster;
-  private FileSystem fs;
   private FSNamesystem namesystem;
   private int editsPerformed = 0;
   private final String jid = "ns1";
+  private int activeNNindex = 0;
+  private static final int DFS_HA_TAILEDITS_PERIOD_SECONDS = 1;
 
   @Rule
   public TestName testName = new TestName();
@@ -71,13 +74,16 @@
         "testSyncAfterJNdowntimeWithoutQJournalQueue")) {
       conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY, 0);
     }
+    if (testName.getMethodName().equals("testSyncDuringRollingUpgrade")) {
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,
+          DFS_HA_TAILEDITS_PERIOD_SECONDS);
+    }
     qjmhaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2)
       .build();
     dfsCluster = qjmhaCluster.getDfsCluster();
     jCluster = qjmhaCluster.getJournalCluster();
 
     dfsCluster.transitionToActive(0);
-    fs = dfsCluster.getFileSystem(0);
     namesystem = dfsCluster.getNamesystem(0);
   }
 
@@ -192,36 +198,7 @@
   // the journals.
   @Test(timeout=60000)
   public void testRandomJournalMissingLogs() throws Exception {
-    Random randomJournal = new Random();
-
-    List<File> journalCurrentDirs = Lists.newArrayList();
-
-    for (int i = 0; i < 3; i++) {
-      journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i,
-          jid)).getCurrentDir());
-    }
-
-    int count = 0;
-    long lastStartTxId;
-    int journalIndex;
-    List<File> missingLogs = Lists.newArrayList();
-    while (count < 5) {
-      lastStartTxId = generateEditLog();
-
-      // Delete the last edit log segment from randomly selected journal node
-      journalIndex = randomJournal.nextInt(3);
-      missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
-          lastStartTxId));
-
-      // Delete the last edit log segment from two journals for some logs
-      if (count % 2 == 0) {
-        journalIndex = (journalIndex + 1) % 3;
-        missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
-            lastStartTxId));
-      }
-
-      count++;
-    }
+    List<File> missingLogs = deleteEditLogsFromRandomJN();
 
     GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
   }
@@ -277,7 +254,8 @@
    */
   @Test (timeout=300_000)
   public void testSyncAfterJNdowntimeWithoutQJournalQueue() throws Exception{
-    // Queuing is disabled during the cluster setup {@link #setUpMiniCluster()}
+    // QJournal Queuing is disabled during the cluster setup
+    // {@link #setUpMiniCluster()}
     File firstJournalDir = jCluster.getJournalDir(0, jid);
     File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
         .getCurrentDir();
@@ -376,11 +354,88 @@
     GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
   }
 
+  // Test JournalNode sync during a rolling upgrade of the NN.
+  @Test (timeout=300_000)
+  public void testSyncDuringRollingUpgrade() throws Exception {
+
+    DistributedFileSystem dfsActive;
+    int standbyNNindex;
+
+    if (dfsCluster.getNameNode(0).isActiveState()) {
+      activeNNindex = 0;
+      standbyNNindex = 1;
+    } else {
+      activeNNindex = 1;
+      standbyNNindex = 0;
+    }
+    dfsActive = dfsCluster.getFileSystem(activeNNindex);
+
+    // Prepare for rolling upgrade
+    final RollingUpgradeInfo info = dfsActive.rollingUpgrade(
+          HdfsConstants.RollingUpgradeAction.PREPARE);
+
+    // Query rolling upgrade
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+
+    // Restart the Standby NN with rollingUpgrade option
+    dfsCluster.restartNameNode(standbyNNindex, true,
+        "-rollingUpgrade", "started");
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+
+    // Do some edits and delete some edit logs
+    List<File> missingLogs = deleteEditLogsFromRandomJN();
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+
+    // Transition the active NN to standby and standby to active
+    dfsCluster.transitionToStandby(activeNNindex);
+
+    // Let Standby NN catch up tailing edit logs before transitioning it to
+    // active
+    Thread.sleep(30 * DFS_HA_TAILEDITS_PERIOD_SECONDS * 1000);
+
+    dfsCluster.transitionToActive(standbyNNindex);
+    dfsCluster.waitActive();
+
+    activeNNindex = standbyNNindex;
+    standbyNNindex = (activeNNindex + 1) % 2;
+    dfsActive = dfsCluster.getFileSystem(activeNNindex);
+
+    Assert.assertTrue(dfsCluster.getNameNode(activeNNindex).isActiveState());
+    Assert.assertFalse(dfsCluster.getNameNode(standbyNNindex).isActiveState());
+
+    // Restart the current standby NN (previously active)
+    dfsCluster.restartNameNode(standbyNNindex, true,
+        "-rollingUpgrade", "started");
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+    dfsCluster.waitActive();
+
+    // Do some edits and delete some edit logs
+    missingLogs.addAll(deleteEditLogsFromRandomJN());
+
+    // Check that JNSync downloaded the edit logs rolled during rolling upgrade.
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+
+    // Finalize rolling upgrade
+    final RollingUpgradeInfo finalize = dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.FINALIZE);
+    Assert.assertTrue(finalize.isFinalized());
+
+    // Check the missing edit logs exist after finalizing rolling upgrade
+    for (File editLog : missingLogs) {
+      Assert.assertTrue("Edit log missing after finalizing rolling upgrade",
+          editLog.exists());
+    }
+  }
+
   private File deleteEditLog(File currentDir, long startTxId)
       throws IOException {
     EditLogFile logFile = getLogFile(currentDir, startTxId);
     while (logFile.isInProgress()) {
-      dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
+      dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
       logFile = getLogFile(currentDir, startTxId);
     }
     File deleteFile = logFile.getFile();
@@ -389,13 +444,55 @@
     return deleteFile;
   }
 
+  private List<File> deleteEditLogsFromRandomJN() throws IOException {
+    Random random = new Random();
+
+    List<File> journalCurrentDirs = Lists.newArrayList();
+
+    for (int i = 0; i < 3; i++) {
+      journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i,
+          jid)).getCurrentDir());
+    }
+
+    long[] startTxIds = new long[20];
+    for (int i = 0; i < 20; i++) {
+      startTxIds[i] = generateEditLog();
+    }
+
+    int count = 0, startTxIdIndex;
+    long startTxId;
+    int journalIndex;
+    List<File> missingLogs = Lists.newArrayList();
+    // Track indices already used so each segment is deleted at most once.
+    List<Integer> deletedTxIdIndices = Lists.newArrayList();
+    while (count < 5) {
+      // Select a random edit log segment to delete
+      startTxIdIndex = random.nextInt(20);
+      while (deletedTxIdIndices.contains(startTxIdIndex)) {
+        startTxIdIndex = random.nextInt(20);
+      }
+      startTxId = startTxIds[startTxIdIndex];
+      deletedTxIdIndices.add(startTxIdIndex);
+
+      // Delete the randomly selected edit log segment from randomly selected
+      // journal node
+      journalIndex = random.nextInt(3);
+      missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
+          startTxId));
+
+      count++;
+    }
+
+    return missingLogs;
+  }
+
   /**
    * Do a mutative metadata operation on the file system.
    *
    * @return true if the operation was successful, false otherwise.
    */
   private boolean doAnEdit() throws IOException {
-    return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
+    return dfsCluster.getFileSystem(activeNNindex).mkdirs(
+        new Path("/tmp", Integer.toString(editsPerformed++)));
   }
 
   /**
@@ -414,12 +511,13 @@
    * @return the startTxId of next segment after rolling edits.
    */
   private long generateEditLog(int numEdits) throws IOException {
-    long startTxId = namesystem.getFSImage().getEditLog().getLastWrittenTxId();
+    long lastWrittenTxId = dfsCluster.getNameNode(activeNNindex).getFSImage()
+        .getEditLog().getLastWrittenTxId();
     for (int i = 1; i <= numEdits; i++) {
       Assert.assertTrue("Failed to do an edit", doAnEdit());
     }
-    dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
-    return startTxId;
+    dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
+    return lastWrittenTxId;
   }
 
   private Supplier<Boolean> editLogExists(List<File> editLogs) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 42aeadf..4092e5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -17,45 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.InputStreamReader;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.LinkedListMultimap;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -80,8 +45,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
@@ -115,10 +80,44 @@
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.LinkedListMultimap;
-import com.google.common.collect.Lists;
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 public class TestBlockManager {
   private DatanodeStorageInfo[] storages;
@@ -1365,7 +1364,7 @@
         .getNumDataUnits();
     final int numParityBlocks = StripedFileTestUtil.getDefaultECPolicy()
         .getNumParityUnits();
-    final long blockSize = 64 * 1024;
+    final long blockSize = 6 * 1024 * 1024;
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index de002f4..286f4a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -25,6 +25,7 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -500,46 +501,93 @@
         "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
   }
 
-  @Test
-  public void testPendingRecoveryTasks() throws IOException {
+  /**
+   * Verify the correctness of pending recovery process.
+   *
+   * @param numReplicationBlocks the number of replication blocks in the queue.
+   * @param numECBlocks number of EC blocks in the queue.
+   * @param maxTransfers the maxTransfer value.
+   * @param numReplicationTasks the number of replication tasks polled from
+   *                            the queue.
+   * @param numECTasks the number of EC tasks polled from the queue.
+   *
+   * @throws IOException
+   */
+  private void verifyPendingRecoveryTasks(
+      int numReplicationBlocks, int numECBlocks,
+      int maxTransfers, int numReplicationTasks, int numECTasks)
+      throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
     Configuration conf = new Configuration();
     DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
 
-    int maxTransfers = 20;
-    int numPendingTasks = 7;
-    int numECTasks = maxTransfers - numPendingTasks;
-
     DatanodeDescriptor nodeInfo = Mockito.mock(DatanodeDescriptor.class);
     Mockito.when(nodeInfo.isRegistered()).thenReturn(true);
     Mockito.when(nodeInfo.getStorageInfos())
         .thenReturn(new DatanodeStorageInfo[0]);
 
-    List<BlockTargetPair> pendingList =
-        Collections.nCopies(numPendingTasks, new BlockTargetPair(null, null));
-    Mockito.when(nodeInfo.getReplicationCommand(maxTransfers))
-        .thenReturn(pendingList);
-    List<BlockECReconstructionInfo> ecPendingList =
-        Collections.nCopies(numECTasks, null);
+    if (numReplicationBlocks > 0) {
+      Mockito.when(nodeInfo.getNumberOfReplicateBlocks())
+          .thenReturn(numReplicationBlocks);
 
-    Mockito.when(nodeInfo.getErasureCodeCommand(numECTasks))
-        .thenReturn(ecPendingList);
+      List<BlockTargetPair> tasks =
+          Collections.nCopies(
+              Math.min(numReplicationTasks, numReplicationBlocks),
+              new BlockTargetPair(null, null));
+      Mockito.when(nodeInfo.getReplicationCommand(numReplicationTasks))
+          .thenReturn(tasks);
+    }
+
+    if (numECBlocks > 0) {
+      Mockito.when(nodeInfo.getNumberOfBlocksToBeErasureCoded())
+          .thenReturn(numECBlocks);
+
+      List<BlockECReconstructionInfo> tasks =
+          Collections.nCopies(numECTasks, null);
+      Mockito.when(nodeInfo.getErasureCodeCommand(numECTasks))
+          .thenReturn(tasks);
+    }
+
     DatanodeRegistration dnReg = Mockito.mock(DatanodeRegistration.class);
     Mockito.when(dm.getDatanode(dnReg)).thenReturn(nodeInfo);
-
     DatanodeCommand[] cmds = dm.handleHeartbeat(
         dnReg, new StorageReport[1], "bp-123", 0, 0, 10, maxTransfers, 0, null,
         SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
 
-    assertEquals(2, cmds.length);
-    assertTrue(cmds[0] instanceof BlockCommand);
-    BlockCommand replicaCmd = (BlockCommand) cmds[0];
-    assertEquals(numPendingTasks, replicaCmd.getBlocks().length);
-    assertEquals(numPendingTasks, replicaCmd.getTargets().length);
-    assertTrue(cmds[1] instanceof BlockECReconstructionCommand);
-    BlockECReconstructionCommand ecRecoveryCmd =
-        (BlockECReconstructionCommand) cmds[1];
-    assertEquals(numECTasks, ecRecoveryCmd.getECTasks().size());
+    long expectedNumCmds = Arrays.stream(
+        new int[]{numReplicationTasks, numECTasks})
+        .filter(x -> x > 0)
+        .count();
+    assertEquals(expectedNumCmds, cmds.length);
+
+    int idx = 0;
+    if (numReplicationTasks > 0) {
+      assertTrue(cmds[idx] instanceof BlockCommand);
+      BlockCommand cmd = (BlockCommand) cmds[idx];
+      assertEquals(numReplicationTasks, cmd.getBlocks().length);
+      assertEquals(numReplicationTasks, cmd.getTargets().length);
+      idx++;
+    }
+
+    if (numECTasks > 0) {
+      assertTrue(cmds[idx] instanceof BlockECReconstructionCommand);
+      BlockECReconstructionCommand cmd =
+          (BlockECReconstructionCommand) cmds[idx];
+      assertEquals(numECTasks, cmd.getECTasks().size());
+    }
+
+    Mockito.verify(nodeInfo).getReplicationCommand(numReplicationTasks);
+    Mockito.verify(nodeInfo).getErasureCodeCommand(numECTasks);
+  }
+
+  @Test
+  public void testPendingRecoveryTasks() throws IOException {
+    // Tasks are split according to the ratio between the queue lengths.
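+    // For example, with 40 replication blocks, 10 EC blocks and
+    // maxTransfers = 20, a proportional split yields 20 * 40/50 = 16
+    // replication tasks and 20 * 10/50 = 4 EC tasks.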
+    verifyPendingRecoveryTasks(20, 20, 20, 10, 10);
+    verifyPendingRecoveryTasks(40, 10, 20, 16, 4);
+
+    // The split is only approximate when the ratio between the queue
+    // lengths is large.
+    verifyPendingRecoveryTasks(400, 1, 20, 20, 1);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 956406d..c95c71b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -316,15 +317,22 @@
          missingMemoryBlocks, mismatchBlocks, 0);
   }
 
-    private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) throws IOException {
     scanner.reconcile();
-    
+    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  }
+
+  private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) {
     assertTrue(scanner.diffs.containsKey(bpid));
     LinkedList<FsVolumeSpi.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
-    
+
     assertEquals(diffsize, diff.size());
     assertEquals(totalBlocks, stats.totalBlocks);
     assertEquals(missingMetaFile, stats.missingMetaFile);
@@ -1035,4 +1043,48 @@
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDirectoryScannerInFederatedCluster() throws Exception {
+    // Create a federated cluster with two nameservices and one DN
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(1);
+      cluster.transitionToActive(3);
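+      // With simpleHAFederatedTopology(2), nn0/nn1 are expected to serve
+      // the first nameservice and nn2/nn3 the second, so activating
+      // indices 1 and 3 yields one active NN per nameservice.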
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+      // Create one block in the first nameservice
+      FileSystem fs = cluster.getFileSystem(1);
+      int bp1Files = 1;
+      writeFile(fs, bp1Files);
+      // Create two blocks in the second nameservice
+      FileSystem fs2 = cluster.getFileSystem(3);
+      int bp2Files = 2;
+      writeFile(fs2, bp2Files);
+      // Run the directory scanner
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
+      scanner.setRetainDiffs(true);
+      scanner.reconcile();
+      // Check the block counts in the corresponding block pools
+      bpid = cluster.getNamesystem(1).getBlockPoolId();
+      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+      bpid = cluster.getNamesystem(3).getBlockPoolId();
+      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+    } finally {
+      if (scanner != null) {
+        scanner.shutdown();
+        scanner = null;
+      }
+    }
+  }
+
+  private void writeFile(FileSystem fs, int numFiles) throws IOException {
+    final String fileName = "/" + GenericTestUtils.getMethodName();
+    for (int i = 0; i < numFiles; i++) {
+      // Use a distinct path per file so that numFiles blocks are created
+      // in the block pool, matching the counts verified by the caller.
+      final Path filePath = new Path(fileName + i);
+      DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index 93a83fd3..ee92217 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -32,6 +32,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -886,6 +887,11 @@
     FsPermission perm = inode.getFsPermission();
     assertNotNull(perm);
     assertEquals(0755, perm.toShort());
+    FileStatus stat = fs.getFileStatus(path);
+    assertFalse(stat.hasAcl());
+    assertFalse(stat.isEncrypted());
+    assertFalse(stat.isErasureCoded());
+    // backwards-compat check
     assertEquals(0755, perm.toExtendedShort());
     assertAclFeature(false);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
index 728e15b..fecbbfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
@@ -135,4 +135,30 @@
     assertEquals(0L, result.get(0).getId());
     assertEquals("/", result.get(0).getPath());
   }
+
+  @Test
+  public void testListEncryptionZonesSubDirInvalid() throws Exception {
+    INodeDirectory thirdINode = new INodeDirectory(3L, "third".getBytes(),
+        defaultPermission, System.currentTimeMillis());
+    when(this.mockedDir.getInode(3L)).thenReturn(thirdINode);
+    //sets "second" as parent
+    thirdINode.setParent(this.secondINode);
+    this.ezManager = new EncryptionZoneManager(mockedDir, new Configuration());
+    this.ezManager.addEncryptionZone(1L, CipherSuite.AES_CTR_NOPADDING,
+        CryptoProtocolVersion.ENCRYPTION_ZONES, "test_key");
+    this.ezManager.addEncryptionZone(3L, CipherSuite.AES_CTR_NOPADDING,
+        CryptoProtocolVersion.ENCRYPTION_ZONES, "test_key");
+    // Set root as the proper parent for firstINode only,
+    // leaving secondINode with no parent
+    this.firstINode.setParent(rootINode);
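+    // Because secondINode has no parent, the zone rooted at thirdINode
+    // cannot be resolved to a full path and should be skipped, leaving
+    // only "/first" in the listing below.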
+    when(mockedDir.getINodesInPath("/first", DirOp.READ_LINK)).
+        thenReturn(mockedINodesInPath);
+    when(mockedINodesInPath.getLastINode()).
+        thenReturn(firstINode);
+    BatchedListEntries<EncryptionZone> result = ezManager.
+        listEncryptionZones(0);
+    assertEquals(1, result.size());
+    assertEquals(1L, result.get(0).getId());
+    assertEquals("/first", result.get(0).getPath());
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
index 44cf57a..bdd48e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -21,6 +21,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -36,10 +38,15 @@
 
   private boolean unprotectedSetTimes(long atime, long atime0, long precision,
       long mtime, boolean force) throws QuotaExceededException {
+    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+    SnapshotManager ssMgr = Mockito.mock(SnapshotManager.class);
     FSDirectory fsd = Mockito.mock(FSDirectory.class);
     INodesInPath iip = Mockito.mock(INodesInPath.class);
     INode inode = Mockito.mock(INode.class);
 
+    when(fsd.getFSNamesystem()).thenReturn(fsn);
+    when(fsn.getSnapshotManager()).thenReturn(ssMgr);
+    when(ssMgr.getSkipCaptureAccessTimeOnlyChange()).thenReturn(false);
     when(fsd.getAccessTimePrecision()).thenReturn(precision);
     when(fsd.hasWriteLock()).thenReturn(Boolean.TRUE);
     when(iip.getLastINode()).thenReturn(inode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 22c40fb..9256056 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -723,4 +723,91 @@
         .getBlockType());
     assertEquals(defaultBlockType, BlockType.CONTIGUOUS);
   }
+
+  /**
+   * Test if a INodeFile under a replication EC policy directory
+   * can be saved by FSImageSerialization and loaded by FSImageFormat#Loader.
+   */
+  @Test
+  public void testSaveAndLoadFileUnderReplicationPolicyDir()
+      throws IOException {
+    Configuration conf = new Configuration();
+    DFSTestUtil.enableAllECPolicies(conf);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      FSNamesystem fsn = cluster.getNamesystem();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      ErasureCodingPolicy replicaPolicy =
+          SystemErasureCodingPolicies.getReplicationPolicy();
+      ErasureCodingPolicy defaultEcPolicy =
+          StripedFileTestUtil.getDefaultECPolicy();
+
+      final Path ecDir = new Path("/ec");
+      final Path replicaDir = new Path(ecDir, "replica");
+      final Path replicaFile1 = new Path(replicaDir, "f1");
+      final Path replicaFile2 = new Path(replicaDir, "f2");
+
+      // create root directory
+      fs.mkdir(ecDir, null);
+      fs.setErasureCodingPolicy(ecDir, defaultEcPolicy.getName());
+
+      // create a directory and set the replication policy on it
+      fs.mkdir(replicaDir, null);
+      fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName());
+
+      // create an empty file f1
+      fs.create(replicaFile1).close();
+
+      // create an under-construction file f2
+      FSDataOutputStream out = fs.create(replicaFile2, (short) 2);
+      out.writeBytes("hello");
+      ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
+          .of(SyncFlag.UPDATE_LENGTH));
+
+      // checkpoint
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.saveNamespace();
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
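+      // saveNamespace above forces a new fsimage, so the restart below
+      // should load both files (including the under-construction one)
+      // from the image rather than replaying them from the edit log.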
+
+      cluster.restartNameNode();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      assertTrue(fs.getFileStatus(ecDir).isDirectory());
+      assertTrue(fs.getFileStatus(replicaDir).isDirectory());
+      assertTrue(fs.exists(replicaFile1));
+      assertTrue(fs.exists(replicaFile2));
+
+      // check directories
+      assertEquals("Directory should have default EC policy.",
+          defaultEcPolicy, fs.getErasureCodingPolicy(ecDir));
+      assertEquals("Directory should hide replication EC policy.",
+          null, fs.getErasureCodingPolicy(replicaDir));
+
+      // check file1
+      assertEquals("File should not have EC policy.", null,
+          fs.getErasureCodingPolicy(replicaFile1));
+      // check internals of file2
+      INodeFile file2Node =
+          fsn.dir.getINode4Write(replicaFile2.toString()).asFile();
+      assertEquals("hello".length(), file2Node.computeFileSize());
+      assertTrue(file2Node.isUnderConstruction());
+      BlockInfo[] blks = file2Node.getBlocks();
+      assertEquals(1, blks.length);
+      assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
+      assertEquals("File should return expected replication factor.",
+          2, blks[0].getReplication());
+      assertEquals("File should not have EC policy.", null,
+          fs.getErasureCodingPolicy(replicaFile2));
+      // check lease manager
+      Lease lease = fsn.leaseManager.getLease(file2Node);
+      Assert.assertNotNull(lease);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
new file mode 100644
index 0000000..7ba3f91
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -0,0 +1,1847 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import com.google.common.base.Supplier;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
+import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.ZoneSubmissionTracker;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.junit.rules.Timeout;
+import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+/**
+ * Test class for re-encryption.
+ */
+public class TestReencryption {
+
+  protected static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(TestReencryption.class);
+
+  private Configuration conf;
+  private FileSystemTestHelper fsHelper;
+
+  private MiniDFSCluster cluster;
+  private HdfsAdmin dfsAdmin;
+  private DistributedFileSystem fs;
+  private FSNamesystem fsn;
+  private File testRootDir;
+  private static final String TEST_KEY = "test_key";
+
+  private FileSystemTestWrapper fsWrapper;
+  private FileContextTestWrapper fcWrapper;
+
+  private static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+      EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+
+  private String getKeyProviderURI() {
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(
+        testRootDir.toString(), "test.jks").toUri();
+  }
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(180 * 1000);
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    testRootDir = new File(testRoot).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        getKeyProviderURI());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
+        true);
+    // Lower the batch size for testing
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+        2);
+    // Lower the listing limit for testing
+    conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 3);
+    // Adjust configs for re-encrypt test cases
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REENCRYPT_BATCH_SIZE_KEY, 5);
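+    // a batch size of 5 means the handler checkpoints every 5 files, which
+    // the tests below rely on when pausing after the first checkpoint.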
+    conf.setTimeDuration(
+        DFSConfigKeys.DFS_NAMENODE_REENCRYPT_SLEEP_INTERVAL_KEY, 1,
+        TimeUnit.SECONDS);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fsn = cluster.getNamesystem();
+    fsWrapper = new FileSystemTestWrapper(fs);
+    fcWrapper = new FileContextTestWrapper(
+        FileContext.getFileContext(cluster.getURI(), conf));
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    setProvider();
+    // Create a test key
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+    GenericTestUtils.setLogLevel(EncryptionZoneManager.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(ReencryptionHandler.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(ReencryptionStatus.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(ReencryptionUpdater.LOG, Level.TRACE);
+  }
+
+  private void setProvider() {
+    // Need to set the client's KeyProvider to the NN's for JKS,
+    // else the updates do not get flushed properly
+    fs.getClient()
+        .setKeyProvider(cluster.getNameNode().getNamesystem().getProvider());
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+    EncryptionFaultInjector.instance = new EncryptionFaultInjector();
+  }
+
+  private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
+    return fsn.getFileInfo(path.toString(), false).getFileEncryptionInfo();
+  }
+
+  @Test
+  public void testReencryptionBasic() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     * /dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path("/dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    // test re-encrypt without keyroll
+    final Path encFile1 = new Path(zone, "0");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile1);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+    assertKeyVersionEquals(encFile1, fei0);
+    // key not rolled, so no edeks need to be updated.
+    verifyZoneStatus(zone, null, 0);
+
+    // test re-encrypt after keyroll
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(2);
+    FileEncryptionInfo fei1 = getFileEncryptionInfo(encFile1);
+    assertKeyVersionChanged(encFile1, fei0);
+
+    // test listReencryptionStatus
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+    assertNotEquals(fei0.getEzKeyVersionName(), zs.getEzKeyVersionName());
+    assertEquals(fei1.getEzKeyVersionName(), zs.getEzKeyVersionName());
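+    // all 10 files created under the zone should be counted as re-encrypted.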
+    assertEquals(10, zs.getFilesReencrypted());
+
+    // test re-encrypt on same zone again
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(3);
+    assertKeyVersionEquals(encFile1, fei1);
+
+    // test non-EZ submission
+    try {
+      dfsAdmin.reencryptEncryptionZone(subdir, ReencryptAction.START);
+      fail("Re-encrypting non-EZ should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertExceptionContains("not the root of an encryption zone", expected);
+    }
+
+    // test non-existing dir
+    try {
+      dfsAdmin.reencryptEncryptionZone(new Path(zone, "notexist"),
+          ReencryptAction.START);
+      fail("Re-encrypting non-existing dir should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertTrue(
+          expected.unwrapRemoteException() instanceof FileNotFoundException);
+    }
+
+    // test directly on an EZ file
+    try {
+      dfsAdmin.reencryptEncryptionZone(encFile1, ReencryptAction.START);
+      fail("Re-encrypting on a file should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertExceptionContains("not the root of an encryption zone", expected);
+    }
+
+    // test same command resubmission
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+    try {
+      dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertExceptionContains("already submitted", expected);
+    }
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedZones(4);
+
+    // test empty EZ
+    final Path emptyZone = new Path("/emptyZone");
+    fsWrapper.mkdir(emptyZone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(emptyZone, TEST_KEY, NO_TRASH);
+
+    dfsAdmin.reencryptEncryptionZone(emptyZone, ReencryptAction.START);
+    waitForReencryptedZones(5);
+
+    dfsAdmin.reencryptEncryptionZone(emptyZone, ReencryptAction.START);
+    waitForReencryptedZones(6);
+
+    // test rename ez and listReencryptionStatus
+    final Path renamedZone = new Path("/renamedZone");
+    fsWrapper.rename(zone, renamedZone);
+    it = dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    zs = it.next();
+    assertEquals(renamedZone.toString(), zs.getZoneName());
+  }
+
+  @Test
+  public void testReencryptOrdering() throws Exception {
+    /* Setup dir as follows:
+     * /zones/zone/[0-3]
+     * /zones/zone/dir/f
+     * /zones/zone/f[0-4]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    Path subdir = new Path(zone, "dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+    for (int i = 0; i < 4; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    for (int i = 0; i < 5; ++i) {
+      DFSTestUtil.createFile(fs, new Path(zone, "f" + Integer.toString(i)), len,
+          (short) 1, 0xFEED);
+    }
+
+    // /zones/zone/f[0-4] should be re-encrypted after /zones/zone/dir/f
+    final Path lastReencryptedFile = new Path(subdir, "f");
+    final Path notReencrypted = new Path(zone, "f0");
+    final FileEncryptionInfo fei = getFileEncryptionInfo(lastReencryptedFile);
+    final FileEncryptionInfo feiLast = getFileEncryptionInfo(notReencrypted);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedFiles(zone.toString(), 5);
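+    // the first batch of 5 covers [0-3] and dir/f, so dir/f is already
+    // re-encrypted while f0, later in the listing, is not.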
+    assertKeyVersionChanged(lastReencryptedFile, fei);
+    assertKeyVersionEquals(notReencrypted, feiLast);
+  }
+
+  @Test
+  public void testDeleteDuringReencrypt() throws Exception {
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    // test zone deleted during re-encrypt
+    getEzManager().pauseReencryptForTesting();
+    getEzManager().resetMetricsForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    fs.delete(zone, true);
+    getEzManager().resumeReencryptForTesting();
+    waitForTotalZones(0);
+    assertNull(getZoneStatus(zone.toString()));
+  }
+
+  @Test
+  public void testZoneDeleteDuringReencrypt() throws Exception {
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // test zone deleted during re-encrypt's checkpointing
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resetMetricsForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    fs.delete(zoneParent, true);
+    getEzManager().resumeReencryptForTesting();
+    waitForTotalZones(0);
+    assertNull(getEzManager().getZoneStatus(zone.toString()));
+
+    // verify zone is cleared
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertFalse(it.hasNext());
+  }
+
+  @Test
+  public void testRestartAfterReencrypt() throws Exception {
+    /* Setup dir as follows:
+     * /zones
+     * /zones/zone
+     * /zones/zone/[0-9]
+     * /zones/zone/dir
+     * /zones/zone/dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path(zone, "dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    final Path encFile0 = new Path(zone, "0");
+    final Path encFile9 = new Path(zone, "9");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+
+    assertKeyVersionChanged(encFile0, fei0);
+    assertKeyVersionChanged(encFile9, fei9);
+
+    final FileEncryptionInfo fei0new = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9new = getFileEncryptionInfo(encFile9);
+    restartClusterDisableReencrypt();
+
+    assertKeyVersionEquals(encFile0, fei0new);
+    assertKeyVersionEquals(encFile9, fei9new);
+    assertNull("Re-encrypt queue should be empty after restart",
+        getReencryptionStatus().getNextUnprocessedZone());
+  }
+
+  @Test
+  public void testRestartWithRenames() throws Exception {
+    /* Setup dir as follows:
+     * /zones
+     * /zones/zone
+     * /zones/zone/f --> renamed to f1
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    DFSTestUtil.createFile(fs, new Path(zone, "f"), len, (short) 1, 0xFEED);
+    fsWrapper.rename(new Path(zone, "f"), new Path(zone, "f1"));
+
+    // re-encrypt
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+
+    // make sure the NN can restart successfully (the rename loads fine
+    // alongside the re-encrypt op since they are logged in the correct order)
+    cluster.restartNameNodes();
+    cluster.waitActive();
+
+    waitForReencryptedZones(1);
+  }
+
+  @Test
+  public void testRestartDuringReencrypt() throws Exception {
+    /* Setup dir as follows:
+     * /zones
+     * /zones/zone
+     * /zones/zone/dir_empty
+     * /zones/zone/dir1/[0-9]
+     * /zones/zone/dir1/dir_empty1
+     * /zones/zone/dir2
+     * /zones/zone/dir2/dir_empty2
+     * /zones/zone/dir2/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    fsWrapper
+        .mkdir(new Path(zone, "dir_empty"), FsPermission.getDirDefault(), true);
+    Path subdir = new Path(zone, "dir2");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    fsWrapper
+        .mkdir(new Path(subdir, "dir_empty2"), FsPermission.getDirDefault(),
+            true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+    subdir = new Path(zone, "dir1");
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    fsWrapper
+        .mkdir(new Path(subdir, "dir_empty1"), FsPermission.getDirDefault(),
+            true);
+
+    final Path encFile0 = new Path(subdir, "0");
+    final Path encFile9 = new Path(subdir, "9");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    restartClusterDisableReencrypt();
+
+    final Long zoneId = fsn.getFSDirectory().getINode(zone.toString()).getId();
+    assertEquals("Re-encrypt should restore to the last checkpoint zone",
+        zoneId, getReencryptionStatus().getNextUnprocessedZone());
+    assertEquals("Re-encrypt should restore to the last checkpoint file",
+        new Path(subdir, "4").toString(),
+        getEzManager().getZoneStatus(zone.toString()).getLastCheckpointFile());
+
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedZones(1);
+    assertKeyVersionChanged(encFile0, fei0);
+    assertKeyVersionChanged(encFile9, fei9);
+    assertNull("Re-encrypt queue should be empty after restart",
+        getReencryptionStatus().getNextUnprocessedZone());
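+    // 11 files expected: dir1/[0-9] plus dir2/f.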
+    assertEquals(11, getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRestartAfterReencryptAndCheckpoint() throws Exception {
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path(zone, "dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    final Path encFile0 = new Path(zone, "0");
+    final Path encFile9 = new Path(zone, "9");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+
+    assertKeyVersionChanged(encFile0, fei0);
+    assertKeyVersionChanged(encFile9, fei9);
+
+    final FileEncryptionInfo fei0new = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9new = getFileEncryptionInfo(encFile9);
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    restartClusterDisableReencrypt();
+
+    assertKeyVersionEquals(encFile0, fei0new);
+    assertKeyVersionEquals(encFile9, fei9new);
+    assertNull("Re-encrypt queue should be empty after restart",
+        getReencryptionStatus().getNextUnprocessedZone());
+  }
+
+  @Test
+  public void testReencryptLoadedFromEdits() throws Exception {
+    /*
+     * /zones/zone/[0-9]
+     * /zones/zone/dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path(zone, "dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    final Path encFile0 = new Path(zone, "0");
+    final Path encFile9 = new Path(zone, "9");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // disable re-encrypt for testing, and issue a command
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+
+    // verify after restart the command is loaded
+    restartClusterDisableReencrypt();
+    waitForQueuedZones(1);
+
+    // Let the re-encrypt start running.
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedZones(1);
+    assertKeyVersionChanged(encFile0, fei0);
+    assertKeyVersionChanged(encFile9, fei9);
+
+    // verify status
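+    // 11 files expected: [0-9] plus dir/f.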
+    verifyZoneStatus(zone, fei0, 11);
+  }
+
+  private void verifyZoneStatus(final Path zone, final FileEncryptionInfo fei,
+      final long expectedFiles) throws IOException {
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    final ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+    if (fei != null) {
+      assertNotEquals(fei.getEzKeyVersionName(), zs.getEzKeyVersionName());
+    }
+    assertEquals(expectedFiles, zs.getFilesReencrypted());
+  }
+
+  @Test
+  public void testReencryptLoadedFromFsimage() throws Exception {
+    /*
+     * /zones/zone/[0-9]
+     * /zones/zone/dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path(zone, "dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    final Path encFile0 = new Path(zone, "0");
+    final Path encFile9 = new Path(zone, "9");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
+    final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // disable re-encrypt for testing, and issue a command
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    // verify after loading from fsimage the command is loaded
+    restartClusterDisableReencrypt();
+    waitForQueuedZones(1);
+
+    // Let the re-encrypt start running.
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedZones(1);
+    assertKeyVersionChanged(encFile0, fei0);
+    assertKeyVersionChanged(encFile9, fei9);
+
+    // verify status
+    verifyZoneStatus(zone, fei0, 11);
+  }
+
+  @Test
+  public void testReencryptCommandsQueuedOrdering() throws Exception {
+    final Path zoneParent = new Path("/zones");
+    final String zoneBaseName = zoneParent.toString() + "/zone";
+    final int numZones = 10;
+    for (int i = 0; i < numZones; ++i) {
+      final Path zone = new Path(zoneBaseName + i);
+      fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+      dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    }
+
+    // Disable re-encrypt for testing, and issue commands
+    getEzManager().pauseReencryptForTesting();
+    for (int i = 0; i < numZones; ++i) {
+      dfsAdmin.reencryptEncryptionZone(new Path(zoneBaseName + i),
+          ReencryptAction.START);
+    }
+    waitForQueuedZones(numZones);
+
+    // Verify commands are queued in the same order submitted
+    ReencryptionStatus rzs = new ReencryptionStatus(getReencryptionStatus());
+    for (int i = 0; i < numZones; ++i) {
+      Long zoneId = fsn.getFSDirectory().getINode(zoneBaseName + i).getId();
+      assertEquals(zoneId, rzs.getNextUnprocessedZone());
+      rzs.removeZone(zoneId);
+    }
+
+    // Cancel some zones
+    Set<Integer> cancelled = new HashSet<>(Arrays.asList(0, 3, 4));
+    for (int cancel : cancelled) {
+      dfsAdmin.reencryptEncryptionZone(new Path(zoneBaseName + cancel),
+          ReencryptAction.CANCEL);
+    }
+
+    restartClusterDisableReencrypt();
+    waitForQueuedZones(numZones - cancelled.size());
+    rzs = new ReencryptionStatus(getReencryptionStatus());
+    for (int i = 0; i < numZones; ++i) {
+      if (cancelled.contains(i)) {
+        continue;
+      }
+      Long zoneId = fsn.getFSDirectory().getINode(zoneBaseName + i).getId();
+      assertEquals(zoneId, rzs.getNextUnprocessedZone());
+      rzs.removeZone(zoneId);
+    }
+
+    // Verify the same is true after loading from FSImage
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    restartClusterDisableReencrypt();
+    waitForQueuedZones(numZones - cancelled.size());
+    rzs = new ReencryptionStatus(getReencryptionStatus());
+    for (int i = 0; i < numZones; ++i) {
+      if (cancelled.contains(i)) {
+        continue;
+      }
+      Long zoneId = fsn.getFSDirectory().getINode(zoneBaseName + i).getId();
+      assertEquals(zoneId, rzs.getNextUnprocessedZone());
+      rzs.removeZone(zoneId);
+    }
+  }
+
+  @Test
+  public void testReencryptNestedZones() throws Exception {
+    /* Setup dir as follows:
+     * / <- EZ
+     * /file
+     * /dir/dfile
+     * /level1  <- nested EZ
+     * /level1/fileL1-[0~2]
+     * /level1/level2/ <- nested EZ
+     * /level1/level2/fileL2-[0~3]
+     */
+    final int len = 8196;
+    final Path zoneRoot = new Path("/");
+    final Path zoneL1 = new Path(zoneRoot, "level1");
+    final Path zoneL2 = new Path(zoneL1, "level2");
+    final Path nonzoneDir = new Path(zoneRoot, "dir");
+    dfsAdmin.createEncryptionZone(zoneRoot, TEST_KEY, NO_TRASH);
+    DFSTestUtil
+        .createFile(fs, new Path(zoneRoot, "file"), len, (short) 1, 0xFEED);
+    DFSTestUtil
+        .createFile(fs, new Path(nonzoneDir, "dfile"), len, (short) 1, 0xFEED);
+    fsWrapper.mkdir(zoneL1, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zoneL1, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 3; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zoneL1, "fileL1-" + i), len, (short) 1,
+              0xFEED);
+    }
+    fsWrapper.mkdir(zoneL2, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zoneL2, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 4; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zoneL2, "fileL2-" + i), len, (short) 1,
+              0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Disable re-encrypt, send re-encrypt on '/', verify queue
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zoneRoot, ReencryptAction.START);
+    waitForQueuedZones(1);
+    ReencryptionStatus rzs = getReencryptionStatus();
+    assertEquals(
+        (Long) fsn.getFSDirectory().getINode(zoneRoot.toString()).getId(),
+        rzs.getNextUnprocessedZone());
+
+    // Resume re-encrypt, verify files re-encrypted
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zoneRoot.toString());
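+    // only /file and /dir/dfile belong to the root zone; files under the
+    // nested zones are excluded.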
+    assertEquals(2, getZoneStatus(zoneRoot.toString()).getFilesReencrypted());
+
+    // Same tests on a child EZ.
+    getEzManager().resetMetricsForTesting();
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zoneL1, ReencryptAction.START);
+    waitForQueuedZones(1);
+    rzs = getReencryptionStatus();
+    assertEquals(
+        (Long) fsn.getFSDirectory().getINode(zoneL1.toString()).getId(),
+        rzs.getNextUnprocessedZone());
+
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zoneL1.toString());
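+    // fileL1-[0~2] only; files under the nested level2 zone are excluded.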
+    assertEquals(3, getZoneStatus(zoneL1.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceCreateHandler() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    int expected = 10;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    // Resume the re-encrypt thread
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    /* creates the following:
+     * /dir/file8[0~5]
+     * /dir/dirsub/file[10-14]
+     * /dir/sub/file[15-19]
+     */
+    for (int i = 0; i < 6; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file8" + i), len, (short) 1, 0xFEED);
+    }
+    // we don't care about newly created files since they already use the new
+    // edek; re-encryption naturally processes the listing from the last
+    // checkpoint.
+    final Path subdir = new Path(zone, "dirsub");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+    // the above are created before the checkpoint position, so they are not
+    // re-encrypted.
+    final Path sub = new Path(zone, "sub");
+    fsWrapper.mkdir(sub, FsPermission.getDirDefault(), true);
+    for (int i = 15; i < 20; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(sub, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    // resume re-encrypt thread which was paused after first checkpoint
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteHandler() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     * /dir/subdir/file[10-14]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    int expected = 15;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+    final Path subdir = new Path(zone, "subdir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // proceed to first checkpoint (5 files), delete files/subdir, then resume
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    fsWrapper.delete(new Path(zone, "file5"), true);
+    fsWrapper.delete(new Path(zone, "file8"), true);
+    expected -= 2;
+    fsWrapper.delete(subdir, true);
+    expected -= 5;
+
+    // resume re-encrypt thread which was paused after first checkpoint
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteUpdater() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     * /dir/subdir/file[10-14]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    int expected = 15;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+    final Path subdir = new Path(zone, "subdir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // proceed to first checkpoint (5 files), delete files/subdir, then resume
+    getEzManager().pauseForTestingAfterNthCheckpoint(zone.toString(), 1);
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+
+    waitForReencryptedFiles(zone.toString(), 5);
+    getEzManager().resumeReencryptForTesting();
+
+    // give handler thread some time to process the files before deletion.
+    Thread.sleep(3000);
+    fsWrapper.delete(new Path(zone, "file5"), true);
+    fsWrapper.delete(new Path(zone, "file8"), true);
+    expected -= 2;
+    fsWrapper.delete(subdir, true);
+    expected -= 5;
+
+    // resume updater thread which was paused after first checkpoint, verify
+    // deleted files are skipped.
+    getEzManager().resumeReencryptUpdaterForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteCurrentDirHandler() throws Exception {
+    /* Setup dir as follows:
+     * /dir/subdir/file[0~9]
+     * /dir/subdir2/file[10-14]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    final Path subdir = new Path(zone, "subdir");
+    int expected = 15;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+    final Path subdir2 = new Path(zone, "subdir2");
+    fsWrapper.mkdir(subdir2, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil.createFile(fs, new Path(subdir2, "file" + i), len, (short) 1,
+          0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // proceed to first checkpoint (5 files), delete subdir, then resume
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    fsWrapper.delete(subdir, true);
+    expected -= 5;
+
+    // resume re-encrypt thread which was paused after first checkpoint
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteCurrentDirUpdater() throws Exception {
+    /* Setup dir as follows:
+     * /dir/subdir/file[0~9]
+     * /dir/subdir2/file[10-14]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    final Path subdir = new Path(zone, "subdir");
+    int expected = 15;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+    final Path subdir2 = new Path(zone, "subdir2");
+    fsWrapper.mkdir(subdir2, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil.createFile(fs, new Path(subdir2, "file" + i), len, (short) 1,
+          0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // proceed to first checkpoint (5 files), delete subdir, then resume
+    getEzManager().pauseForTestingAfterNthCheckpoint(zone.toString(), 1);
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+
+    waitForReencryptedFiles(zone.toString(), 5);
+    getEzManager().resumeReencryptForTesting();
+
+    // give handler thread some time to process the files before deletion.
+    Thread.sleep(3000);
+    fsWrapper.delete(subdir, true);
+    expected -= 5;
+
+    // resume updater thread which was paused after first checkpoint, verify
+    // deleted files are skipped.
+    getEzManager().resumeReencryptUpdaterForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteZoneHandler() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~10]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 11; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // let both handler and updater pause, then delete zone.
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().pauseForTestingAfterNthCheckpoint(zone.toString(), 1);
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+
+    Thread.sleep(3000);
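+    // grab the in-flight re-encryption futures via reflection, so we can
+    // verify they all complete once the zone is deleted.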
+    EncryptionZoneManager ezm = getEzManager();
+    ReencryptionHandler handler = (ReencryptionHandler) Whitebox
+        .getInternalState(ezm, "reencryptionHandler");
+    Map<Long, ZoneSubmissionTracker> tasks =
+        (Map<Long, ZoneSubmissionTracker>) Whitebox
+            .getInternalState(handler, "submissions");
+    List<Future> futures = new LinkedList<>();
+    for (ZoneSubmissionTracker zst : tasks.values()) {
+      for (Future f : zst.getTasks()) {
+        futures.add(f);
+      }
+    }
+    fsWrapper.delete(zone, true);
+    getEzManager().resumeReencryptForTesting();
+
+    // verify no running tasks
+    for (Future f : futures) {
+      assertTrue(f.isDone());
+    }
+
+    waitForTotalZones(0);
+  }
+
+  @Test
+  public void testRaceDeleteCreateHandler() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    int expected = 10;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    // Resume the re-encrypt thread
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    final Path recreated = new Path(zone, "file9");
+    fsWrapper.delete(recreated, true);
+    DFSTestUtil.createFile(fs, recreated, len, (short) 2, 0xFEED);
+    expected -= 1; // newly created files use new edek, no need to re-encrypt
+
+    // resume re-encrypt thread which was paused after first checkpoint
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testRaceDeleteCreateUpdater() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    int expected = 10;
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthCheckpoint(zone.toString(), 1);
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+    getEzManager().resumeReencryptForTesting();
+
+    // give handler thread some time to process the files before deletion.
+    Thread.sleep(3000);
+    final Path recreated = new Path(zone, "file9");
+    final FileEncryptionInfo feiOrig = getFileEncryptionInfo(recreated);
+    final String contentOrig = DFSTestUtil.readFile(fs, recreated);
+    fsWrapper.delete(recreated, true);
+    DFSTestUtil.createFile(fs, recreated, len, (short) 2, 0xFEED);
+    expected -= 1;
+
+    // resume updater thread which was paused after first checkpoint
+    getEzManager().resumeReencryptUpdaterForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(expected,
+        getZoneStatus(zone.toString()).getFilesReencrypted());
+
+    // verify the new file is using its own edek, with a new keyversion,
+    // and can be decrypted correctly.
+    assertKeyVersionChanged(recreated, feiOrig);
+    final String content = DFSTestUtil.readFile(fs, recreated);
+    assertEquals(contentOrig, content);
+  }
+
+  // TODO: update test once HDFS-11203 is implemented.
+  @Test
+  public void testReencryptRaceRename() throws Exception {
+    /* Setup dir as follows:
+     * /dir/file[0~9]
+     * /dir/subdir/file[10-14]
+     */
+    final int len = 8196;
+    final Path zone = new Path("/dir");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
+    }
+    final Path subdir = new Path(zone, "subdir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    for (int i = 10; i < 15; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // Issue the command re-encrypt and pause it
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    // Resume the re-encrypt thread
+    getEzManager().resumeReencryptForTesting();
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    try {
+      fsWrapper.rename(new Path(zone, "file8"), new Path(zone, "file08"));
+      fail("rename a file in an EZ should be disabled");
+    } catch (IOException e) {
+      assertExceptionContains("under re-encryption", e);
+    }
+
+    // resume handler and pause updater, test again.
+    getEzManager().pauseReencryptUpdaterForTesting();
+    getEzManager().resumeReencryptForTesting();
+    try {
+      fsWrapper.rename(new Path(zone, "file8"), new Path(zone, "file08"));
+      fail("rename a file in an EZ should be disabled");
+    } catch (IOException e) {
+      assertExceptionContains("under re-encryption", e);
+    }
+  }
+
+  @Test
+  public void testReencryptSnapshots() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     * /dir/f
+     *
+     * /zones/zone is snapshottable; rename file 5 to 5new and 6 to 6new,
+     * then delete 6new (so that file is only referred to from a snapshot).
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.allowSnapshot(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path("/dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+    // create a snapshot and rename files, so an INodeReference is created.
+    final Path zoneSnap = fs.createSnapshot(zone);
+    fsWrapper.rename(new Path(zone, "5"), new Path(zone, "5new"));
+    fsWrapper.rename(new Path(zone, "6"), new Path(zone, "6new"));
+    fsWrapper.delete(new Path(zone, "6new"), true);
+
+    // test re-encrypt on snapshot dir
+    final Path encFile1 = new Path(zone, "0");
+    final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile1);
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    try {
+      dfsAdmin.reencryptEncryptionZone(zoneSnap, ReencryptAction.START);
+      fail("Reencrypt command on snapshot path should fail.");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception", expected);
+      assertTrue(expected
+          .unwrapRemoteException() instanceof SnapshotAccessControlException);
+    }
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
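+    // 9 files expected: 10 originals, minus file 6 which was deleted and is
+    // now referred to only from the snapshot.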
+    waitForReencryptedFiles(zone.toString(), 9);
+    assertKeyVersionChanged(encFile1, fei0);
+  }
+
+  private void restartClusterDisableReencrypt() throws Exception {
+    cluster.restartNameNode(false);
+    fsn = cluster.getNamesystem();
+    getEzManager().pauseReencryptForTesting();
+    cluster.waitActive();
+  }
+
+  private void waitForReencryptedZones(final int expected)
+      throws TimeoutException, InterruptedException {
+    LOG.info("Waiting for re-encrypted zones to be {}", expected);
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return getReencryptionStatus().getNumZonesReencrypted() == expected;
+        }
+      }, 100, 10000);
+    } finally {
+      LOG.info("Re-encrypted zones = {} ",
+          getReencryptionStatus().getNumZonesReencrypted());
+    }
+  }
+
+  private void waitForQueuedZones(final int expected)
+      throws TimeoutException, InterruptedException {
+    LOG.info("Waiting for queued zones for re-encryption to be {}", expected);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return getReencryptionStatus().zonesQueued() == expected;
+      }
+    }, 100, 10000);
+  }
+
+  private void waitForTotalZones(final int expected)
+      throws TimeoutException, InterruptedException {
+    LOG.info("Waiting for queued zones for re-encryption to be {}", expected);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return getReencryptionStatus().zonesTotal() == expected;
+      }
+    }, 100, 10000);
+  }
+
+  private void waitForZoneCompletes(final String zone)
+      throws TimeoutException, InterruptedException {
+    LOG.info("Waiting for re-encryption zone {} to complete.", zone);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          return getZoneStatus(zone).getState()
+              == ZoneReencryptionStatus.State.Completed;
+        } catch (Exception ex) {
+          LOG.error("Exception caught", ex);
+          return false;
+        }
+      }
+    }, 100, 10000);
+  }
+
+  private EncryptionZoneManager getEzManager() {
+    return fsn.getFSDirectory().ezManager;
+  }
+
+  private ReencryptionStatus getReencryptionStatus() {
+    return getEzManager().getReencryptionStatus();
+  }
+
+  private ZoneReencryptionStatus getZoneStatus(final String zone)
+      throws IOException {
+    return getEzManager().getZoneStatus(zone);
+  }
+
+  private void waitForReencryptedFiles(final String zone, final int expected)
+      throws TimeoutException, InterruptedException {
+    LOG.info("Waiting for total re-encrypted file count to be {}", expected);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          return getZoneStatus(zone).getFilesReencrypted() == expected;
+        } catch (IOException e) {
+          return false;
+        }
+      }
+    }, 100, 10000);
+  }
+
+  private void assertKeyVersionChanged(final Path file,
+      final FileEncryptionInfo original) throws Exception {
+    final FileEncryptionInfo actual = getFileEncryptionInfo(file);
+    assertNotEquals("KeyVersion should be different",
+        original.getEzKeyVersionName(), actual.getEzKeyVersionName());
+  }
+
+  private void assertKeyVersionEquals(final Path file,
+      final FileEncryptionInfo expected) throws Exception {
+    final FileEncryptionInfo actual = getFileEncryptionInfo(file);
+    assertEquals("KeyVersion should be the same",
+        expected.getEzKeyVersionName(), actual.getEzKeyVersionName());
+  }
+
+  @Test
+  public void testReencryptCancel() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     * /dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path("/dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // disable, test basic
+    getEzManager().pauseReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForQueuedZones(1);
+
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(0, getZoneStatus(zone.toString()).getFilesReencrypted());
+
+    // test same command resubmission
+    try {
+      dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
+    } catch (RemoteException expected) {
+      assertExceptionContains("not under re-encryption", expected);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // test cancelling half-way
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    getEzManager().resumeReencryptForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedFiles(zone.toString(), 5);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
+    getEzManager().resumeReencryptForTesting();
+    waitForZoneCompletes(zone.toString());
+    assertEquals(5, getZoneStatus(zone.toString()).getFilesReencrypted());
+    assertNull(
+        getEzManager().getZoneStatus(zone.toString()).getLastCheckpointFile());
+    assertNull(getReencryptionStatus().getNextUnprocessedZone());
+
+    // test cancelling non-EZ dir
+    try {
+      dfsAdmin.reencryptEncryptionZone(subdir, ReencryptAction.CANCEL);
+      fail("Re-encrypting non-EZ should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertExceptionContains("not the root of an encryption zone", expected);
+    }
+
+    // test cancelling non-existing dir
+    try {
+      dfsAdmin.reencryptEncryptionZone(new Path(zone, "notexist"),
+          ReencryptAction.CANCEL);
+      fail("Re-encrypting non-existing dir should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertTrue(
+          expected.unwrapRemoteException() instanceof FileNotFoundException);
+    }
+
+    // test cancelling directly on a EZ file
+    final Path encFile = new Path(zone, "0");
+    try {
+      dfsAdmin.reencryptEncryptionZone(encFile, ReencryptAction.CANCEL);
+      fail("Re-encrypting on a file should fail");
+    } catch (RemoteException expected) {
+      LOG.info("Expected exception caught.", expected);
+      assertExceptionContains("not the root of an encryption zone", expected);
+    }
+
+    // final check - only 5 files should have been re-encrypted overall.
+    assertEquals(5, getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testReencryptCancelForUpdater() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     * /dir/f
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+    final Path subdir = new Path("/dir");
+    fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
+    DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // disable, test basic
+    getEzManager().pauseReencryptUpdaterForTesting();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    Thread.sleep(3000);
+
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
+    getEzManager().resumeReencryptUpdaterForTesting();
+    waitForZoneCompletes(zone.toString());
+    Thread.sleep(3000);
+    assertEquals(0, getZoneStatus(zone.toString()).getFilesReencrypted());
+  }
+
+  @Test
+  public void testReencryptionWithoutProvider() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    // re-encrypt the zone
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+
+    // start NN without providers
+    cluster.getConfiguration(0)
+        .unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
+    cluster.restartNameNodes();
+    cluster.waitActive();
+
+    // test re-encrypt should fail
+    try {
+      dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+      fail("should not be able to re-encrypt");
+    } catch (RemoteException expected) {
+      assertExceptionContains("rejected", expected.unwrapRemoteException());
+    }
+    try {
+      dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.CANCEL);
+      fail("should not be able to cancel re-encrypt");
+    } catch (RemoteException expected) {
+      assertExceptionContains("rejected", expected.unwrapRemoteException());
+    }
+
+    // test listReencryptionStatus should still work
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+    assertEquals(10, zs.getFilesReencrypted());
+  }
+
+  @Test
+  public void testReencryptionNNSafeMode() throws Exception {
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+    // mark pause after first checkpoint (5 files)
+    getEzManager().pauseForTestingAfterNthSubmission(1);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedFiles(zone.toString(), 5);
+
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    getEzManager().resumeReencryptForTesting();
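+    // While the NN is in safe mode the updater should make no progress;
+    // the re-encrypted file count must stay at 5 across repeated checks.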
+    for (int i = 0; i < 3; ++i) {
+      Thread.sleep(1000);
+      RemoteIterator<ZoneReencryptionStatus> it =
+          dfsAdmin.listReencryptionStatus();
+      assertTrue(it.hasNext());
+      ZoneReencryptionStatus zs = it.next();
+      assertEquals(zone.toString(), zs.getZoneName());
+      assertEquals(0, zs.getCompletionTime());
+      assertEquals(5, zs.getFilesReencrypted());
+    }
+
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    waitForReencryptedFiles(zone.toString(), 10);
+  }
+
+  @Test
+  public void testReencryptionKMSDown() throws Exception {
+    class MyInjector extends EncryptionFaultInjector {
+      private volatile int exceptionCount = 0;
+
+      MyInjector(int numFailures) {
+        exceptionCount = numFailures;
+      }
+
+      @Override
+      public void reencryptEncryptedKeys() throws IOException {
+        if (exceptionCount > 0) {
+          --exceptionCount;
+          throw new IOException("Injected KMS failure");
+        }
+      }
+    }
+    final MyInjector injector = new MyInjector(1);
+    EncryptionFaultInjector.instance = injector;
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    // re-encrypt the zone
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+    assertEquals(0, injector.exceptionCount);
+
+    // test listReencryptionStatus should still work
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
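+    // The single injected KMS failure fails one 5-file batch; the other
+    // batch completes, so 5 files re-encrypted and 5 failures are expected.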
+    assertEquals(5, zs.getFilesReencrypted());
+    assertEquals(5, zs.getNumReencryptionFailures());
+  }
+
+  @Test
+  public void testReencryptionUpdaterFaultOneTask() throws Exception {
+    class MyInjector extends EncryptionFaultInjector {
+      private volatile int exceptionCount = 0;
+
+      MyInjector(int numFailures) {
+        exceptionCount = numFailures;
+      }
+
+      @Override
+      public void reencryptUpdaterProcessOneTask() throws IOException {
+        if (exceptionCount > 0) {
+          --exceptionCount;
+          throw new IOException("Injected process task failure");
+        }
+      }
+    }
+    final MyInjector injector = new MyInjector(1);
+    EncryptionFaultInjector.instance = injector;
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    // re-encrypt the zone
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+    assertEquals(0, injector.exceptionCount);
+
+    // test listReencryptionStatus should still work
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
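+    // A failed updater task covers a whole 5-file batch, but is recorded
+    // as a single task failure.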
+    assertEquals(5, zs.getFilesReencrypted());
+    assertEquals(1, zs.getNumReencryptionFailures());
+  }
+
+
+  @Test
+  public void testReencryptionUpdaterFaultCkpt() throws Exception {
+    class MyInjector extends EncryptionFaultInjector {
+      private volatile int exceptionCount = 0;
+
+      MyInjector(int numFailures) {
+        exceptionCount = numFailures;
+      }
+
+      @Override
+      public void reencryptUpdaterProcessCheckpoint() throws IOException {
+        if (exceptionCount > 0) {
+          --exceptionCount;
+          throw new IOException("Injected process checkpoint failure");
+        }
+      }
+    }
+    final MyInjector injector = new MyInjector(1);
+    EncryptionFaultInjector.instance = injector;
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    // re-encrypt the zone
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+    assertEquals(0, injector.exceptionCount);
+
+    // test listReencryptionStatus should still work
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
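+    // A failed checkpoint is recorded as one failure, yet all 10 files
+    // still end up re-encrypted.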
+    assertEquals(10, zs.getFilesReencrypted());
+    assertEquals(1, zs.getNumReencryptionFailures());
+  }
+
+  @Test
+  public void testReencryptionUpdaterFaultRecover() throws Exception {
+    class MyInjector extends EncryptionFaultInjector {
+      private volatile int exceptionCount = 0;
+
+      MyInjector(int oneTask) {
+        exceptionCount = oneTask;
+      }
+
+      @Override
+      public void reencryptUpdaterProcessOneTask() throws IOException {
+        if (exceptionCount > 0) {
+          --exceptionCount;
+          throw new RetriableException("Injected process task failure");
+        }
+      }
+    }
+    final MyInjector injector = new MyInjector(10);
+    EncryptionFaultInjector.instance = injector;
+    /* Setup test dir:
+     * /zones/zone/[0-9]
+     */
+    final int len = 8196;
+    final Path zoneParent = new Path("/zones");
+    final Path zone = new Path(zoneParent, "zone");
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
+    for (int i = 0; i < 10; ++i) {
+      DFSTestUtil
+          .createFile(fs, new Path(zone, Integer.toString(i)), len, (short) 1,
+              0xFEED);
+    }
+
+    // re-encrypt the zone
+    fsn.getProvider().rollNewVersion(TEST_KEY);
+    fsn.getProvider().flush();
+
+    final EncryptionZoneManager ezm = getEzManager();
+    final ReencryptionHandler handler = (ReencryptionHandler) Whitebox
+        .getInternalState(ezm, "reencryptionHandler");
+    final ReencryptionUpdater updater = (ReencryptionUpdater) Whitebox
+        .getInternalState(handler, "reencryptionUpdater");
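+    // Shorten the retry interval so the 10 injected RetriableExceptions
+    // are retried quickly; the zone should still complete with 0 failures.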
+    Whitebox.setInternalState(updater, "faultRetryInterval", 50);
+    dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
+    waitForReencryptedZones(1);
+    assertEquals(0, injector.exceptionCount);
+
+    // test listReencryptionStatus should still work
+    RemoteIterator<ZoneReencryptionStatus> it =
+        dfsAdmin.listReencryptionStatus();
+    assertTrue(it.hasNext());
+    ZoneReencryptionStatus zs = it.next();
+    assertEquals(zone.toString(), zs.getZoneName());
+    assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
+    assertTrue(zs.getCompletionTime() > 0);
+    assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+    assertEquals(10, zs.getFilesReencrypted());
+    assertEquals(0, zs.getNumReencryptionFailures());
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
new file mode 100644
index 0000000..f0dd92c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.StopWatch;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY;
+import static org.junit.Assert.fail;
+
+/**
+ * Test class for ReencryptionHandler.
+ */
+public class TestReencryptionHandler {
+
+  protected static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(TestReencryptionHandler.class);
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(180 * 1000);
+
+  @Before
+  public void setup() {
+    GenericTestUtils.setLogLevel(ReencryptionHandler.LOG, Level.TRACE);
+  }
+
+  private ReencryptionHandler mockReencryptionhandler(final Configuration conf)
+      throws IOException {
+    // mock an EncryptionZoneManager backed by a real JavaKeyStoreProvider
+    // so a ReencryptionHandler can be constructed without a NameNode
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(
+            new FileSystemTestHelper().getTestRootDir(), "test.jks").toUri());
+    final EncryptionZoneManager ezm = Mockito.mock(EncryptionZoneManager.class);
+    final KeyProvider kp = KMSUtil.createKeyProvider(conf,
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
+    Mockito.when(ezm.getProvider()).thenReturn(
+        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp));
+    return new ReencryptionHandler(ezm, conf);
+  }
+
+  @Test
+  public void testThrottle() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY,
+        0.5);
+    final ReencryptionHandler rh = mockReencryptionhandler(conf);
+
+    // mock StopWatches so all = 30s, locked = 20s. With ratio = .5, throttle
+    // should wait until total = 20 / 0.5 = 40s, i.e. for 40 - 30 = 10s.
+    final StopWatch mockAll = Mockito.mock(StopWatch.class);
+    Mockito.when(mockAll.now(TimeUnit.MILLISECONDS)).thenReturn((long) 30000);
+    Mockito.when(mockAll.reset()).thenReturn(mockAll);
+    final StopWatch mockLocked = Mockito.mock(StopWatch.class);
+    Mockito.when(mockLocked.now(TimeUnit.MILLISECONDS))
+        .thenReturn((long) 20000);
+    Mockito.when(mockLocked.reset()).thenReturn(mockLocked);
+    final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
+    Whitebox.setInternalState(rh, "throttleTimerAll", mockAll);
+    Whitebox.setInternalState(rh, "throttleTimerLocked", mockLocked);
+    Whitebox.setInternalState(rh, "taskQueue", queue);
+    final StopWatch sw = new StopWatch().start();
+    rh.throttle();
+    sw.stop();
+    assertTrue("should have throttled for at least 4 second",
+        sw.now(TimeUnit.MILLISECONDS) > 8000);
+    assertTrue("should have throttled for at most 6 second",
+        sw.now(TimeUnit.MILLISECONDS) < 12000);
+  }
+
+  @Test
+  public void testThrottleNoOp() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY,
+        0.5);
+    final ReencryptionHandler rh = mockReencryptionhandler(conf);
+
+    // mock StopWatches so all = 30s, locked = 10s. With ratio = .5, throttle
+    // should not happen.
+    StopWatch mockAll = Mockito.mock(StopWatch.class);
+    Mockito.when(mockAll.now(TimeUnit.MILLISECONDS)).thenReturn((long) 30000);
+    Mockito.when(mockAll.reset()).thenReturn(mockAll);
+    StopWatch mockLocked = Mockito.mock(StopWatch.class);
+    Mockito.when(mockLocked.now(TimeUnit.MILLISECONDS))
+        .thenReturn((long) 10000);
+    Mockito.when(mockLocked.reset()).thenReturn(mockLocked);
+    final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
+    Whitebox.setInternalState(rh, "throttleTimerAll", mockAll);
+    Whitebox.setInternalState(rh, "throttleTimerLocked", mockLocked);
+    Whitebox.setInternalState(rh, "taskQueue", queue);
+    final Map<Long, ReencryptionUpdater.ZoneSubmissionTracker>
+        submissions = new HashMap<>();
+    Whitebox.setInternalState(rh, "submissions", submissions);
+    StopWatch sw = new StopWatch().start();
+    rh.throttle();
+    sw.stop();
+    assertTrue("should not have throttled",
+        sw.now(TimeUnit.MILLISECONDS) < 1000);
+  }
+
+  @Test
+  public void testThrottleConfigs() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY,
+        -1.0);
+    try {
+      mockReencryptionhandler(conf);
+      fail("Should not be able to init");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(" is not positive", e);
+    }
+
+    conf.setDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY,
+        0.0);
+    try {
+      mockReencryptionhandler(conf);
+      fail("Should not be able to init");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(" is not positive", e);
+    }
+  }
+
+  @Test
+  public void testThrottleAccumulatingTasks() throws Exception {
+    final Configuration conf = new Configuration();
+    final ReencryptionHandler rh = mockReencryptionhandler(conf);
+
+    // mock tasks piling up
+    final Map<Long, ReencryptionUpdater.ZoneSubmissionTracker>
+        submissions = new HashMap<>();
+    final ReencryptionUpdater.ZoneSubmissionTracker zst =
+        new ReencryptionUpdater.ZoneSubmissionTracker();
+    submissions.put(1L, zst);
+    Future mock = Mockito.mock(Future.class);
+    for (int i = 0; i < Runtime.getRuntime().availableProcessors() * 3; ++i) {
+      zst.addTask(mock);
+    }
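+    // throttle() is expected to block while outstanding tasks pile up;
+    // the helper thread below clears them after ~3s, so throttle() should
+    // return only after at least that long.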
+
+    Thread removeTaskThread = new Thread() {
+      public void run() {
+        try {
+          Thread.sleep(3000);
+        } catch (InterruptedException ie) {
+          LOG.info("removeTaskThread interrupted.");
+          Thread.currentThread().interrupt();
+        }
+        zst.getTasks().clear();
+      }
+    };
+
+    Whitebox.setInternalState(rh, "submissions", submissions);
+    final StopWatch sw = new StopWatch().start();
+    removeTaskThread.start();
+    rh.throttle();
+    sw.stop();
+    assertTrue("should have throttled for at least 3 second",
+        sw.now(TimeUnit.MILLISECONDS) > 3000);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index d2f41be..c71d049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -17,15 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
-import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +33,7 @@
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -50,14 +42,23 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
+import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * This class tests INodeFile with striped feature.
  */
@@ -88,6 +89,12 @@
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
+  @Before
+  public void init() {
+    Configuration conf = new HdfsConfiguration();
+    ErasureCodingPolicyManager.getInstance().init(conf);
+  }
+
   @Test
   public void testInvalidECPolicy() throws IllegalArgumentException {
     thrown.expect(IllegalArgumentException.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index fb83a3e..537612ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -23,9 +23,14 @@
 import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -38,12 +43,15 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestOpenFilesWithSnapshot {
+  private static final Log LOG =
+      LogFactory.getLog(TestOpenFilesWithSnapshot.class.getName());
   private final Configuration conf = new Configuration();
   MiniDFSCluster cluster = null;
   DistributedFileSystem fs = null;
@@ -622,6 +630,219 @@
     hbaseOutputStream.close();
   }
 
+  /**
+   * Test that clients writing to open files are not interrupted when
+   * snapshots that captured those open files are deleted.
+   */
+  @Test (timeout = 240000)
+  public void testOpenFileWritingAcrossSnapDeletion() throws Exception {
+    final Path snapRootDir = new Path("/level_0_A");
+    final String flumeFileName = "flume.log";
+    final String hbaseFileName = "hbase.log";
+    final String snap1Name = "snap_1";
+    final String snap2Name = "snap_2";
+    final String snap3Name = "snap_3";
+
+    // Create files and open streams
+    final Path flumeFile = new Path(snapRootDir, flumeFileName);
+    FSDataOutputStream flumeOut = fs.create(flumeFile, false,
+        8000, (short)3, 1048576);
+    flumeOut.close();
+    final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
+    FSDataOutputStream hbaseOut = fs.create(hbaseFile, false,
+        8000, (short)3, 1048576);
+    hbaseOut.close();
+
+    final AtomicBoolean writerError = new AtomicBoolean(false);
+    final CountDownLatch startLatch = new CountDownLatch(1);
+    final CountDownLatch deleteLatch = new CountDownLatch(1);
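+    // The writer thread counts down startLatch after 50000 writes (gating
+    // snapshot creation below) and deleteLatch after 100000 (gating snapshot
+    // deletion), so snapshots always capture a file under active write.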
+    Thread t = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          FSDataOutputStream flumeOutputStream = fs.append(flumeFile, 8000);
+          FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile, 8000);
+          byte[] bytes = new byte[(int) (1024 * 0.2)];
+          Random r = new Random(Time.now());
+
+          for (int i = 0; i < 200000; i++) {
+            r.nextBytes(bytes);
+            flumeOutputStream.write(bytes);
+            if (hbaseOutputStream != null) {
+              hbaseOutputStream.write(bytes);
+            }
+            if (i == 50000) {
+              startLatch.countDown();
+            } else if (i == 100000) {
+              deleteLatch.countDown();
+            } else if (i == 150000) {
+              hbaseOutputStream.hsync();
+              fs.delete(hbaseFile, true);
+              try {
+                hbaseOutputStream.close();
+              } catch (Exception e) {
+                // since the file is deleted before the open stream close,
+                // it might throw FileNotFoundException. Ignore the
+                // expected exception.
+              }
+              hbaseOutputStream = null;
+            } else if (i % 5000 == 0) {
+              LOG.info("Write pos: " + flumeOutputStream.getPos()
+                  + ", size: " + fs.getFileStatus(flumeFile).getLen()
+                  + ", loop: " + (i + 1));
+            }
+          }
+        } catch (Exception e) {
+          LOG.warn("Writer error: " + e);
+          writerError.set(true);
+        }
+      }
+    });
+    t.start();
+
+    startLatch.await();
+    final Path snap1Dir = SnapshotTestHelper.createSnapshot(
+        fs, snapRootDir, snap1Name);
+    final Path flumeS1Path = new Path(snap1Dir, flumeFileName);
+    LOG.info("Snap1 file status: " + fs.getFileStatus(flumeS1Path));
+    LOG.info("Current file status: " + fs.getFileStatus(flumeFile));
+
+    deleteLatch.await();
+    LOG.info("Snap1 file status: " + fs.getFileStatus(flumeS1Path));
+    LOG.info("Current file status: " + fs.getFileStatus(flumeFile));
+
+    // Verify that deleting the snapshot which captured the under-construction
+    // file does not truncate that file, and that the thread writing to it
+    // does not crash on newer block allocations.
+    LOG.info("Deleting " + snap1Name);
+    fs.deleteSnapshot(snapRootDir, snap1Name);
+
+    // Verify that creating and deleting snapshots newer than the oldest
+    // snapshot does not crash the thread writing to the under-construction
+    // file.
+    SnapshotTestHelper.createSnapshot(fs, snapRootDir, snap2Name);
+    SnapshotTestHelper.createSnapshot(fs, snapRootDir, snap3Name);
+    fs.deleteSnapshot(snapRootDir, snap3Name);
+    fs.deleteSnapshot(snapRootDir, snap2Name);
+    SnapshotTestHelper.createSnapshot(fs, snapRootDir, "test");
+
+    t.join();
+    Assert.assertFalse("Client encountered writing error!", writerError.get());
+
+    restartNameNode();
+    cluster.waitActive();
+  }
+
+  /**
+   * Verify snapshots with open files captured are safe even when the
+   * 'current' version of the file is truncated and appended later.
+   */
+  @Test (timeout = 120000)
+  public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception {
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
+        true);
+    // Construct the directory tree
+    final Path dir = new Path("/A/B/C");
+    fs.mkdirs(dir);
+
+    // String constants
+    final Path hbaseSnapRootDir = dir;
+    final String hbaseFileName = "hbase.wal";
+    final String hbaseSnap1Name = "hbase_snap_s1";
+    final String hbaseSnap2Name = "hbase_snap_s2";
+    final String hbaseSnap3Name = "hbase_snap_s3";
+    final String hbaseSnap4Name = "hbase_snap_s4";
+
+    // Create files and open a stream
+    final Path hbaseFile = new Path(dir, hbaseFileName);
+    createFile(hbaseFile);
+    final FileChecksum hbaseWALFileCksum0 =
+        fs.getFileChecksum(hbaseFile);
+    FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);
+
+    // Create Snapshot S1
+    final Path hbaseS1Dir = SnapshotTestHelper.createSnapshot(
+        fs, hbaseSnapRootDir, hbaseSnap1Name);
+    final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName);
+    final FileChecksum hbaseFileCksumS1 = fs.getFileChecksum(hbaseS1Path);
+
+    // Verify that the snap1 checksum matches the current file's checksum
+    Assert.assertEquals("Live and snap1 file checksum doesn't match!",
+        hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path));
+
+    int newWriteLength = (int) (BLOCKSIZE * 1.5);
+    byte[] buf = new byte[newWriteLength];
+    Random random = new Random();
+    random.nextBytes(buf);
+    writeToStream(hbaseOutputStream, buf);
+
+    // Create Snapshot S2
+    final Path hbaseS2Dir = SnapshotTestHelper.createSnapshot(
+        fs, hbaseSnapRootDir, hbaseSnap2Name);
+    final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName);
+    final FileChecksum hbaseFileCksumS2 = fs.getFileChecksum(hbaseS2Path);
+
+    // Verify that the s1 checksum is unchanged
+    Assert.assertEquals("Snap file checksum has changed!",
+        hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path));
+    // Verify that the s2 checksum differs from the s1 checksum
+    Assert.assertNotEquals("Snap1 and snap2 file checksum should differ!",
+        hbaseFileCksumS1, hbaseFileCksumS2);
+
+    newWriteLength = (int) (BLOCKSIZE * 2.5);
+    buf = new byte[newWriteLength];
+    random.nextBytes(buf);
+    writeToStream(hbaseOutputStream, buf);
+
+    // Create Snapshot S3
+    final Path hbaseS3Dir = SnapshotTestHelper.createSnapshot(
+        fs, hbaseSnapRootDir, hbaseSnap3Name);
+    final Path hbaseS3Path = new Path(hbaseS3Dir, hbaseFileName);
+    FileChecksum hbaseFileCksumS3 = fs.getFileChecksum(hbaseS3Path);
+
+    // Close the stream and record the current file's checksum before truncate
+    hbaseOutputStream.close();
+    final FileChecksum hbaseFileCksumBeforeTruncate =
+        fs.getFileChecksum(hbaseFile);
+    Assert.assertEquals("Snap3 and before truncate file checksum should match!",
+        hbaseFileCksumBeforeTruncate, hbaseFileCksumS3);
+
+    // Truncate the current file and record the after truncate checksum
+    long currentFileLen = fs.getFileStatus(hbaseFile).getLen();
+    boolean fileTruncated = fs.truncate(hbaseFile, currentFileLen / 2);
+    Assert.assertTrue("File truncation failed!", fileTruncated);
+    final FileChecksum hbaseFileCksumAfterTruncate =
+        fs.getFileChecksum(hbaseFile);
+
+    Assert.assertNotEquals("Snap3 and after truncate checksum shouldn't match!",
+        hbaseFileCksumS3, hbaseFileCksumAfterTruncate);
+
+    // Append more data to the current file
+    hbaseOutputStream = fs.append(hbaseFile);
+    newWriteLength = (int) (BLOCKSIZE * 5.5);
+    buf = new byte[newWriteLength];
+    random.nextBytes(buf);
+    writeToStream(hbaseOutputStream, buf);
+
+    // Create Snapshot S4
+    final Path hbaseS4Dir = SnapshotTestHelper.createSnapshot(
+        fs, hbaseSnapRootDir, hbaseSnap4Name);
+    final Path hbaseS4Path = new Path(hbaseS4Dir, hbaseFileName);
+    final FileChecksum hbaseFileCksumS4 = fs.getFileChecksum(hbaseS4Path);
+
+    // Record the checksum for the current file after append
+    hbaseOutputStream.close();
+    final FileChecksum hbaseFileCksumAfterAppend =
+        fs.getFileChecksum(hbaseFile);
+
+    Assert.assertEquals("Snap4 and after append file checksum should match!",
+        hbaseFileCksumAfterAppend, hbaseFileCksumS4);
+
+    // Recompute checksum for S3 path and verify it has not changed
+    hbaseFileCksumS3 = fs.getFileChecksum(hbaseS3Path);
+    Assert.assertEquals("Snap3 and before truncate file checksum should match!",
+        hbaseFileCksumBeforeTruncate, hbaseFileCksumS3);
+  }
+
   private void restartNameNode() throws Exception {
     cluster.triggerBlockReports();
     NameNode nameNode = cluster.getNameNode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java
new file mode 100644
index 0000000..80af690
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java
@@ -0,0 +1,691 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Testing random FileSystem operations with random Snapshot operations.
+ */
+public class TestRandomOpsWithSnapshots {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRandomOpsWithSnapshots.class);
+
+  private static final short REPL = 3;
+  private static final long BLOCKSIZE = 1024;
+
+  private static final int TOTAL_FILECOUNT = 250;
+  private static final int MAX_NUM_ITERATIONS = 10;
+  private static final int MAX_NUM_FILESYSTEM_OPERATIONS = 50;
+  private static final int MAX_NUM_SNAPSHOT_OPERATIONS = 50;
+  private static final int MAX_NUM_SUB_DIRECTORIES_LEVEL = 10;
+  private static final int MAX_NUM_FILE_LENGTH = 100;
+  private static final int MIN_NUM_OPERATIONS = 25;
+
+  private static final String TESTDIRSTRING = "/testDir";
+  private static final String WITNESSDIRSTRING = "/WITNESSDIR";
+  private static final Path TESTDIR = new Path(TESTDIRSTRING);
+  private static final Path WITNESSDIR = new Path(WITNESSDIRSTRING);
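+  // Every FileSystem mutation is mirrored into the witness directory, which
+  // is kept free of snapshot operations; checkClusterHealth() then compares
+  // the two trees entry by entry.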
+  private static List<Path> snapshottableDirectories = new ArrayList<Path>();
+  private static Map<Path, ArrayList<String>> pathToSnapshotsMap =
+      new HashMap<Path, ArrayList<String>>();
+
+  private static final Configuration CONFIG = new Configuration();
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem hdfs;
+  private static Random generator = null;
+
+  private int numberFileCreated = 0;
+  private int numberFileDeleted = 0;
+  private int numberFileRenamed = 0;
+
+  private int numberDirectoryCreated = 0;
+  private int numberDirectoryDeleted = 0;
+  private int numberDirectoryRenamed = 0;
+
+  private int numberSnapshotCreated = 0;
+  private int numberSnapshotDeleted = 0;
+  private int numberSnapshotRenamed = 0;
+
+  // Operation directories
+  private enum OperationDirectories {
+    TestDir,
+    WitnessDir;
+  }
+
+  // Operation type
+  private enum OperationType {
+    FileSystem,
+    Snapshot;
+  }
+
+  // FileSystem & Snapshot operation
+  private enum Operations {
+    FileSystem_CreateFile(2 /*operation weight*/, OperationType.FileSystem),
+    FileSystem_DeleteFile(2, OperationType.FileSystem),
+    FileSystem_RenameFile(2, OperationType.FileSystem),
+    FileSystem_CreateDir(1, OperationType.FileSystem),
+    FileSystem_DeleteDir(1, OperationType.FileSystem),
+    FileSystem_RenameDir(2, OperationType.FileSystem),
+
+    Snapshot_CreateSnapshot(5, OperationType.Snapshot),
+    Snapshot_DeleteSnapshot(3, OperationType.Snapshot),
+    Snapshot_RenameSnapshot(2, OperationType.Snapshot);
+
+    private int weight;
+    private OperationType operationType;
+
+    Operations(int weight, OperationType type) {
+      this.weight = weight;
+      this.operationType = type;
+    }
+
+    private int getWeight() {
+      return weight;
+    }
+
+    private static final Operations[] VALUES = values();
+
+    private static int sumWeights(OperationType type) {
+      int sum = 0;
+      for (Operations value: VALUES) {
+        if (value.operationType == type) {
+          sum += value.getWeight();
+        }
+      }
+      return sum;
+    }
+
+    private static final int TOTAL_WEIGHT_FILESYSTEM =
+        sumWeights(OperationType.FileSystem);
+
+    private static final int TOTAL_WEIGHT_SNAPSHOT =
+        sumWeights(OperationType.Snapshot);
+
+    public static Operations getRandomOperation(OperationType type) {
+      int randomNum = 0;
+      Operations randomOperation = null;
+      switch (type) {
+      case FileSystem:
+        randomNum = generator.nextInt(TOTAL_WEIGHT_FILESYSTEM);
+        break;
+      case Snapshot:
+        randomNum = generator.nextInt(TOTAL_WEIGHT_SNAPSHOT);
+        break;
+      default:
+        break;
+      }
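+      // Walk the cumulative weights: the first operation whose cumulative
+      // range contains randomNum is picked, so heavier operations are
+      // chosen proportionally more often.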
+      int currentWeightSum = 0;
+      for (Operations currentValue: VALUES) {
+        if (currentValue.operationType == type) {
+          if (randomNum <= (currentWeightSum + currentValue.getWeight())) {
+            randomOperation = currentValue;
+            break;
+          }
+          currentWeightSum += currentValue.getWeight();
+        }
+      }
+      return randomOperation;
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    CONFIG.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster = new MiniDFSCluster.Builder(CONFIG).numDataNodes(REPL).
+        format(true).build();
+    cluster.waitActive();
+
+    hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(TESTDIR);
+    hdfs.mkdirs(WITNESSDIR);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /*
+   * Random file system operations with snapshot operations in between.
+   */
+  @Test(timeout = 900000)
+  public void testRandomOperationsWithSnapshots()
+          throws IOException, InterruptedException, TimeoutException {
+    // Arrange
+    long seed = System.currentTimeMillis();
+    LOG.info("testRandomOperationsWithSnapshots, seed to be used: " + seed);
+    generator = new Random(seed);
+
+    int fileLen = generator.nextInt(MAX_NUM_FILE_LENGTH);
+    createFiles(TESTDIRSTRING, fileLen);
+
+    // Get list of snapshottable directories
+    SnapshottableDirectoryStatus[] snapshottableDirectoryStatus =
+        hdfs.getSnapshottableDirListing();
+    for (SnapshottableDirectoryStatus ssds : snapshottableDirectoryStatus) {
+      snapshottableDirectories.add(ssds.getFullPath());
+    }
+
+    if (snapshottableDirectories.size() == 0) {
+      hdfs.allowSnapshot(hdfs.getHomeDirectory());
+      snapshottableDirectories.add(hdfs.getHomeDirectory());
+    }
+
+    int numberOfIterations = generator.nextInt(MAX_NUM_ITERATIONS);
+    LOG.info("Number of iterations: " + numberOfIterations);
+
+    int numberFileSystemOperations = generator.nextInt(
+        MAX_NUM_FILESYSTEM_OPERATIONS - MIN_NUM_OPERATIONS + 1)
+        + MIN_NUM_OPERATIONS;
+    LOG.info("Number of FileSystem operations: " + numberFileSystemOperations);
+
+    int numberSnapshotOperations = generator.nextInt(
+        MAX_NUM_SNAPSHOT_OPERATIONS - MIN_NUM_OPERATIONS) + MIN_NUM_OPERATIONS;
+    LOG.info("Number of Snapshot operations: " + numberSnapshotOperations);
+
+    // Act && Verify
+    randomOperationsWithSnapshots(numberOfIterations,
+        numberFileSystemOperations, numberSnapshotOperations);
+  }
+
+  /*
+   * Based on the input, perform:
+   *   a random number of file system operations,
+   *   a random number of snapshot operations,
+   *   then restart the name node, making sure the fsimage loads successfully.
+   */
+  public void randomOperationsWithSnapshots(int numberOfIterations,
+                                            int numberFileSystemOperations,
+                                            int numberSnapshotOperations)
+          throws IOException, InterruptedException, TimeoutException {
+    // random number of iterations
+    for (int i = 0; i < numberOfIterations; i++) {
+      // random number of FileSystem operations
+      for (int j = 0; j < numberFileSystemOperations; j++) {
+        Operations fsOperation =
+            Operations.getRandomOperation(OperationType.FileSystem);
+        LOG.info("fsOperation: " + fsOperation);
+        switch (fsOperation) {
+        case FileSystem_CreateDir:
+          createTestDir();
+          break;
+
+        case FileSystem_DeleteDir:
+          deleteTestDir();
+          break;
+
+        case FileSystem_RenameDir:
+          renameTestDir();
+          break;
+
+        case FileSystem_CreateFile:
+          createTestFile();
+          break;
+
+        case FileSystem_DeleteFile:
+          deleteTestFile();
+          break;
+
+        case FileSystem_RenameFile:
+          renameTestFile();
+          break;
+
+        default:
+          assertNull("Invalid FileSystem operation", fsOperation);
+          break;
+        }
+      }
+
+      // random number of Snapshot operations
+      for (int k = 0; k < numberSnapshotOperations; k++) {
+        Operations snapshotOperation =
+            Operations.getRandomOperation(OperationType.Snapshot);
+        LOG.info("snapshotOperation: " + snapshotOperation);
+
+        switch (snapshotOperation) {
+        case Snapshot_CreateSnapshot:
+          createSnapshot();
+          break;
+
+        case Snapshot_DeleteSnapshot:
+          deleteSnapshot();
+          break;
+
+        case Snapshot_RenameSnapshot:
+          renameSnapshot();
+          break;
+
+        default:
+          assertNull("Invalid Snapshot operation", snapshotOperation);
+          break;
+        }
+      }
+      // Verification
+      checkClusterHealth();
+    }
+  }
+
+  /* Create a new test directory. */
+  private void createTestDir() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path parentDir = snapshottableDirectories.get(index);
+      Path newDir = new Path(parentDir, "createTestDir_" +
+          UUID.randomUUID().toString());
+
+      for (OperationDirectories dir : OperationDirectories.values()) {
+        if (dir == OperationDirectories.WitnessDir) {
+          newDir = new Path(getNewPathString(newDir.toString(),
+              TESTDIRSTRING, WITNESSDIRSTRING));
+        }
+        hdfs.mkdirs(newDir);
+        assertTrue("Directory exists", hdfs.exists(newDir));
+        LOG.info("Directory created: " + newDir);
+        numberDirectoryCreated++;
+      }
+    }
+  }
+
+  /* Delete an existing test directory. */
+  private void deleteTestDir() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path deleteDir = snapshottableDirectories.get(index);
+
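+      // HDFS refuses to delete a snapshottable directory that still has
+      // snapshots, so only snapshot-free directories are deleted here.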
+      if (!pathToSnapshotsMap.containsKey(deleteDir)) {
+        boolean isWitnessDir = false;
+        for (OperationDirectories dir : OperationDirectories.values()) {
+          if (dir == OperationDirectories.WitnessDir) {
+            isWitnessDir = true;
+            deleteDir = new Path(getNewPathString(deleteDir.toString(),
+                TESTDIRSTRING, WITNESSDIRSTRING));
+          }
+          hdfs.delete(deleteDir, true);
+          assertFalse("Directory does not exist", hdfs.exists(deleteDir));
+          if (!isWitnessDir) {
+            snapshottableDirectories.remove(deleteDir);
+          }
+          LOG.info("Directory removed: " + deleteDir);
+          numberDirectoryDeleted++;
+        }
+      }
+    }
+  }
+
+  /* Rename an existing test directory. */
+  private void renameTestDir() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path oldDir = snapshottableDirectories.get(index);
+
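+      // Skip directories that still have snapshots so the keys in
+      // pathToSnapshotsMap remain valid after the rename.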
+      if (!pathToSnapshotsMap.containsKey(oldDir)) {
+        Path newDir = oldDir.suffix("_renameDir+"+UUID.randomUUID().toString());
+        for (OperationDirectories dir : OperationDirectories.values()) {
+          if (dir == OperationDirectories.WitnessDir) {
+            oldDir = new Path(getNewPathString(oldDir.toString(),
+                TESTDIRSTRING, WITNESSDIRSTRING));
+            newDir = new Path(getNewPathString(newDir.toString(),
+                TESTDIRSTRING, WITNESSDIRSTRING));
+          }
+          hdfs.rename(oldDir, newDir, Options.Rename.OVERWRITE);
+          assertTrue("Target directory exists", hdfs.exists(newDir));
+          assertFalse("Source directory does not exist",
+              hdfs.exists(oldDir));
+
+          if (dir == OperationDirectories.TestDir) {
+            snapshottableDirectories.remove(oldDir);
+            snapshottableDirectories.add(newDir);
+          }
+          LOG.info("Renamed directory:" + oldDir + " to directory: " + newDir);
+          numberDirectoryRenamed++;
+        }
+      }
+    }
+  }
+
+  /* Create a new snapshot. */
+  private void createSnapshot() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path randomDir = snapshottableDirectories.get(index);
+      String snapshotName = Integer.toString(generator.nextInt()) + ".ss";
+      hdfs.createSnapshot(randomDir, snapshotName);
+      LOG.info("createSnapshot, directory: " + randomDir +
+          ", snapshot name: " + snapshotName);
+      numberSnapshotCreated++;
+
+      if (pathToSnapshotsMap.containsKey(randomDir)) {
+        pathToSnapshotsMap.get(randomDir).add(snapshotName);
+      } else {
+        pathToSnapshotsMap.put(randomDir,
+            new ArrayList<String>(Arrays.asList(snapshotName)));
+      }
+    }
+  }
+
+  /* Delete an existing snapshot. */
+  private void deleteSnapshot() throws IOException {
+    if (!pathToSnapshotsMap.isEmpty()) {
+      int index = generator.nextInt(pathToSnapshotsMap.size());
+      Object[] snapshotPaths = pathToSnapshotsMap.keySet().toArray();
+      Path snapshotPath = (Path)snapshotPaths[index];
+      ArrayList<String> snapshotNameList=pathToSnapshotsMap.get(snapshotPath);
+
+      String snapshotNameToBeDeleted = snapshotNameList.get(
+          generator.nextInt(snapshotNameList.size()));
+      hdfs.deleteSnapshot(snapshotPath, snapshotNameToBeDeleted);
+      LOG.info("deleteSnapshot, directory: " + snapshotPath +
+          ", snapshot name: " + snapshotNameToBeDeleted);
+      numberSnapshotDeleted++;
+
+      // Adjust pathToSnapshotsMap after snapshot deletion
+      if (snapshotNameList.size() == 1) {
+        pathToSnapshotsMap.remove(snapshotPath);
+      } else {
+        pathToSnapshotsMap.get(snapshotPath).remove(snapshotNameToBeDeleted);
+      }
+    }
+  }
+
+  /* Rename an existing snapshot. */
+  private void renameSnapshot() throws IOException {
+    if (!pathToSnapshotsMap.isEmpty()) {
+      int index = generator.nextInt(pathToSnapshotsMap.size());
+      Object[] snapshotPaths = pathToSnapshotsMap.keySet().toArray();
+      Path snapshotPath = (Path)snapshotPaths[index];
+      ArrayList<String> snapshotNameList =
+          pathToSnapshotsMap.get(snapshotPath);
+
+      String snapshotOldName = snapshotNameList.get(
+          generator.nextInt(snapshotNameList.size()));
+
+      String snapshotOldNameNoExt = snapshotOldName.substring(0,
+          snapshotOldName.lastIndexOf('.'));
+
+      String snapshotNewName = snapshotOldNameNoExt + "_rename.ss";
+      hdfs.renameSnapshot(snapshotPath, snapshotOldName, snapshotNewName);
+      LOG.info("renameSnapshot, directory:" + snapshotPath + ", snapshot name:"
+          + snapshotOldName + " to " + snapshotNewName);
+      numberSnapshotRenamed++;
+
+      // Adjust pathToSnapshotsMap after the snapshot rename
+      pathToSnapshotsMap.get(snapshotPath).remove(snapshotOldName);
+      pathToSnapshotsMap.get(snapshotPath).add(snapshotNewName);
+    }
+  }
+
+  /* Create a new test file. */
+  private void createTestFile() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path randomDir = snapshottableDirectories.get(index);
+      if (!randomDir.isRoot()) {
+        randomDir = randomDir.getParent();
+      }
+
+      Path newFile = new Path(randomDir, "createTestFile.log");
+      for (OperationDirectories dir : OperationDirectories.values()) {
+        if (dir == OperationDirectories.WitnessDir) {
+          newFile = new Path(getNewPathString(newFile.toString(),
+              TESTDIRSTRING, WITNESSDIRSTRING));
+        }
+        hdfs.createNewFile(newFile);
+        assertTrue("File exists", hdfs.exists(newFile));
+        LOG.info("createTestFile, file created: " + newFile);
+        numberFileCreated++;
+      }
+    }
+  }
+
+  /* Delete an existing test file. */
+  private void deleteTestFile() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path randomDir = snapshottableDirectories.get(index);
+
+      FileStatus[] fileStatusList = hdfs.listStatus(randomDir);
+      for (FileStatus fsEntry : fileStatusList) {
+        if (fsEntry.isFile()) {
+          Path deleteFile = fsEntry.getPath();
+          for (OperationDirectories dir : OperationDirectories.values()) {
+            if (dir == OperationDirectories.WitnessDir) {
+              deleteFile = new Path(getNewPathString(deleteFile.toString(),
+                  TESTDIRSTRING, WITNESSDIRSTRING));
+            }
+            hdfs.delete(deleteFile, false);
+            assertFalse("File does not exists",
+                hdfs.exists(deleteFile));
+            LOG.info("deleteTestFile, file deleted: " + deleteFile);
+            numberFileDeleted++;
+          }
+          break;
+        }
+      }
+    }
+  }
+
+  /* Rename an existing test file. */
+  private void renameTestFile() throws IOException {
+    if (snapshottableDirectories.size() > 0) {
+      int index = generator.nextInt(snapshottableDirectories.size());
+      Path randomDir = snapshottableDirectories.get(index);
+
+      FileStatus[] fileStatusList = hdfs.listStatus(randomDir);
+      for (FileStatus fsEntry : fileStatusList) {
+        if (fsEntry.isFile()) {
+          Path oldFile = fsEntry.getPath();
+          Path newFile = oldFile.suffix("_renameFile");
+          for (OperationDirectories dir : OperationDirectories.values()) {
+            if (dir == OperationDirectories.WitnessDir) {
+              oldFile = new Path(getNewPathString(oldFile.toString(),
+                  TESTDIRSTRING, WITNESSDIRSTRING));
+              newFile = new Path(getNewPathString(newFile.toString(),
+                  TESTDIRSTRING, WITNESSDIRSTRING));
+            }
+
+            hdfs.rename(oldFile, newFile, Options.Rename.OVERWRITE);
+            assertTrue("Target file exists", hdfs.exists(newFile));
+            assertFalse("Source file does not exist", hdfs.exists(oldFile));
+            LOG.info("Renamed file: " + oldFile + " to file: " + newFile);
+            numberFileRenamed++;
+          }
+          break;
+        }
+      }
+    }
+  }
+
+  /* Check cluster health. */
+  private void checkClusterHealth() throws IOException, InterruptedException,
+      TimeoutException {
+    // 1. FileStatus comparison between test and witness directory
+    FileStatus[] testDirStatus = hdfs.listStatus(TESTDIR);
+    FileStatus[] witnessDirStatus = hdfs.listStatus(WITNESSDIR);
+    assertEquals(witnessDirStatus.length, testDirStatus.length);
+    LOG.info("checkClusterHealth, number of entries verified.");
+
+    Arrays.sort(testDirStatus);
+    Arrays.sort(witnessDirStatus);
+    for (int i = 0; i < testDirStatus.length; i++) {
+      assertEquals(witnessDirStatus[i].getPermission(),
+          testDirStatus[i].getPermission());
+      assertEquals(witnessDirStatus[i].getOwner(),
+          testDirStatus[i].getOwner());
+      assertEquals(witnessDirStatus[i].getGroup(),
+          testDirStatus[i].getGroup());
+      assertEquals(witnessDirStatus[i].getLen(), testDirStatus[i].getLen());
+      assertEquals(witnessDirStatus[i].getBlockSize(),
+          testDirStatus[i].getBlockSize());
+      assertEquals(witnessDirStatus[i].hasAcl(), testDirStatus[i].hasAcl());
+      assertEquals(witnessDirStatus[i].isEncrypted(),
+          testDirStatus[i].isEncrypted());
+      assertEquals(witnessDirStatus[i].isErasureCoded(),
+          testDirStatus[i].isErasureCoded());
+      assertEquals(witnessDirStatus[i].isDirectory(),
+          testDirStatus[i].isDirectory());
+      assertEquals(witnessDirStatus[i].isFile(), testDirStatus[i].isFile());
+    }
+    LOG.info("checkClusterHealth, metadata verified.");
+
+    // Randomly decide whether to do a checkpoint
+    if (generator.nextBoolean()) {
+      LOG.info("checkClusterHealth, doing a checkpoint on NN.");
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      hdfs.saveNamespace();
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    }
+
+    // Restart the name node, making sure the fsimage loads successfully
+    LOG.info("checkClusterHealth, restarting NN.");
+    cluster.restartNameNodes();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return !cluster.getNameNode().isInSafeMode();
+      }
+    }, 10, 100000);
+
+    assertTrue("NameNode is up", cluster.getNameNode().isActiveState());
+    assertTrue("DataNode is up and running", cluster.isDataNodeUp());
+    assertTrue("Cluster is up and running", cluster.isClusterUp());
+    LOG.info("checkClusterHealth, cluster is healthy.");
+
+    printOperationStats();
+  }
+
+  /* Helper: create a file with a length of filelen. */
+  private void createFile(final String fileName, final long filelen,
+                          boolean enableSnapshot) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    Path filePath = new Path(fileName);
+    DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
+
+    // Randomly allow snapshots on the parent directory
+    if (enableSnapshot && generator.nextBoolean()) {
+      try {
+        hdfs.allowSnapshot(filePath.getParent());
+      } catch (SnapshotException e) {
+        LOG.info("createFile, exception setting snapshotable directory: " +
+            e.getMessage());
+      }
+    }
+  }
+
+  /* Helper: create a large number of directories and files. */
+  private void createFiles(String rootDir, int fileLength) throws IOException {
+    if (!rootDir.endsWith("/")) {
+      rootDir += "/";
+    }
+
+    // Create files at a random directory depth, ranging from 0 to 9.
+    for (int i = 0; i < TOTAL_FILECOUNT; i++) {
+      String filename = rootDir;
+      int dirs = generator.nextInt(MAX_NUM_SUB_DIRECTORIES_LEVEL);
+
+      for (int j=i; j >=(i-dirs); j--) {
+        filename += j + "/";
+      }
+      filename += "file" + i;
+      createFile(filename, fileLength, true);
+      assertTrue("Test file created", hdfs.exists(new Path(filename)));
+      LOG.info("createFiles, file: " + filename + "was created");
+
+      String witnessFile =
+          filename.replaceAll(TESTDIRSTRING, WITNESSDIRSTRING);
+      createFile(witnessFile, fileLength, false);
+      assertTrue("Witness file exists",
+          hdfs.exists(new Path(witnessFile)));
+      LOG.info("createFiles, file: " + witnessFile + "was created");
+    }
+  }
+
+  /* Helper: replace all occurrences of the target string in the original
+   * string with the replacement string. */
+  private String getNewPathString(String originalString, String targetString,
+                                  String replacementString) {
+    String str =  originalString.replaceAll(targetString, replacementString);
+    LOG.info("Original string: " + originalString);
+    LOG.info("New string: " + str);
+    return str;
+  }
+
+  private void printOperationStats() {
+    LOG.info("Operation statistics for this iteration: ");
+
+    LOG.info("Number of files created: " + numberFileCreated);
+    LOG.info("Number of files deleted: " + numberFileDeleted);
+    LOG.info("Number of files renamed: " + numberFileRenamed);
+
+    LOG.info("Number of directories created: " + numberDirectoryCreated);
+    LOG.info("Number of directories deleted: " + numberDirectoryDeleted);
+    LOG.info("Number of directories renamed: " + numberDirectoryRenamed);
+
+    LOG.info("Number of snapshots created: " + numberSnapshotCreated);
+    LOG.info("Number of snapshots deleted: " + numberSnapshotDeleted);
+    LOG.info("Number of snapshots renamed: " + numberSnapshotRenamed);
+
+    numberFileCreated = 0;
+    numberFileDeleted = 0;
+    numberFileRenamed = 0;
+
+    numberDirectoryCreated = 0;
+    numberDirectoryDeleted = 0;
+    numberDirectoryRenamed = 0;
+
+    numberSnapshotCreated = 0;
+    numberSnapshotDeleted = 0;
+    numberSnapshotRenamed = 0;
+  }
+}
\ No newline at end of file
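
The checkpoint branch in checkClusterHealth() above works because saveNamespace()
is only permitted while the NameNode is in safe mode. A minimal sketch of that
sequence as a standalone helper, assuming a DistributedFileSystem handle as in
the test (the helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    /** Force a checkpoint: write a new fsimage and roll the edit log. */
    static void forceCheckpoint(DistributedFileSystem hdfs) throws IOException {
      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
      try {
        hdfs.saveNamespace();
      } finally {
        // Leave safe mode even if saveNamespace() throws.
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
      }
    }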
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index b9451bc..4654486 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Random;
@@ -43,15 +46,21 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDiffReport {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDiffReport.class);
+
   private static final long SEED = 0;
   private static final short REPLICATION = 3;
   private static final short REPLICATION_1 = 2;
@@ -73,6 +82,10 @@
     conf = new Configuration();
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE,
+        true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
         .format(true).build();
     cluster.waitActive();
@@ -167,8 +180,8 @@
     // reverse the order of from and to
     SnapshotDiffReport inverseReport = hdfs
         .getSnapshotDiffReport(dir, to, from);
-    System.out.println(report.toString());
-    System.out.println(inverseReport.toString() + "\n");
+    LOG.info(report.toString());
+    LOG.info(inverseReport.toString() + "\n");
     
     assertEquals(entries.length, report.getDiffList().size());
     assertEquals(entries.length, inverseReport.getDiffList().size());
@@ -221,20 +234,20 @@
     
     // diff between the same snapshot
     SnapshotDiffReport report = hdfs.getSnapshotDiffReport(sub1, "s0", "s0");
-    System.out.println(report);
+    LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());
     
     report = hdfs.getSnapshotDiffReport(sub1, "", "");
-    System.out.println(report);
+    LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());
     
     report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
-    System.out.println(report);
+    LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());
 
     // test path with scheme also works
     report = hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1), "s0", "s2");
-    System.out.println(report);
+    LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());
 
     verifyDiffReport(sub1, "s0", "s2", 
@@ -677,4 +690,142 @@
 
   }
 
+  private long getAccessTime(Path path) throws IOException {
+    return hdfs.getFileStatus(path).getAccessTime();
+  }
+
+  private String getAccessTimeStr(Path path) throws IOException {
+    SimpleDateFormat timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+    return timeFmt.format(new Date(getAccessTime(path)));
+  }
+
+  private Path getSSpath(Path path, Path ssRoot, String ssName) {
+    return new Path(ssRoot, ".snapshot/" + ssName + "/" +
+        path.toString().substring(ssRoot.toString().length()));
+  }
+
+  private void printAtime(Path path, Path ssRoot, String ssName)
+      throws IOException {
+    Path ssPath = getSSpath(path, ssRoot, ssName);
+    LOG.info("Access time "
+        + path + ": " + getAccessTimeStr(path)
+        + " " + ssPath + ": " + getAccessTimeStr(ssPath));
+  }
+
+  private void assertAtimeEquals(Path path, Path ssRoot,
+      String ssName1, String ssName2)
+      throws IOException {
+    Path ssPath1 = getSSpath(path, ssRoot, ssName1);
+    Path ssPath2 = getSSpath(path, ssRoot, ssName2);
+    assertEquals(getAccessTime(ssPath1), getAccessTime(ssPath2));
+  }
+
+  private void assertAtimeNotEquals(Path path, Path ssRoot,
+      String ssName1, String ssName2)
+      throws IOException {
+    Path ssPath1 = getSSpath(path, ssRoot, ssName1);
+    Path ssPath2 = getSSpath(path, ssRoot, ssName2);
+    assertNotEquals(getAccessTime(ssPath1), getAccessTime(ssPath2));
+  }
+
+  /**
+   * Check that access time is not captured in a snapshot when applicable.
+   * When DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE
+   * is set to true, and a file's access time changed between two
+   * snapshots with no other modification, the access time is not
+   * captured in the snapshot.
+   */
+  @Test
+  public void testDontCaptureAccessTimeOnlyChangeReport() throws Exception {
+    final Path froot = new Path("/");
+    final Path root = new Path(froot, "/testSdiffCalc");
+
+    // items created before snapshots were enabled
+    final Path filePreSS = new Path(root, "fParent/filePreSS");
+    final Path dirPreSS = new Path(root, "dirPreSS");
+    final Path dirPreSSChild = new Path(dirPreSS, "dirPreSSChild");
+
+    // items created after snapshots were enabled
+    final Path filePostSS = new Path(root, "fParent/filePostSS");
+    final Path dirPostSS = new Path(root, "dirPostSS");
+    final Path dirPostSSChild = new Path(dirPostSS, "dirPostSSChild");
+
+    DFSTestUtil.createFile(hdfs, filePreSS, BLOCKSIZE, REPLICATION, SEED);
+    DFSTestUtil.createFile(hdfs, dirPreSSChild, BLOCKSIZE, REPLICATION, SEED);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+    printAtime(filePreSS, root, "s0");
+    printAtime(dirPreSS, root, "s0");
+
+    // items created after creating the first snapshot
+    DFSTestUtil.createFile(hdfs, filePostSS, BLOCKSIZE, REPLICATION, SEED);
+    DFSTestUtil.createFile(hdfs, dirPostSSChild, BLOCKSIZE, REPLICATION, SEED);
+
+    Thread.sleep(3000);
+    long now = Time.now();
+    hdfs.setTimes(filePreSS, -1, now);
+    hdfs.setTimes(filePostSS, -1, now);
+    hdfs.setTimes(dirPreSS, -1, now);
+    hdfs.setTimes(dirPostSS, -1, now);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+    printAtime(filePreSS, root, "s1");
+    printAtime(dirPreSS, root, "s1");
+    printAtime(filePostSS, root, "s1");
+    printAtime(dirPostSS, root, "s1");
+
+    Thread.sleep(3000);
+    now = Time.now();
+    hdfs.setTimes(filePreSS, -1, now);
+    hdfs.setTimes(filePostSS, -1, now);
+    hdfs.setTimes(dirPreSS, -1, now);
+    hdfs.setTimes(dirPostSS, -1, now);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
+    printAtime(filePreSS, root, "s2");
+    printAtime(dirPreSS, root, "s2");
+    printAtime(filePostSS, root, "s2");
+    printAtime(dirPostSS, root, "s2");
+
+    Thread.sleep(3000);
+    now = Time.now();
+    // modify filePostSS, and change access time
+    hdfs.setReplication(filePostSS, (short) (REPLICATION - 1));
+    hdfs.setTimes(filePostSS, -1, now);
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s3");
+
+    LOG.info("\nsnapshotDiff s0 -> s1:");
+    LOG.info(hdfs.getSnapshotDiffReport(root, "s0", "s1").toString());
+    LOG.info("\nsnapshotDiff s1 -> s2:");
+    LOG.info(hdfs.getSnapshotDiffReport(root, "s1", "s2").toString());
+
+    assertAtimeEquals(filePreSS, root, "s0", "s1");
+    assertAtimeEquals(dirPreSS, root, "s0", "s1");
+
+    assertAtimeEquals(filePreSS, root, "s1", "s2");
+    assertAtimeEquals(dirPreSS, root, "s1", "s2");
+
+    assertAtimeEquals(filePostSS, root, "s1", "s2");
+    assertAtimeEquals(dirPostSS, root, "s1", "s2");
+
+    // access time should be captured in the snapshot because of
+    // the other modification (setReplication)
+    assertAtimeNotEquals(filePostSS, root, "s2", "s3");
+
+    // Restart the NN and verify that the access time relationships
+    // still hold (no change caused by edit log loading).
+    cluster.restartNameNodes();
+    cluster.waitActive();
+    assertAtimeEquals(filePreSS, root, "s0", "s1");
+    assertAtimeEquals(dirPreSS, root, "s0", "s1");
+
+    assertAtimeEquals(filePreSS, root, "s1", "s2");
+    assertAtimeEquals(dirPreSS, root, "s1", "s2");
+
+    assertAtimeEquals(filePostSS, root, "s1", "s2");
+    assertAtimeEquals(dirPostSS, root, "s1", "s2");
+
+    assertAtimeNotEquals(filePostSS, root, "s2", "s3");
+  }
 }
\ No newline at end of file
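
getSSpath() above maps a live path to its view inside a snapshot by splicing in
the reserved ".snapshot" component under the snapshot root. The same mapping as
a hedged standalone sketch (snapshotView is an illustrative name; ssRoot must
be an ancestor of path, as in the test):

    import org.apache.hadoop.fs.Path;

    /** /root/a/b viewed in snapshot "s1" of /root -> /root/.snapshot/s1/a/b */
    static Path snapshotView(Path path, Path ssRoot, String ssName) {
      String relative = path.toString().substring(ssRoot.toString().length());
      return new Path(ssRoot, ".snapshot/" + ssName + relative);
    }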
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index ed43dc0..2d38f2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -38,11 +38,18 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -498,24 +505,47 @@
     return sb.toString();
   }
 
-  @Test(timeout = 120000)
+  // get block details and check if the block is corrupt
+  private void waitForCorruptBlock(MiniDFSCluster miniCluster,
+      DFSClient client, Path file)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LocatedBlocks blocks = null;
+        try {
+          miniCluster.triggerBlockReports();
+          blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
+              Long.MAX_VALUE);
+        } catch (IOException e) {
+          return false;
+        }
+        return blocks != null && blocks.get(0).isCorrupt();
+      }
+    }, 1000, 60000);
+  }
+
+  @Test(timeout = 180000)
   public void testReportCommand() throws Exception {
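+    // Release any cluster left over from the fixture; this test builds
+    // its own MiniDFSCluster below with an EC-appropriate DataNode count.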
+    tearDown();
     redirectStream();
 
-    /* init conf */
+    // init conf
     final Configuration dfsConf = new HdfsConfiguration();
+    ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
+    dfsConf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        ecPolicy.getName());
     dfsConf.setInt(
-        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-        500); // 0.5s
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     final Path baseDir = new Path(
         PathUtils.getTestDir(getClass()).getAbsolutePath(),
         GenericTestUtils.getMethodName());
     dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
+    final int numDn =
+        ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
 
-    final int numDn = 3;
-
-    /* init cluster */
     try(MiniDFSCluster miniCluster = new MiniDFSCluster
         .Builder(dfsConf)
         .numDataNodes(numDn).build()) {
@@ -523,34 +553,71 @@
       miniCluster.waitActive();
       assertEquals(numDn, miniCluster.getDataNodes().size());
 
-      /* local vars */
       final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
       final DFSClient client = miniCluster.getFileSystem().getClient();
 
-      /* run and verify report command */
+      // Verify the report command shows all counts as zero
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn, 0, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
 
-      /* shut down one DN */
-      final List<DataNode> datanodes = miniCluster.getDataNodes();
-      final DataNode last = datanodes.get(datanodes.size() - 1);
-      last.shutdown();
-      miniCluster.setDataNodeDead(last.getDatanodeId());
-
-      /* run and verify report command */
-      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, client);
-
-      /* corrupt one block */
       final short replFactor = 1;
       final long fileLength = 512L;
-      final FileSystem fs = miniCluster.getFileSystem();
+      final DistributedFileSystem fs = miniCluster.getFileSystem();
       final Path file = new Path(baseDir, "/corrupted");
       DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
       DFSTestUtil.waitReplication(fs, file, replFactor);
-
       final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
+      LocatedBlocks lbs = miniCluster.getFileSystem().getClient().
+          getNamenode().getBlockLocations(
+          file.toString(), 0, fileLength);
+      assertTrue("Unexpected block type: " + lbs.get(0),
+          lbs.get(0) instanceof LocatedBlock);
+      LocatedBlock locatedBlock = lbs.get(0);
+      DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
+      LOG.info("Replica block located on: " + locatedDataNode);
+
+      Path ecDir = new Path(baseDir, "ec");
+      fs.mkdirs(ecDir);
+      fs.getClient().setErasureCodingPolicy(ecDir.toString(),
+          ecPolicy.getName());
+      Path ecFile = new Path(ecDir, "ec-file");
+      int stripesPerBlock = 2;
+      int cellSize = ecPolicy.getCellSize();
+      int blockSize = stripesPerBlock * cellSize;
+      int blockGroupSize = ecPolicy.getNumDataUnits() * blockSize;
+      int totalBlockGroups = 1;
+      DFSTestUtil.createStripedFile(miniCluster, ecFile, ecDir,
+          totalBlockGroups, stripesPerBlock, false, ecPolicy);
+
+      // Verify the report command shows all counts as zero
+      resetStream();
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client);
+
+      // Choose a DataNode to shut down
+      final List<DataNode> datanodes = miniCluster.getDataNodes();
+      DataNode dataNodeToShutdown = null;
+      for (DataNode dn : datanodes) {
+        if (!dn.getDatanodeId().getDatanodeUuid().equals(
+            locatedDataNode.getDatanodeUuid())) {
+          dataNodeToShutdown = dn;
+          break;
+        }
+      }
+      assertTrue("Unable to choose a DataNode to shutdown!",
+          dataNodeToShutdown != null);
+
+      // Shut down the DataNode not hosting the replicated block
+      LOG.info("Shutting down: " + dataNodeToShutdown);
+      dataNodeToShutdown.shutdown();
+      miniCluster.setDataNodeDead(dataNodeToShutdown.getDatanodeId());
+
+      // Verify the report command shows the dead DataNode
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client);
+
+      // Corrupt the replicated block
       final int blockFilesCorrupted = miniCluster
           .corruptBlockOnDataNodes(block);
       assertEquals("Fail to corrupt all replicas for block " + block,
@@ -564,35 +631,44 @@
         // expected exception reading corrupt blocks
       }
 
-      /*
-       * Increase replication factor, this should invoke transfer request.
-       * Receiving datanode fails on checksum and reports it to namenode
-       */
+      // Increase the replication factor; this should trigger a transfer
+      // request. The receiving datanode fails on checksum and reports the
+      // corruption to the namenode.
       fs.setReplication(file, (short) (replFactor + 1));
 
-      /* get block details and check if the block is corrupt */
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          LocatedBlocks blocks = null;
-          try {
-            miniCluster.triggerBlockReports();
-            blocks = client.getNamenode().getBlockLocations(file.toString(), 0,
-                Long.MAX_VALUE);
-          } catch (IOException e) {
-            return false;
-          }
-          return blocks != null && blocks.get(0).isCorrupt();
-        }
-      }, 1000, 60000);
-
+      // get block details and check if the block is corrupt
       BlockManagerTestUtil.updateState(
           miniCluster.getNameNode().getNamesystem().getBlockManager());
+      waitForCorruptBlock(miniCluster, client, file);
 
-      /* run and verify report command */
+      // verify report command for corrupt replicated block
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
-      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, client);
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client);
+
+      lbs = miniCluster.getFileSystem().getClient().
+          getNamenode().getBlockLocations(
+          ecFile.toString(), 0, blockGroupSize);
+      assertTrue("Unexpected block type: " + lbs.get(0),
+          lbs.get(0) instanceof LocatedStripedBlock);
+      LocatedStripedBlock bg =
+          (LocatedStripedBlock)(lbs.get(0));
+
+      miniCluster.getNamesystem().writeLock();
+      try {
+        BlockManager bm = miniCluster.getNamesystem().getBlockManager();
+        bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
+            "STORAGE_ID", "TEST");
+        BlockManagerTestUtil.updateState(bm);
+      } finally {
+        miniCluster.getNamesystem().writeUnlock();
+      }
+      waitForCorruptBlock(miniCluster, client, file);
+
+      // verify report command for corrupt replicated block
+      // and EC block group
+      resetStream();
+      assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
+      verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client);
     }
   }
 
@@ -669,6 +745,7 @@
       final int numDn,
       final int numLiveDn,
       final int numCorruptBlocks,
+      final int numCorruptECBlockGroups,
       final DFSClient client) throws IOException {
 
     /* init vars */
@@ -679,11 +756,15 @@
     final String expectedCorruptedBlocksStr = String.format(
         "Blocks with corrupt replicas: %d",
         numCorruptBlocks);
+    final String expectedCorruptedECBlockGroupsStr = String.format(
+        "Block groups with corrupt internal blocks: %d",
+        numCorruptECBlockGroups);
 
-    /* verify nodes and corrupt blocks */
+    // verify nodes and corrupt blocks
     assertThat(outStr, is(allOf(
         containsString(expectedLiveNodesStr),
-        containsString(expectedCorruptedBlocksStr))));
+        containsString(expectedCorruptedBlocksStr),
+        containsString(expectedCorruptedECBlockGroupsStr))));
 
     assertEquals(
         numDn,
@@ -694,7 +775,12 @@
     assertEquals(
         numDn - numLiveDn,
         client.getDatanodeStorageReport(DatanodeReportType.DEAD).length);
-    assertEquals(numCorruptBlocks, client.getCorruptBlocksCount());
+    assertEquals(numCorruptBlocks + numCorruptECBlockGroups,
+        client.getCorruptBlocksCount());
+    assertEquals(numCorruptBlocks, client.getNamenode()
+        .getBlocksStats().getCorruptBlocksStat());
+    assertEquals(numCorruptECBlockGroups, client.getNamenode()
+        .getECBlockGroupsStats().getCorruptBlockGroupsStat());
   }
 
   @Test
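
waitForCorruptBlock() above polls until the corruption report reaches the
NameNode. On Java 8+ the same wait can be written with a lambda instead of the
anonymous Supplier; a sketch using the helper's own names, poll interval (1 s),
and timeout (60 s):

    GenericTestUtils.waitFor(() -> {
      try {
        miniCluster.triggerBlockReports();
        LocatedBlocks blocks = client.getNamenode()
            .getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
        return blocks != null && blocks.get(0).isCorrupt();
      } catch (IOException e) {
        return false;  // not visible yet; keep polling
      }
    }, 1000, 60000);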
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
index f9bb29e..c109442 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
@@ -593,6 +593,7 @@
         <crypto-admin-command>-listZones</crypto-admin-command>
       </test-commands>
       <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /snapshotable snapshot1</command>
         <command>-fs NAMENODE -rm -r /snapshotable</command>
         <command>-fs NAMENODE -rm -r .Trash/Current/*</command>
       </cleanup-commands>
@@ -603,5 +604,116 @@
         </comparator>
       </comparators>
     </test>
+
+    <!-- More thorough test cases for re-encryption are in TestEncryptionZones -->
+    <test>
+      <description>Test success of reencrypt submission on an EZ</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-reencryptZone -start -path /src</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r -skipTrash /src</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>successfully submitted for zone: /src action: START</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of reencrypt submission on a non-EZ</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <crypto-admin-command>-reencryptZone -start -path /src</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r -skipTrash /src</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>not the root of an encryption zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Cannot test successful cancel here, since the submit will finish very quickly.
+         TestReencryption covers successful cancellations. -->
+
+    <test>
+      <description>Test failure of reencrypt cancellation on an EZ that is not being re-encrypted</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-reencryptZone -cancel -path /src</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r -skipTrash /src</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Zone /src is not under re-encryption</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test success of listing reencryption status of EZs</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <command>-fs NAMENODE -mkdir /zone2</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-createZone -path /zone2 -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-reencryptZone -start -path /src</crypto-admin-command>
+        <crypto-admin-command>-reencryptZone -start -path /zone2</crypto-admin-command>
+        <crypto-admin-command>-listReencryptionStatus</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r -skipTrash /src</command>
+        <command>-fs NAMENODE -rm -r -skipTrash /zone2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>/src,Completed,false,myKey,zone2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test adding two zones to a snapshottable directory;
+        the second zone is not a direct child of the snapshottable directory;
+        take a snapshot, permanently delete the second EZ, then list zones</description>
+      <test-commands>
+        <command>-fs NAMENODE -rm -r .Trash/Current/*</command>
+        <command>-fs NAMENODE -mkdir /snapshotable</command>
+        <command>-fs NAMENODE -mkdir /snapshotable/test1</command>
+        <command>-fs NAMENODE -mkdir /snapshotable/test1/test2</command>
+        <command>-fs NAMENODE -mkdir /snapshotable/test3</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /snapshotable</dfs-admin-command>
+        <crypto-admin-command>-createZone -path /snapshotable/test1/test2 -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-createZone -path /snapshotable/test3 -keyName myKey</crypto-admin-command>
+        <command>-fs NAMENODE -createSnapshot /snapshotable snapshot1</command>
+        <command>-fs NAMENODE -rm -r /snapshotable/test1</command>
+        <command>-fs NAMENODE -rm -r .Trash/Current/*</command>
+        <crypto-admin-command>-listZones</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /snapshotable snapshot1</command>
+        <command>-fs NAMENODE -rm -r /snapshotable</command>
+        <command>-fs NAMENODE -rm -r .Trash/Current/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>(/test3)\s*(myKey)\s*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>
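
The re-encryption cases above drive the CryptoAdmin CLI through the XML test
harness. A minimal programmatic sketch of the START submission, assuming a
cluster whose configuration already points at a KMS holding a key named myKey:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.CryptoAdmin;
    import org.apache.hadoop.util.ToolRunner;

    int ret = ToolRunner.run(new CryptoAdmin(new Configuration()),
        new String[] {"-reencryptZone", "-start", "-path", "/src"});
    // ret == 0 on success; stdout carries the "successfully submitted for
    // zone: /src action: START" message matched by the comparator above.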
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index c68c6d6..9988ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -101,7 +101,7 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>[-setPolicy -path &lt;path&gt; -policy &lt;policy&gt;]</expected-output>
+          <expected-output>[-setPolicy -path &lt;path&gt; [-policy &lt;policy&gt;] [-replicate]]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -135,7 +135,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of enabled erasure coding policies</expected-output>
+          <expected-output>Get the list of all erasure coding policies</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
@@ -206,7 +206,7 @@
       <description>setPolicy : set erasure coding policy on a directory to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -214,7 +214,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-1024k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -223,8 +223,8 @@
       <description>setPolicy : set a policy twice</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -232,7 +232,30 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-1024k on /ecdir</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setPolicy : set replication policy on a directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <command>-fs NAMENODE -mkdir /ecdir/replica</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -replicate -path /ecdir/replica</ec-admin-command>
+        <command>-fs NAMENODE -touchz /ecdir/replica/file</command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/replica/file</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /ecdir/replica/file</command>
+        <command>-fs NAMENODE -rmdir /ecdir/replica</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>is unspecified</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -241,7 +264,7 @@
       <description>unsetPolicy : unset policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
@@ -260,8 +283,8 @@
       <description>setPolicy : change different policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-3-2-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-3-2-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -270,7 +293,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-3-2-64k</expected-output>
+          <expected-output>RS-3-2-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -280,7 +303,7 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <command>-fs NAMENODE -mkdir /ecdir/child</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir/child</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/child/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/child/ecfile</ec-admin-command>
@@ -293,7 +316,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -319,7 +342,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -328,7 +351,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -337,7 +360,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/ecfile</ec-admin-command>
       </test-commands>
@@ -348,7 +371,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -382,7 +405,26 @@
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Add ErasureCodingPolicy RS-6-3-64k failed</expected-output>
+          <expected-output>Add ErasureCodingPolicy RS-6-3-1024k failed</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>listPolicies : get the list of supported erasure coding policies</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>XOR-2-1-128k</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>State=DISABLED</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -390,14 +432,14 @@
     <test>
       <description>enablePolicy : enable the erasure coding policy</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is enabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -405,15 +447,15 @@
     <test>
       <description>enablePolicy : enable the erasure coding policy twice</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is enabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -421,15 +463,15 @@
     <test>
       <description>disablePolicy : disable the erasure coding policy</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is disabled</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -437,23 +479,23 @@
     <test>
       <description>disablePolicy : disable the erasure coding policy twice</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+          <expected-output>Erasure coding policy RS-6-3-1024k is disabled</expected-output>
         </comparator>
       </comparators>
     </test>
 
 <!-- Test illegal parameters -->
     <test>
-      <description>setPolicy : illegal parameters - path is missing</description>
+      <description>setPolicy : illegal parameters - path option is missing</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <ec-admin-command>-fs NAMENODE -setPolicy</ec-admin-command>
@@ -470,7 +512,7 @@
     </test>
 
     <test>
-      <description>setPolicy : illegal parameters - policy name is missing</description>
+      <description>setPolicy : illegal parameters - path name is missing</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <ec-admin-command>-fs NAMENODE -setPolicy -path</ec-admin-command>
@@ -487,10 +529,27 @@
     </test>
 
     <test>
-      <description>setPolicy : illegal parameters - too many arguments</description>
+      <description>setPolicy : illegal parameters - too many arguments case 1</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k /ecdir2</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-1024k /ecdir2</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-setPolicy: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setPolicy : illegal parameters - too many arguments case 2</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-1024k -replicate /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -521,10 +580,10 @@
     </test>
 
     <test>
-      <description>setPolicy : illegal parameters - RS-10-4-64k</description>
+      <description>setPolicy : illegal parameters - RS-10-4-1024k</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-10-4-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-10-4-1024k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -532,7 +591,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Policy 'RS-10-4-64k' does not match any enabled erasure coding policies</expected-output>
+          <expected-output>Policy 'RS-10-4-1024k' does not match any enabled erasure coding policies</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -540,7 +599,7 @@
     <test>
       <description>setPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -553,6 +612,36 @@
     </test>
 
     <test>
+      <description>setPolicy : illegal parameters - wrong spelling of -replicate</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -replica</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-setPolicy: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setPolicy : illegal parameters - replicate and policy coexist</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-1024k -replicate</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-replicate and -policy cannot been used at the same time</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>setPolicy : set erasure coding policy without given a specific policy name</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
@@ -582,7 +671,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-6-3-64k</expected-output>
+          <expected-output>RS-6-3-1024k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -681,7 +770,7 @@
     <test>
       <description>enablePolicy : illegal parameters - policy is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -696,7 +785,7 @@
     <test>
       <description>enablePolicy : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -711,7 +800,7 @@
     <test>
       <description>disablePolicy : illegal parameters - policy is missing</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy RS-6-3-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -726,7 +815,7 @@
     <test>
       <description>disablePolicy : illegal parameters - too many parameters</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-1024k RS-3-2-1024k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
@@ -772,7 +861,7 @@
       <description>count: file using absolute path with option -e to show erasurecoding policy of a directory</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir1</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /dir1 -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /dir1 -policy RS-6-3-1024k</ec-admin-command>
         <command>-fs NAMENODE -touchz /dir1/file1</command>
         <command>-fs NAMENODE -touchz /dir1/file2</command>
         <command>-fs NAMENODE -count -e -v /dir1</command>
@@ -813,7 +902,7 @@
       <description>ls: file using absolute path and option -e to show erasure coding policy of a directory</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir -p /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-6-3-1024k</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/file1</command>
         <command>-fs NAMENODE -touchz /ecdir/file2</command>
         <command>-fs NAMENODE -touchz /ecdir/file3</command>
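
The policy renames above track the cell size change from 64 KiB (-64k) to 1 MiB
(-1024k). A sketch of how block-group sizing follows from the policy geometry,
mirroring testReportCommand earlier (stripesPerBlock is an assumed test
parameter):

    ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_6_3_POLICY_ID);        // RS-6-3-1024k
    int cellSize = rs63.getCellSize();                        // 1048576 bytes
    int stripesPerBlock = 2;
    long blockSize = (long) stripesPerBlock * cellSize;       // per internal block
    long dataPerGroup = rs63.getNumDataUnits() * blockSize;   // 6 * 2 MiB = 12 MiB
    long rawPerGroup = dataPerGroup
        + rs63.getNumParityUnits() * blockSize;               // + 3 * 2 MiB parity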
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
index b2416ac..6d5bb28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/test_ec_policies.xml
@@ -59,7 +59,7 @@
   </policy>
   <policy>
     <schema>RSk6m3</schema>
-    <cellsize>65536</cellsize>
+    <cellsize>1048576</cellsize>
   </policy>
 </policies>
 </configuration>
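
The new <cellsize> matches the -1024k suffix used in the renamed policies; a
quick check of the arithmetic (the constant name is illustrative only):

    // 1024k == 1024 * 1024 bytes
    static final int CELL_SIZE_1024K = 1024 * 1024;  // 1048576, as in the XML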
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 53fe055..8fa417b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
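
The hunks below derive "succeeded" task counts by subtraction, because the Job
API used here exposes completed, failed, and killed totals but no direct
succeeded count. The identity as a tiny sketch (the helper name is illustrative):

    /** A job's completed count includes failed and killed tasks. */
    static int succeeded(int completed, int failed, int killed) {
      return completed - failed - killed;   // e.g. 10 - 2 - 1 = 7 succeeded
    }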
@@ -430,10 +431,18 @@
             + " to have not been closed. Will close");
           //Create a JobFinishEvent so that it is written to the job history
           final Job job = context.getJob(toClose);
+          int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
+                  - job.getKilledMaps();
+          int successfulReduces = job.getCompletedReduces()
+                  - job.getFailedReduces() - job.getKilledReduces();
+
           JobUnsuccessfulCompletionEvent jucEvent =
             new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(toClose),
-                System.currentTimeMillis(), job.getCompletedMaps(),
-                job.getCompletedReduces(),
+                System.currentTimeMillis(),
+                successfulMaps,
+                successfulReduces,
+                job.getFailedMaps(), job.getFailedReduces(),
+                job.getKilledMaps(), job.getKilledReduces(),
                 createJobStateForJobUnsuccessfulCompletionEvent(
                     mi.getForcedJobStateOnShutDown()),
                 job.getDiagnostics());
@@ -654,9 +663,9 @@
           JobFinishedEvent jFinishedEvent =
               (JobFinishedEvent) event.getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getFinishedMaps());
+          mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps());
           mi.getJobIndexInfo().setNumReduces(
-              jFinishedEvent.getFinishedReduces());
+              jFinishedEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
           closeEventWriter(event.getJobID());
           processDoneFiles(event.getJobID());
@@ -671,8 +680,8 @@
           JobUnsuccessfulCompletionEvent jucEvent =
               (JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
-          mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
+          mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
+          mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
           closeEventWriter(event.getJobID());
           if(context.isLastAMRetry())
@@ -689,8 +698,8 @@
               (JobUnsuccessfulCompletionEvent) event
               .getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
-          mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
+          mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
+          mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
           closeEventWriter(event.getJobID());
           processDoneFiles(event.getJobID());
@@ -738,10 +747,12 @@
     case JOB_FINISHED:
       JobFinishedEvent jfe = (JobFinishedEvent) event;
       summary.setJobFinishTime(jfe.getFinishTime());
-      summary.setNumFinishedMaps(jfe.getFinishedMaps());
+      summary.setNumSucceededMaps(jfe.getSucceededMaps());
       summary.setNumFailedMaps(jfe.getFailedMaps());
-      summary.setNumFinishedReduces(jfe.getFinishedReduces());
+      summary.setNumSucceededReduces(jfe.getSucceededReduces());
       summary.setNumFailedReduces(jfe.getFailedReduces());
+      summary.setNumKilledMaps(jfe.getKilledMaps());
+      summary.setNumKilledReduces(jfe.getKilledReduces());
       if (summary.getJobStatus() == null)
         summary
             .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
@@ -752,11 +763,21 @@
       break;
     case JOB_FAILED:
     case JOB_KILLED:
+      Job job = context.getJob(jobId);
       JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
+      int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
+          - job.getKilledMaps();
+      int successfulReduces = job.getCompletedReduces()
+          - job.getFailedReduces() - job.getKilledReduces();
+
       summary.setJobStatus(juce.getStatus());
-      summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
-      summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
+      summary.setNumSucceededMaps(successfulMaps);
+      summary.setNumSucceededReduces(successfulReduces);
+      summary.setNumFailedMaps(job.getFailedMaps());
+      summary.setNumFailedReduces(job.getFailedReduces());
       summary.setJobFinishTime(juce.getFinishTime());
+      summary.setNumKilledMaps(juce.getKilledMaps());
+      summary.setNumKilledReduces(juce.getKilledReduces());
       setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
       break;
     default:
@@ -839,12 +860,22 @@
         JobUnsuccessfulCompletionEvent juce =
               (JobUnsuccessfulCompletionEvent) event;
         tEvent.addEventInfo("FINISH_TIME", juce.getFinishTime());
-        tEvent.addEventInfo("NUM_MAPS", juce.getFinishedMaps());
-        tEvent.addEventInfo("NUM_REDUCES", juce.getFinishedReduces());
+        tEvent.addEventInfo("NUM_MAPS",
+            juce.getSucceededMaps() +
+            juce.getFailedMaps() +
+            juce.getKilledMaps());
+        tEvent.addEventInfo("NUM_REDUCES",
+            juce.getSucceededReduces() +
+            juce.getFailedReduces() +
+            juce.getKilledReduces());
         tEvent.addEventInfo("JOB_STATUS", juce.getStatus());
         tEvent.addEventInfo("DIAGNOSTICS", juce.getDiagnostics());
-        tEvent.addEventInfo("FINISHED_MAPS", juce.getFinishedMaps());
-        tEvent.addEventInfo("FINISHED_REDUCES", juce.getFinishedReduces());
+        tEvent.addEventInfo("SUCCESSFUL_MAPS", juce.getSucceededMaps());
+        tEvent.addEventInfo("SUCCESSFUL_REDUCES", juce.getSucceededReduces());
+        tEvent.addEventInfo("FAILED_MAPS", juce.getFailedMaps());
+        tEvent.addEventInfo("FAILED_REDUCES", juce.getFailedReduces());
+        tEvent.addEventInfo("KILLED_MAPS", juce.getKilledMaps());
+        tEvent.addEventInfo("KILLED_REDUCES", juce.getKilledReduces());
         tEntity.addEvent(tEvent);
         tEntity.setEntityId(jobId.toString());
         tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
@@ -852,12 +883,20 @@
       case JOB_FINISHED:
         JobFinishedEvent jfe = (JobFinishedEvent) event;
         tEvent.addEventInfo("FINISH_TIME", jfe.getFinishTime());
-        tEvent.addEventInfo("NUM_MAPS", jfe.getFinishedMaps());
-        tEvent.addEventInfo("NUM_REDUCES", jfe.getFinishedReduces());
+        tEvent.addEventInfo("NUM_MAPS",
+            jfe.getSucceededMaps() +
+            jfe.getFailedMaps() +
+            jfe.getKilledMaps());
+        tEvent.addEventInfo("NUM_REDUCES",
+            jfe.getSucceededReduces() +
+            jfe.getFailedReduces() +
+            jfe.getKilledReduces());
         tEvent.addEventInfo("FAILED_MAPS", jfe.getFailedMaps());
         tEvent.addEventInfo("FAILED_REDUCES", jfe.getFailedReduces());
-        tEvent.addEventInfo("FINISHED_MAPS", jfe.getFinishedMaps());
-        tEvent.addEventInfo("FINISHED_REDUCES", jfe.getFinishedReduces());
+        tEvent.addEventInfo("SUCCESSFUL_MAPS", jfe.getSucceededMaps());
+        tEvent.addEventInfo("SUCCESSFUL_REDUCES", jfe.getSucceededReduces());
+        tEvent.addEventInfo("KILLED_MAPS", jfe.getKilledMaps());
+        tEvent.addEventInfo("KILLED_REDUCES", jfe.getKilledReduces());
         tEvent.addEventInfo("MAP_COUNTERS_GROUPS",
             JobHistoryEventUtils.countersToJSON(jfe.getMapCounters()));
         tEvent.addEventInfo("REDUCE_COUNTERS_GROUPS",
@@ -1124,7 +1163,7 @@
   private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
       createTaskEntity(HistoryEvent event, long timestamp, String taskId,
       String entityType, String relatedJobEntity, JobId jobId,
-      boolean setCreatedTime) {
+      boolean setCreatedTime, long taskIdPrefix) {
     org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
         createBaseEntity(event, timestamp, entityType, setCreatedTime);
     entity.setId(taskId);
@@ -1133,6 +1172,7 @@
           ((TaskStartedEvent)event).getTaskType().toString());
     }
     entity.addIsRelatedToEntity(relatedJobEntity, jobId.toString());
+    entity.setIdPrefix(taskIdPrefix);
     return entity;
   }
 
@@ -1141,11 +1181,12 @@
   private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
       createTaskAttemptEntity(HistoryEvent event, long timestamp,
       String taskAttemptId, String entityType, String relatedTaskEntity,
-      String taskId, boolean setCreatedTime) {
+      String taskId, boolean setCreatedTime, long taskAttemptIdPrefix) {
     org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
         createBaseEntity(event, timestamp, entityType, setCreatedTime);
     entity.setId(taskAttemptId);
     entity.addIsRelatedToEntity(relatedTaskEntity, taskId);
+    entity.setIdPrefix(taskAttemptIdPrefix);
     return entity;
   }
 
@@ -1196,6 +1237,8 @@
     String taskId = null;
     String taskAttemptId = null;
     boolean setCreatedTime = false;
+    long taskIdPrefix = 0;
+    long taskAttemptIdPrefix = 0;
 
     switch (event.getEventType()) {
     // Handle job events
@@ -1218,15 +1261,21 @@
     case TASK_STARTED:
       setCreatedTime = true;
       taskId = ((TaskStartedEvent)event).getTaskId().toString();
+      taskIdPrefix = TimelineServiceHelper.
+          invertLong(((TaskStartedEvent)event).getStartTime());
       break;
     case TASK_FAILED:
       taskId = ((TaskFailedEvent)event).getTaskId().toString();
+      taskIdPrefix = TimelineServiceHelper.
+          invertLong(((TaskFailedEvent)event).getStartTime());
       break;
     case TASK_UPDATED:
       taskId = ((TaskUpdatedEvent)event).getTaskId().toString();
       break;
     case TASK_FINISHED:
       taskId = ((TaskFinishedEvent)event).getTaskId().toString();
+      taskIdPrefix = TimelineServiceHelper.
+          invertLong(((TaskFinishedEvent)event).getStartTime());
       break;
     case MAP_ATTEMPT_STARTED:
     case REDUCE_ATTEMPT_STARTED:
@@ -1234,6 +1283,8 @@
       taskId = ((TaskAttemptStartedEvent)event).getTaskId().toString();
       taskAttemptId = ((TaskAttemptStartedEvent)event).
           getTaskAttemptId().toString();
+      taskAttemptIdPrefix = TimelineServiceHelper.
+          invertLong(((TaskAttemptStartedEvent)event).getStartTime());
       break;
     case CLEANUP_ATTEMPT_STARTED:
     case SETUP_ATTEMPT_STARTED:
@@ -1253,16 +1304,22 @@
           getTaskId().toString();
       taskAttemptId = ((TaskAttemptUnsuccessfulCompletionEvent)event).
           getTaskAttemptId().toString();
+      taskAttemptIdPrefix = TimelineServiceHelper.invertLong(
+          ((TaskAttemptUnsuccessfulCompletionEvent)event).getStartTime());
       break;
     case MAP_ATTEMPT_FINISHED:
       taskId = ((MapAttemptFinishedEvent)event).getTaskId().toString();
       taskAttemptId = ((MapAttemptFinishedEvent)event).
           getAttemptId().toString();
+      taskAttemptIdPrefix = TimelineServiceHelper.
+          invertLong(((MapAttemptFinishedEvent)event).getStartTime());
       break;
     case REDUCE_ATTEMPT_FINISHED:
       taskId = ((ReduceAttemptFinishedEvent)event).getTaskId().toString();
       taskAttemptId = ((ReduceAttemptFinishedEvent)event).
           getAttemptId().toString();
+      taskAttemptIdPrefix = TimelineServiceHelper.
+          invertLong(((ReduceAttemptFinishedEvent)event).getStartTime());
       break;
     case SETUP_ATTEMPT_FINISHED:
     case CLEANUP_ATTEMPT_FINISHED:
@@ -1291,12 +1348,12 @@
         // TaskEntity
         tEntity = createTaskEntity(event, timestamp, taskId,
             MAPREDUCE_TASK_ENTITY_TYPE, MAPREDUCE_JOB_ENTITY_TYPE,
-            jobId, setCreatedTime);
+            jobId, setCreatedTime, taskIdPrefix);
       } else {
         // TaskAttemptEntity
         tEntity = createTaskAttemptEntity(event, timestamp, taskAttemptId,
             MAPREDUCE_TASK_ATTEMPT_ENTITY_TYPE, MAPREDUCE_TASK_ENTITY_TYPE,
-            taskId, setCreatedTime);
+            taskId, setCreatedTime, taskAttemptIdPrefix);
       }
     }
     try {
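
The `setIdPrefix` calls above order timeline entities by start time: `TimelineServiceHelper.invertLong(startTime)` is `Long.MAX_VALUE - startTime`, so later starts map to smaller prefixes and an ascending scan of timeline storage returns the most recently started tasks and attempts first. A minimal sketch of that inversion, with a stand-in for the helper:

final class IdPrefixSketch {
  // Mirrors TimelineServiceHelper.invertLong as used above; stand-in only.
  static long invertLong(long key) {
    return Long.MAX_VALUE - key;
  }

  public static void main(String[] args) {
    long earlier = 1_000L;
    long later = 2_000L;
    // Later start times invert to smaller prefixes, so they sort first
    // in an ascending scan.
    System.out.println(invertLong(later) < invertLong(earlier)); // true
  }
}
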
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
index abe9518..22ae079 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
@@ -30,10 +30,12 @@
   private long firstReduceTaskLaunchTime; // ReduceAttemptStarted |
                                           // TaskAttemptStartEvent
   private long jobFinishTime;
-  private int numFinishedMaps;
+  private int numSucceededMaps;
   private int numFailedMaps;
-  private int numFinishedReduces;
+  private int numSucceededReduces;
   private int numFailedReduces;
+  private int numKilledMaps;
+  private int numKilledReduces;
   private int resourcesPerMap; // resources used per map/min resource
   private int resourcesPerReduce; // resources used per reduce/min resource
   // resource models
@@ -98,12 +100,12 @@
     this.jobFinishTime = jobFinishTime;
   }
 
-  public int getNumFinishedMaps() {
-    return numFinishedMaps;
+  public int getNumSucceededMaps() {
+    return numSucceededMaps;
   }
 
-  public void setNumFinishedMaps(int numFinishedMaps) {
-    this.numFinishedMaps = numFinishedMaps;
+  public void setNumSucceededMaps(int numSucceededMaps) {
+    this.numSucceededMaps = numSucceededMaps;
   }
 
   public int getNumFailedMaps() {
@@ -114,6 +116,22 @@
     this.numFailedMaps = numFailedMaps;
   }
 
+  public int getKilledMaps() {
+    return numKilledMaps;
+  }
+
+  public void setNumKilledMaps(int numKilledMaps) {
+    this.numKilledMaps = numKilledMaps;
+  }
+
+  public int getKilledReduces() {
+    return numKilledReduces;
+  }
+
+  public void setNumKilledReduces(int numKilledReduces) {
+    this.numKilledReduces = numKilledReduces;
+  }
+
   public int getResourcesPerMap() {
     return resourcesPerMap;
   }
@@ -122,12 +140,12 @@
     this.resourcesPerMap = resourcesPerMap;
   }
   
-  public int getNumFinishedReduces() {
-    return numFinishedReduces;
+  public int getNumSucceededReduces() {
+    return numSucceededReduces;
   }
 
-  public void setNumFinishedReduces(int numFinishedReduces) {
-    this.numFinishedReduces = numFinishedReduces;
+  public void setNumSucceededReduces(int numSucceededReduces) {
+    this.numSucceededReduces = numSucceededReduces;
   }
 
   public int getNumFailedReduces() {
@@ -204,8 +222,15 @@
       .add("finishTime", jobFinishTime)
       .add("resourcesPerMap", resourcesPerMap)
       .add("resourcesPerReduce", resourcesPerReduce)
-      .add("numMaps", numFinishedMaps + numFailedMaps)
-      .add("numReduces", numFinishedReduces + numFailedReduces)
+      .add("numMaps", numSucceededMaps + numFailedMaps + numKilledMaps)
+      .add("numReduces", numSucceededReduces + numFailedReduces
+          + numKilledReduces)
+      .add("succededMaps", numSucceededMaps)
+      .add("succeededReduces", numSucceededReduces)
+      .add("failedMaps", numFailedMaps)
+      .add("failedReduces", numFailedReduces)
+      .add("killedMaps", numKilledMaps)
+      .add("killedReduces", numKilledReduces)
       .add("user", user)
       .add("queue", queue)
       .add("status", jobStatus)
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
index 7738810..437707b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
@@ -65,6 +65,10 @@
   int getTotalReduces();
   int getCompletedMaps();
   int getCompletedReduces();
+  int getFailedMaps();
+  int getFailedReduces();
+  int getKilledMaps();
+  int getKilledReduces();
   float getProgress();
   boolean isUber();
   String getUserName();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 6880b6c2..abc3e61 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -1684,6 +1684,10 @@
               finishTime,
               succeededMapTaskCount,
               succeededReduceTaskCount,
+              failedMapTaskCount,
+              failedReduceTaskCount,
+              killedMapTaskCount,
+              killedReduceTaskCount,
               finalState.toString(),
               diagnostics);
       eventHandler.handle(new JobHistoryEvent(jobId,
@@ -1748,6 +1752,7 @@
         job.oldJobId, job.finishTime,
         job.succeededMapTaskCount, job.succeededReduceTaskCount,
         job.failedMapTaskCount, job.failedReduceTaskCount,
+        job.killedMapTaskCount, job.killedReduceTaskCount,
         job.finalMapCounters,
         job.finalReduceCounters,
         job.fullCounters);
@@ -1797,7 +1802,7 @@
       job.setFinishTime();
       JobUnsuccessfulCompletionEvent failedEvent =
           new JobUnsuccessfulCompletionEvent(job.oldJobId,
-              job.finishTime, 0, 0,
+              job.finishTime, 0, 0, 0, 0, 0, 0,
               JobStateInternal.KILLED.toString(), job.diagnostics);
       job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
       job.finished(JobStateInternal.KILLED);
@@ -1954,8 +1959,8 @@
     @Override
     public JobStateInternal transition(JobImpl job, JobEvent event) {
       job.completedTaskCount++;
-      LOG.info("Num completed Tasks: " + job.completedTaskCount);
       JobTaskEvent taskEvent = (JobTaskEvent) event;
+      LOG.info("Num completed Tasks: " + job.completedTaskCount);
       Task task = job.tasks.get(taskEvent.getTaskID());
       if (taskEvent.getState() == TaskState.SUCCEEDED) {
         taskSucceeded(job, task);
@@ -1991,11 +1996,15 @@
         job.allowedMapFailuresPercent*job.numMapTasks ||
         job.failedReduceTaskCount*100 > 
         job.allowedReduceFailuresPercent*job.numReduceTasks) {
+
         job.setFinishTime();
 
         String diagnosticMsg = "Job failed as tasks failed. " +
             "failedMaps:" + job.failedMapTaskCount + 
-            " failedReduces:" + job.failedReduceTaskCount;
+            " failedReduces:" + job.failedReduceTaskCount +
+            " killedMaps:" + job.killedMapTaskCount +
+            " killedReduces: " + job.killedReduceTaskCount;
+
         LOG.info(diagnosticMsg);
         job.addDiagnostic(diagnosticMsg);
 
@@ -2226,7 +2235,13 @@
       job.setFinishTime();
       JobUnsuccessfulCompletionEvent failedEvent =
           new JobUnsuccessfulCompletionEvent(job.oldJobId,
-              job.finishTime, 0, 0,
+              job.finishTime,
+              job.succeededMapTaskCount,
+              job.succeededReduceTaskCount,
+              job.failedMapTaskCount,
+              job.failedReduceTaskCount,
+              job.killedMapTaskCount,
+              job.killedReduceTaskCount,
               jobHistoryString, job.diagnostics);
       job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
       job.finished(terminationState);
@@ -2266,4 +2281,24 @@
   public void setJobPriority(Priority priority) {
     this.jobPriority = priority;
   }
+
+  @Override
+  public int getFailedMaps() {
+    return failedMapTaskCount;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return failedReduceTaskCount;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return killedMapTaskCount;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return killedReduceTaskCount;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 9ea1b9a..3faad48 100755
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -1530,7 +1530,7 @@
             StringUtils.join(
                 LINE_SEPARATOR, taskAttempt.getDiagnostics()),
                 taskAttempt.getCounters(), taskAttempt
-                .getProgressSplitBlock().burst());
+                .getProgressSplitBlock().burst(), taskAttempt.launchTime);
     return tauce;
   }
 
@@ -1943,35 +1943,35 @@
         this.container == null ? -1 : this.container.getNodeId().getPort();
     if (attemptId.getTaskId().getTaskType() == TaskType.MAP) {
       MapAttemptFinishedEvent mfe =
-         new MapAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
-         TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
-         state.toString(),
-         this.reportedStatus.mapFinishTime,
-         finishTime,
-         containerHostName,
-         containerNodePort,
-         this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
-         this.reportedStatus.stateString,
-         getCounters(),
-         getProgressSplitBlock().burst());
-         eventHandler.handle(
-           new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
+          new MapAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
+          TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
+          state.toString(),
+          this.reportedStatus.mapFinishTime,
+          finishTime,
+          containerHostName,
+          containerNodePort,
+          this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
+          this.reportedStatus.stateString,
+          getCounters(),
+          getProgressSplitBlock().burst(), launchTime);
+      eventHandler.handle(
+          new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
     } else {
-       ReduceAttemptFinishedEvent rfe =
-         new ReduceAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
-         TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
-         state.toString(),
-         this.reportedStatus.shuffleFinishTime,
-         this.reportedStatus.sortFinishTime,
-         finishTime,
-         containerHostName,
-         containerNodePort,
-         this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
-         this.reportedStatus.stateString,
-         getCounters(),
-         getProgressSplitBlock().burst());
-         eventHandler.handle(
-           new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
+      ReduceAttemptFinishedEvent rfe =
+          new ReduceAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
+          TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
+          state.toString(),
+          this.reportedStatus.shuffleFinishTime,
+          this.reportedStatus.sortFinishTime,
+          finishTime,
+          containerHostName,
+          containerNodePort,
+          this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
+          this.reportedStatus.stateString,
+          getCounters(),
+          getProgressSplitBlock().burst(), launchTime);
+      eventHandler.handle(
+          new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
     }
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index 8a6fa30..228ae24 100755
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -139,6 +139,8 @@
   private final Set<TaskAttemptId> inProgressAttempts;
 
   private boolean historyTaskStartGenerated = false;
+  // Launch time reported in history events.
+  private long launchTime;
   
   private static final SingleArcTransition<TaskImpl, TaskEvent> 
      ATTEMPT_KILLED_TRANSITION = new AttemptKilledTransition();
@@ -705,8 +707,9 @@
   }
 
   private void sendTaskStartedEvent() {
+    launchTime = getLaunchTime();
     TaskStartedEvent tse = new TaskStartedEvent(
-        TypeConverter.fromYarn(taskId), getLaunchTime(),
+        TypeConverter.fromYarn(taskId), launchTime,
         TypeConverter.fromYarn(taskId.getTaskType()),
         getSplitsAsString());
     eventHandler
@@ -714,18 +717,19 @@
     historyTaskStartGenerated = true;
   }
 
-  private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task, TaskStateInternal taskState) {
+  private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task,
+      TaskStateInternal taskState) {
     TaskFinishedEvent tfe =
       new TaskFinishedEvent(TypeConverter.fromYarn(task.taskId),
         TypeConverter.fromYarn(task.successfulAttempt),
         task.getFinishTime(task.successfulAttempt),
         TypeConverter.fromYarn(task.taskId.getTaskType()),
-        taskState.toString(),
-        task.getCounters());
+        taskState.toString(), task.getCounters(), task.launchTime);
     return tfe;
   }
   
-  private static TaskFailedEvent createTaskFailedEvent(TaskImpl task, List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
+  private static TaskFailedEvent createTaskFailedEvent(TaskImpl task,
+      List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
     StringBuilder errorSb = new StringBuilder();
     if (diag != null) {
       for (String d : diag) {
@@ -740,7 +744,7 @@
         errorSb.toString(),
         taskState.toString(),
         taId == null ? null : TypeConverter.fromYarn(taId),
-        task.getCounters());
+        task.getCounters(), task.launchTime);
     return taskFailedEvent;
   }
   
@@ -861,7 +865,8 @@
       TaskFailedEvent tfe = new TaskFailedEvent(taskInfo.getTaskId(),
           taskInfo.getFinishTime(), taskInfo.getTaskType(),
           taskInfo.getError(), taskInfo.getTaskStatus(),
-          taskInfo.getFailedDueToAttemptId(), taskInfo.getCounters());
+          taskInfo.getFailedDueToAttemptId(), taskInfo.getCounters(),
+          launchTime);
       eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
       eventHandler.handle(
           new JobTaskEvent(taskId, getExternalState(taskState)));
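
The pattern above caches the launch time once, when the started event is emitted, and threads the same value into every later finished/failed event; the timeline handler then derives a stable `idPrefix` from `getStartTime()` on any of them. A minimal sketch of that caching shape, with a stand-in clock:

class LaunchTimeSketch {
  private long launchTime; // cached once, as in TaskImpl above

  private long readClock() {
    return System.currentTimeMillis(); // stand-in for the real clock
  }

  void sendTaskStartedEvent() {
    launchTime = readClock(); // recorded exactly once at start
  }

  long startTimeForLaterEvents() {
    return launchTime; // reused, never re-read, so all events agree
  }

  public static void main(String[] args) {
    LaunchTimeSketch t = new LaunchTimeSketch();
    t.sendTaskStartedEvent();
    System.out.println(t.startTimeForLaterEvents());
  }
}
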
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 0952797..0dc7642 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -848,7 +848,8 @@
       updateAMRMToken(response.getAMRMToken());
     }
 
-    List<ContainerStatus> finishedContainers = response.getCompletedContainersStatuses();
+    List<ContainerStatus> finishedContainers =
+        response.getCompletedContainersStatuses();
 
     // propagate preemption requests
     final PreemptionMessage preemptReq = response.getPreemptionMessage();
@@ -877,16 +878,13 @@
 
     handleUpdatedNodes(response);
     handleJobPriorityChange(response);
-    // handle receiving the timeline collector address for this app
-    String collectorAddr = response.getCollectorAddr();
+    // Handle receiving the timeline collector address and token for this app.
     MRAppMaster.RunningAppContext appContext =
         (MRAppMaster.RunningAppContext)this.getContext();
-    if (collectorAddr != null && !collectorAddr.isEmpty()
-        && appContext.getTimelineV2Client() != null) {
-      appContext.getTimelineV2Client().setTimelineServiceAddress(
-          response.getCollectorAddr());
+    if (appContext.getTimelineV2Client() != null) {
+      appContext.getTimelineV2Client().
+          setTimelineCollectorInfo(response.getCollectorInfo());
     }
-
     for (ContainerStatus cont : finishedContainers) {
       processFinishedContainer(cont);
     }
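
Instead of copying only the collector address out of the heartbeat, the allocator now hands the whole `CollectorInfo` (address plus delegation token) to the timeline client on every allocate response; the test added below verifies the token lands in the UGI and is replaced when its sequence number changes. A hedged sketch of the null-guarded call pattern, with stand-in types:

final class HeartbeatSketch {
  interface TimelineClientSketch {
    void setTimelineCollectorInfo(String collectorInfo);
  }

  static void onAllocateResponse(String collectorInfo,
      TimelineClientSketch client) {
    if (client != null) {
      // collectorInfo may be null before a collector is bound; the real
      // client is expected to tolerate that, so it is passed through.
      client.setTimelineCollectorInfo(collectorInfo);
    }
  }

  public static void main(String[] args) {
    onAllocateResponse("localhost:1234",
        info -> System.out.println("collector now at " + info));
  }
}
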
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
index ac510b3..e271319 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
@@ -58,7 +58,7 @@
     Counters counters = new Counters();
     TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
         TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
-        counters);
+        counters, 234);
     assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
 
     assertEquals(test.getCounters(), counters);
@@ -69,7 +69,7 @@
     assertEquals(test.getTaskId(), tid);
     assertEquals(test.getTaskStatus(), "TEST");
     assertEquals(test.getTaskType(), TaskType.REDUCE);
-
+    assertEquals(234, test.getStartTime());
   }
 
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index caf8c67..47caf44 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -148,7 +148,7 @@
 
       // First completion event, but min-queue-size for batching flushes is 10
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
-          t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
+          t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
       verify(mockWriter).flush();
 
     } finally {
@@ -184,7 +184,7 @@
 
       for (int i = 0 ; i < 100 ; i++) {
         queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
-            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
+            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
       }
 
       handleNextNEvents(jheh, 9);
@@ -229,7 +229,7 @@
 
       for (int i = 0 ; i < 100 ; i++) {
         queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
-            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
+            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
       }
 
       handleNextNEvents(jheh, 9);
@@ -272,10 +272,11 @@
 
       for (int i = 0 ; i < 100 ; i++) {
         queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
-            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
+            t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
       }
       queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters())));
+          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, 0, 0, null, null,
+          new Counters())));
 
       handleNextNEvents(jheh, 29);
       verify(mockWriter, times(0)).flush();
@@ -308,22 +309,22 @@
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.ERROR.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.ERROR.toString())));
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
-        new Counters(), new Counters())));
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
+          new Counters(), new Counters())));
       verify(jheh, times(2)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
       verify(jheh, times(3)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.KILLED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString())));
       verify(jheh, times(4)).processDoneFiles(any(JobId.class));
 
       mockWriter = jheh.getEventWriter();
@@ -354,22 +355,22 @@
       // skip processing done files
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.ERROR.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.ERROR.toString())));
       verify(jheh, times(0)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
       verify(jheh, times(1)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
       verify(jheh, times(2)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.KILLED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString())));
       verify(jheh, times(3)).processDoneFiles(t.jobId);
 
       mockWriter = jheh.getEventWriter();
@@ -405,7 +406,8 @@
               "nmhost", 3000, 4000, -1)));
       handleEvent(jheh, new JobHistoryEvent(params.jobId,
           new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(
-              params.jobId), 0, 0, 0, JobStateInternal.FAILED.toString())));
+              params.jobId), 0, 0, 0, 0, 0, 0, 0,
+              JobStateInternal.FAILED.toString())));
 
       // verify the value of the sensitive property in job.xml is restored.
       Assert.assertEquals(sensitivePropertyName + " is modified.",
@@ -476,7 +478,7 @@
           t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
 
       // If we got here then event handler worked but we don't know with which
@@ -546,7 +548,7 @@
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
 
       Assert.assertEquals(mi.getJobIndexInfo().getSubmitTime(), 100);
       Assert.assertEquals(mi.getJobIndexInfo().getJobStartTime(), 200);
@@ -642,7 +644,7 @@
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
               new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
-              0, new Counters(), new Counters(), new Counters()), currentTime));
+              0, 0, 0, new Counters(), new Counters(), new Counters()),
+              currentTime));
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -668,7 +670,8 @@
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
             new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
-            0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
+            0, 0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString()),
+            currentTime + 20));
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -944,7 +947,7 @@
 
       // Job finishes and successfully writes history
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
 
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
@@ -978,7 +981,7 @@
 
       // Job finishes, but doesn't successfully write history
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
       verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class));
@@ -1009,8 +1012,8 @@
       // Job finishes, but doesn't successfully write history
       try {
         handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-            TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
-            new Counters(), new Counters())));
+            TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0,
+            new Counters(), new Counters(), new Counters())));
         throw new RuntimeException(
             "processDoneFiles didn't throw, but should have");
       } catch (YarnRuntimeException yre) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
index 1bea5c8..b6d8bbf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
@@ -43,10 +43,12 @@
     summary.setFirstMapTaskLaunchTime(4L);
     summary.setFirstReduceTaskLaunchTime(5L);
     summary.setJobFinishTime(6L);
-    summary.setNumFinishedMaps(1);
+    summary.setNumSucceededMaps(1);
     summary.setNumFailedMaps(0);
-    summary.setNumFinishedReduces(1);
+    summary.setNumSucceededReduces(1);
     summary.setNumFailedReduces(0);
+    summary.setNumKilledMaps(0);
+    summary.setNumKilledReduces(0);
     summary.setUser("testUser");
     summary.setQueue("testQueue");
     summary.setJobStatus("testJobStatus");
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index ccacf1c..bfb8d79 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -640,6 +640,25 @@
       public void setJobPriority(Priority priority) {
         // do nothing
       }
+
+      @Override
+      public int getFailedMaps() {
+        return 0;
+      }
+
+      @Override
+      public int getFailedReduces() {
+        return 0;
+      }
+
+      @Override
+      public int getKilledMaps() {
+        return 0;
+      }
+
+      @Override
+      public int getKilledReduces() {
+        return 0;
+      }
     };
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index 301d498..e1fa198 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -533,6 +533,26 @@
     public void setJobPriority(Priority priority) {
       // do nothing
     }
+
+    @Override
+    public int getFailedMaps() {
+      return 0;
+    }
+
+    @Override
+    public int getFailedReduces() {
+      return 0;
+    }
+
+    @Override
+    public int getKilledMaps() {
+      return 0;
+    }
+
+    @Override
+    public int getKilledReduces() {
+      return 0;
+    }
   }
 
   /*
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 6c51626..e4a8a1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.rm;
 
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyFloat;
 import static org.mockito.Matchers.anyInt;
@@ -27,6 +28,7 @@
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -99,6 +101,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -110,6 +113,7 @@
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -121,6 +125,7 @@
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -140,6 +145,7 @@
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.ControlledClock;
+import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.junit.After;
 import org.junit.Assert;
@@ -749,6 +755,96 @@
   }
 
   @Test
+  public void testUpdateCollectorInfo() throws Exception {
+    LOG.info("Running testUpdateCollectorInfo");
+    Configuration conf = new Configuration();
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+    String localAddr = "localhost:1234";
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    // Generate a timeline delegation token.
+    TimelineDelegationTokenIdentifier ident =
+        new TimelineDelegationTokenIdentifier(new Text(ugi.getUserName()),
+        new Text("renewer"), null);
+    ident.setSequenceNumber(1);
+    Token<TimelineDelegationTokenIdentifier> collectorToken =
+        new Token<TimelineDelegationTokenIdentifier>(ident.getBytes(),
+        new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
+        new Text(localAddr));
+    org.apache.hadoop.yarn.api.records.Token token =
+        org.apache.hadoop.yarn.api.records.Token.newInstance(
+            collectorToken.getIdentifier(), collectorToken.getKind().toString(),
+            collectorToken.getPassword(),
+            collectorToken.getService().toString());
+    CollectorInfo collectorInfo = CollectorInfo.newInstance(localAddr, token);
+    // Mock scheduler to serve Allocate requests.
+    final MockSchedulerForTimelineCollector mockScheduler =
+        new MockSchedulerForTimelineCollector(collectorInfo);
+    MyContainerAllocator allocator =
+        new MyContainerAllocator(null, conf, attemptId, mockJob,
+            SystemClock.getInstance()) {
+          @Override
+          protected void register() {
+          }
+
+          @Override
+          protected ApplicationMasterProtocol createSchedulerProxy() {
+            return mockScheduler;
+          }
+        };
+    // Initially UGI should have no tokens.
+    ArrayList<Token<? extends TokenIdentifier>> tokens =
+        new ArrayList<>(ugi.getTokens());
+    assertEquals(0, tokens.size());
+    TimelineV2Client client = spy(TimelineV2Client.createTimelineClient(appId));
+    client.init(conf);
+    when(((RunningAppContext)allocator.getContext()).getTimelineV2Client()).
+        thenReturn(client);
+
+    // Send allocate request to RM and fetch collector address and token.
+    allocator.schedule();
+    verify(client).setTimelineCollectorInfo(collectorInfo);
+    // Verify if token has been updated in UGI.
+    tokens = new ArrayList<>(ugi.getTokens());
+    assertEquals(1, tokens.size());
+    assertEquals(TimelineDelegationTokenIdentifier.KIND_NAME,
+        tokens.get(0).getKind());
+    assertEquals(collectorToken.decodeIdentifier(),
+        tokens.get(0).decodeIdentifier());
+
+    // Generate new collector token, send allocate request to RM and fetch the
+    // new token.
+    ident.setSequenceNumber(100);
+    Token<TimelineDelegationTokenIdentifier> collectorToken1 =
+        new Token<TimelineDelegationTokenIdentifier>(ident.getBytes(),
+        new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
+        new Text(localAddr));
+    token = org.apache.hadoop.yarn.api.records.Token.newInstance(
+        collectorToken1.getIdentifier(), collectorToken1.getKind().toString(),
+        collectorToken1.getPassword(), collectorToken1.getService().toString());
+    collectorInfo = CollectorInfo.newInstance(localAddr, token);
+    mockScheduler.updateCollectorInfo(collectorInfo);
+    allocator.schedule();
+    verify(client).setTimelineCollectorInfo(collectorInfo);
+    // Verify if new token has been updated in UGI.
+    tokens = new ArrayList<>(ugi.getTokens());
+    assertEquals(1, tokens.size());
+    assertEquals(TimelineDelegationTokenIdentifier.KIND_NAME,
+        tokens.get(0).getKind());
+    assertEquals(collectorToken1.decodeIdentifier(),
+        tokens.get(0).decodeIdentifier());
+    allocator.close();
+  }
+
+  @Test
   public void testMapReduceScheduling() throws Exception {
 
     LOG.info("Running testMapReduceScheduling");
@@ -3488,6 +3584,46 @@
     }
   }
 
+  private final static class MockSchedulerForTimelineCollector
+      implements ApplicationMasterProtocol {
+    private CollectorInfo collectorInfo;
+
+    private MockSchedulerForTimelineCollector(CollectorInfo info) {
+      this.collectorInfo = info;
+    }
+
+    private void updateCollectorInfo(CollectorInfo info) {
+      collectorInfo = info;
+    }
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        RegisterApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return Records.newRecord(RegisterApplicationMasterResponse.class);
+    }
+
+    @Override
+    public FinishApplicationMasterResponse finishApplicationMaster(
+        FinishApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return FinishApplicationMasterResponse.newInstance(false);
+    }
+
+    @Override
+    public AllocateResponse allocate(AllocateRequest request)
+        throws YarnException, IOException {
+      AllocateResponse response = AllocateResponse.newInstance(
+          request.getResponseId(), Collections.<ContainerStatus>emptyList(),
+          Collections.<Container>emptyList(),
+          Collections.<NodeReport>emptyList(),
+          Resource.newInstance(512000, 1024), null, 10, null,
+          Collections.<NMToken>emptyList());
+      response.setCollectorInfo(collectorInfo);
+      return response;
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     TestRMContainerAllocator t = new TestRMContainerAllocator();
     t.testSimple();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index db8ae49..b88b012 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -46,10 +46,6 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-common</artifactId>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
index 341f38d..d1d392e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
@@ -28,7 +28,8 @@
 import java.util.zip.ZipEntry;
 
 import org.junit.Assert;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -49,8 +50,6 @@
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.junit.Test;
-
 /**
  * Tests the use of the
  * {@link org.apache.hadoop.mapreduce.filecache.DistributedCache} within the
@@ -66,7 +65,7 @@
  * This test is not fast: it uses MiniMRCluster.
  */
 @SuppressWarnings("deprecation")
-public class TestMRWithDistributedCache extends TestCase {
+public class TestMRWithDistributedCache {
   private static Path TEST_ROOT_DIR =
     new Path(System.getProperty("test.build.data","/tmp"));
   private static File symlinkFile = new File("distributed.first.symlink");
@@ -97,23 +96,23 @@
       FileSystem fs = LocalFileSystem.get(conf);
 
       // Check that 2 files and 2 archives are present
-      TestCase.assertEquals(2, localFiles.length);
-      TestCase.assertEquals(2, localArchives.length);
-      TestCase.assertEquals(2, files.length);
-      TestCase.assertEquals(2, archives.length);
+      Assert.assertEquals(2, localFiles.length);
+      Assert.assertEquals(2, localArchives.length);
+      Assert.assertEquals(2, files.length);
+      Assert.assertEquals(2, archives.length);
 
       // Check the file name
-      TestCase.assertTrue(files[0].getPath().endsWith("distributed.first"));
-      TestCase.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
+      Assert.assertTrue(files[0].getPath().endsWith("distributed.first"));
+      Assert.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
       
       // Check lengths of the files
-      TestCase.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
-      TestCase.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
+      Assert.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
+      Assert.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
 
       // Check extraction of the archive
-      TestCase.assertTrue(fs.exists(new Path(localArchives[0],
+      Assert.assertTrue(fs.exists(new Path(localArchives[0],
           "distributed.jar.inside3")));
-      TestCase.assertTrue(fs.exists(new Path(localArchives[1],
+      Assert.assertTrue(fs.exists(new Path(localArchives[1],
           "distributed.jar.inside4")));
 
       // Check the class loaders
@@ -121,18 +120,18 @@
       ClassLoader cl = Thread.currentThread().getContextClassLoader();
       // Both the file and the archive were added to classpath, so both
       // should be reachable via the class loader.
-      TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
-      TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
-      TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
+      Assert.assertNotNull(cl.getResource("distributed.jar.inside2"));
+      Assert.assertNotNull(cl.getResource("distributed.jar.inside3"));
+      Assert.assertNull(cl.getResource("distributed.jar.inside4"));
 
       // Check that the symlink for the renaming was created in the cwd;
-      TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
+      Assert.assertTrue("symlink distributed.first.symlink doesn't exist",
           symlinkFile.exists());
-      TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
+      Assert.assertEquals("symlink distributed.first.symlink length not 1", 1,
           symlinkFile.length());
       
       //This last one is a difference between MRv2 and MRv1
-      TestCase.assertTrue("second file should be symlinked too",
+      Assert.assertTrue("second file should be symlinked too",
           expectedAbsentSymlinkFile.exists());
     }
 
@@ -188,6 +187,7 @@
   }
 
   /** Tests using the local job runner. */
+  @Test
   public void testLocalJobRunner() throws Exception {
     symlinkFile.delete(); // ensure symlink is not present (e.g. if test is
                           // killed part way through)
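
The conversion above is the standard JUnit 3 to JUnit 4 move: stop extending `junit.framework.TestCase`, annotate each test method with `@Test`, and call assertions through `org.junit.Assert`. A minimal sketch of the resulting shape (hypothetical test class, JUnit 4 on the classpath):

import org.junit.Assert;
import org.junit.Test;

public class MigrationSketch {
  @Test
  public void testSomething() {
    Assert.assertEquals(2, 1 + 1); // qualified asserts, as in the diff
  }
}
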
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
index c7b3eb8..b5a4d87 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
@@ -54,7 +54,9 @@
           {"name": "failedReduces", "type": "int"},
           {"name": "totalCounters", "type": "JhCounters"},
           {"name": "mapCounters", "type": "JhCounters"},
-          {"name": "reduceCounters", "type": "JhCounters"}
+          {"name": "reduceCounters", "type": "JhCounters"},
+          {"name": "killedMaps", "type": "int", "default": -1},
+          {"name": "killedReduces", "type": "int", "default": -1}
       ]
      },
 
@@ -136,7 +138,11 @@
           {"name": "finishedMaps", "type": "int"},
           {"name": "finishedReduces", "type": "int"},
           {"name": "jobStatus", "type": "string"},
-          {"name": "diagnostics", "type": ["null","string"], "default": null}
+          {"name": "diagnostics", "type": ["null","string"], "default": null},
+          {"name": "failedMaps", "type": "int", "default": -1},
+          {"name": "failedReduces", "type": "int",  "default": -1},
+          {"name": "killedMaps", "type": "int",  "default": -1},
+          {"name": "killedReduces", "type": "int",  "default": -1}
       ]
      },
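
Appending the new int fields with `"default": -1` is what keeps this Avro schema change backward compatible: when a reader built against the new schema resolves a history file written with the old one, the missing fields take their defaults, and -1 then means "not recorded" rather than a genuine count of zero. A hedged sketch of the reader-side convention (names illustrative, not the generated Avro API):

final class KilledCountSketch {
  static String describeKilledMaps(int killedMaps) {
    // -1 marks an event written before the field existed.
    return killedMaps < 0 ? "unknown (old history file)"
                          : Integer.toString(killedMaps);
  }

  public static void main(String[] args) {
    System.out.println(describeKilledMaps(-1)); // unknown (old history file)
    System.out.println(describeKilledMaps(3));  // 3
  }
}
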
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
index 9f8edb5..ada14db 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.security.Credentials;
@@ -293,7 +292,6 @@
   private static boolean checkPermissionOfOther(FileSystem fs, Path path,
       FsAction action, Map<URI, FileStatus> statCache) throws IOException {
     FileStatus status = getFileStatus(fs, path.toUri(), statCache);
-    FsPermission perms = status.getPermission();
 
     // Encrypted files are always treated as private. This stance has two
     // important side effects.  The first is that the encrypted files will be
@@ -302,8 +300,8 @@
     // world readable permissions that is stored in an encryption zone from
     // being localized as a publicly shared file with world readable
     // permissions.
-    if (!perms.getEncryptedBit()) {
-      FsAction otherAction = perms.getOtherAction();
+    if (!status.isEncrypted()) {
+      FsAction otherAction = status.getPermission().getOtherAction();
       if (otherAction.implies(action)) {
         return true;
       }
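
Switching from the raw permission bit to `FileStatus.isEncrypted()` keeps the rule the comment describes: a file in an encryption zone is always localized as a private resource, no matter what its "other" POSIX bits say. A hedged sketch of that decision, with plain booleans standing in for FileStatus:

final class PublicnessSketch {
  static boolean isPublic(boolean encrypted, boolean otherCanRead) {
    // Encrypted files are private regardless of world-readable bits.
    return !encrypted && otherCanRead;
  }

  public static void main(String[] args) {
    System.out.println(isPublic(false, true)); // true
    System.out.println(isPublic(true, true));  // false
  }
}
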
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 5f10fdf..c25c73e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -327,12 +327,12 @@
     /** Generate analysis information for the parsed job */
     public AnalyzedJob (JobInfo job) {
       Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
-      int finishedMaps = (int) job.getFinishedMaps();
-      int finishedReduces = (int) job.getFinishedReduces();
+      int succeededMaps = (int) job.getSucceededMaps();
+      int succeededReduces = (int) job.getSucceededReduces();
       mapTasks = 
-        new JobHistoryParser.TaskAttemptInfo[finishedMaps]; 
+        new JobHistoryParser.TaskAttemptInfo[succeededMaps];
       reduceTasks = 
-        new JobHistoryParser.TaskAttemptInfo[finishedReduces]; 
+        new JobHistoryParser.TaskAttemptInfo[succeededReduces];
       int mapIndex = 0 , reduceIndex=0; 
       avgMapTime = 0;
       avgReduceTime = 0;
@@ -360,12 +360,12 @@
           }
         }
       }
-      if (finishedMaps > 0) {
-        avgMapTime /= finishedMaps;
+      if (succeededMaps > 0) {
+        avgMapTime /= succeededMaps;
       }
-      if (finishedReduces > 0) {
-        avgReduceTime /= finishedReduces;
-        avgShuffleTime /= finishedReduces;
+      if (succeededReduces > 0) {
+        avgReduceTime /= succeededReduces;
+        avgShuffleTime /= succeededReduces;
       }
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index d3da9f4..685fa05 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -236,7 +236,7 @@
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
         dateFormat, ts.setupFinished, ts.setupStarted));
     taskSummary.append("\nMap\t").append(ts.totalMaps);
-    taskSummary.append("\t").append(job.getFinishedMaps());
+    taskSummary.append("\t").append(job.getSucceededMaps());
     taskSummary.append("\t\t").append(ts.numFailedMaps);
     taskSummary.append("\t").append(ts.numKilledMaps);
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
@@ -244,7 +244,7 @@
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
         dateFormat, ts.mapFinished, ts.mapStarted));
     taskSummary.append("\nReduce\t").append(ts.totalReduces);
-    taskSummary.append("\t").append(job.getFinishedReduces());
+    taskSummary.append("\t").append(job.getSucceededReduces());
     taskSummary.append("\t\t").append(ts.numFailedReduces);
     taskSummary.append("\t").append(ts.numKilledReduces);
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
index 456dcf7..cfb6641 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
@@ -145,7 +145,7 @@
     jSums.put("setup", jSumSetup);
     JSONObject jSumMap = new JSONObject();
     jSumMap.put("total", ts.totalMaps);
-    jSumMap.put("successful", job.getFinishedMaps());
+    jSumMap.put("successful", job.getSucceededMaps());
     jSumMap.put("failed", ts.numFailedMaps);
     jSumMap.put("killed", ts.numKilledMaps);
     jSumMap.put("startTime", ts.mapStarted);
@@ -153,7 +153,7 @@
     jSums.put("map", jSumMap);
     JSONObject jSumReduce = new JSONObject();
     jSumReduce.put("total", ts.totalReduces);
-    jSumReduce.put("successful", job.getFinishedReduces());
+    jSumReduce.put("successful", job.getSucceededReduces());
     jSumReduce.put("failed", ts.numFailedReduces);
     jSumReduce.put("killed", ts.numKilledReduces);
     jSumReduce.put("startTime", ts.reduceStarted);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
index ea21f60..34e4b2c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
@@ -36,16 +36,18 @@
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class JobFinishedEvent  implements HistoryEvent {
+public class JobFinishedEvent implements HistoryEvent {
 
   private JobFinished datum = null;
 
   private JobID jobId;
   private long finishTime;
-  private int finishedMaps;
-  private int finishedReduces;
+  private int succeededMaps;
+  private int succeededReduces;
   private int failedMaps;
   private int failedReduces;
+  private int killedMaps;
+  private int killedReduces;
   private Counters mapCounters;
   private Counters reduceCounters;
   private Counters totalCounters;
@@ -54,8 +56,8 @@
    * Create an event to record successful job completion
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps The number of finished maps
-   * @param finishedReduces The number of finished reduces
+   * @param succeededMaps The number of succeeded maps
+   * @param succeededReduces The number of succeeded reduces
    * @param failedMaps The number of failed maps
    * @param failedReduces The number of failed reduces
    * @param mapCounters Map Counters for the job
@@ -63,16 +65,19 @@
    * @param totalCounters Total Counters for the job
    */
   public JobFinishedEvent(JobID id, long finishTime,
-      int finishedMaps, int finishedReduces,
+      int succeededMaps, int succeededReduces,
       int failedMaps, int failedReduces,
+      int killedMaps, int killedReduces,
       Counters mapCounters, Counters reduceCounters,
       Counters totalCounters) {
     this.jobId = id;
     this.finishTime = finishTime;
-    this.finishedMaps = finishedMaps;
-    this.finishedReduces = finishedReduces;
+    this.succeededMaps = succeededMaps;
+    this.succeededReduces = succeededReduces;
     this.failedMaps = failedMaps;
     this.failedReduces = failedReduces;
+    this.killedMaps = killedMaps;
+    this.killedReduces = killedReduces;
     this.mapCounters = mapCounters;
     this.reduceCounters = reduceCounters;
     this.totalCounters = totalCounters;
@@ -85,10 +90,14 @@
       datum = new JobFinished();
       datum.setJobid(new Utf8(jobId.toString()));
       datum.setFinishTime(finishTime);
-      datum.setFinishedMaps(finishedMaps);
-      datum.setFinishedReduces(finishedReduces);
+      // using finishedMaps & finishedReduces in the Avro schema for backward
+      // compatibility
+      datum.setFinishedMaps(succeededMaps);
+      datum.setFinishedReduces(succeededReduces);
       datum.setFailedMaps(failedMaps);
       datum.setFailedReduces(failedReduces);
+      datum.setKilledMaps(killedMaps);
+      datum.setKilledReduces(killedReduces);
       datum.setMapCounters(EventWriter.toAvro(mapCounters, "MAP_COUNTERS"));
       datum.setReduceCounters(EventWriter.toAvro(reduceCounters,
           "REDUCE_COUNTERS"));
@@ -102,10 +111,12 @@
     this.datum = (JobFinished) oDatum;
     this.jobId = JobID.forName(datum.getJobid().toString());
     this.finishTime = datum.getFinishTime();
-    this.finishedMaps = datum.getFinishedMaps();
-    this.finishedReduces = datum.getFinishedReduces();
+    this.succeededMaps = datum.getFinishedMaps();
+    this.succeededReduces = datum.getFinishedReduces();
     this.failedMaps = datum.getFailedMaps();
     this.failedReduces = datum.getFailedReduces();
+    this.killedMaps = datum.getKilledMaps();
+    this.killedReduces = datum.getKilledReduces();
     this.mapCounters = EventReader.fromAvro(datum.getMapCounters());
     this.reduceCounters = EventReader.fromAvro(datum.getReduceCounters());
     this.totalCounters = EventReader.fromAvro(datum.getTotalCounters());
@@ -120,13 +131,17 @@
   /** Get the job finish time */
   public long getFinishTime() { return finishTime; }
-  /** Get the number of finished maps for the job */
-  public int getFinishedMaps() { return finishedMaps; }
+  /** Get the number of succeeded maps for the job */
+  public int getSucceededMaps() { return succeededMaps; }
-  /** Get the number of finished reducers for the job */
-  public int getFinishedReduces() { return finishedReduces; }
+  /** Get the number of succeeded reducers for the job */
+  public int getSucceededReduces() { return succeededReduces; }
   /** Get the number of failed maps for the job */
   public int getFailedMaps() { return failedMaps; }
   /** Get the number of failed reducers for the job */
   public int getFailedReduces() { return failedReduces; }
+  /** Get the number of killed maps */
+  public int getKilledMaps() { return killedMaps; }
+  /** Get the number of killed reduces */
+  public int getKilledReduces() { return killedReduces; }
   /** Get the counters for the job */
   public Counters getTotalCounters() {
     return totalCounters;
@@ -145,12 +160,16 @@
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setId(StringUtils.toUpperCase(getEventType().name()));
     tEvent.addInfo("FINISH_TIME", getFinishTime());
-    tEvent.addInfo("NUM_MAPS", getFinishedMaps());
-    tEvent.addInfo("NUM_REDUCES", getFinishedReduces());
+    tEvent.addInfo("NUM_MAPS", getSucceededMaps() + getFailedMaps()
+        + getKilledMaps());
+    tEvent.addInfo("NUM_REDUCES", getSucceededReduces() + getFailedReduces()
+        + getKilledReduces());
     tEvent.addInfo("FAILED_MAPS", getFailedMaps());
     tEvent.addInfo("FAILED_REDUCES", getFailedReduces());
-    tEvent.addInfo("FINISHED_MAPS", getFinishedMaps());
-    tEvent.addInfo("FINISHED_REDUCES", getFinishedReduces());
+    tEvent.addInfo("SUCCESSFUL_MAPS", getSucceededMaps());
+    tEvent.addInfo("SUCCESSFUL_REDUCES", getSucceededReduces());
+    tEvent.addInfo("KILLED_MAPS", getKilledMaps());
+    tEvent.addInfo("KILLED_REDUCES", getKilledReduces());
     // TODO replace SUCCEEDED with JobState.SUCCEEDED.toString()
     tEvent.addInfo("JOB_STATUS", "SUCCEEDED");
     return tEvent;
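
Note that the rename stops at the Avro boundary: setFinishedMaps and setFinishedReduces keep their legacy schema field names so existing history files and readers stay compatible, while the Java getters now say what they mean, and the timeline NUM_MAPS/NUM_REDUCES values count every attempt outcome rather than only the successful ones. The same legacy-field naming appears in JobUnsuccessfulCompletionEvent further down. A sketch of how the new accessors compose, with made-up numbers (a real event is read from a .jhist file, not built by hand):

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;

    public class JobFinishedEventSketch {
      public static void main(String[] args) {
        JobFinishedEvent event = new JobFinishedEvent(
            JobID.forName("job_1500000000000_0001"), 1500000100000L,
            8, 2,   // succeeded maps / reduces
            1, 0,   // failed maps / reduces
            1, 0,   // killed maps / reduces
            new Counters(), new Counters(), new Counters());
        // NUM_MAPS as published to ATSv2: succeeded + failed + killed = 10.
        int numMaps = event.getSucceededMaps() + event.getFailedMaps()
            + event.getKilledMaps();
        System.out.println("map attempts accounted for: " + numMaps);
      }
    }
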
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index 07699fd..28fcc92 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -376,18 +376,24 @@
 
   private void handleJobFailedEvent(JobUnsuccessfulCompletionEvent event) {
     info.finishTime = event.getFinishTime();
-    info.finishedMaps = event.getFinishedMaps();
-    info.finishedReduces = event.getFinishedReduces();
+    info.succeededMaps = event.getSucceededMaps();
+    info.succeededReduces = event.getSucceededReduces();
+    info.failedMaps = event.getFailedMaps();
+    info.failedReduces = event.getFailedReduces();
+    info.killedMaps = event.getKilledMaps();
+    info.killedReduces = event.getKilledReduces();
     info.jobStatus = StringInterner.weakIntern(event.getStatus());
     info.errorInfo = StringInterner.weakIntern(event.getDiagnostics());
   }
 
   private void handleJobFinishedEvent(JobFinishedEvent event) {
     info.finishTime = event.getFinishTime();
-    info.finishedMaps = event.getFinishedMaps();
-    info.finishedReduces = event.getFinishedReduces();
+    info.succeededMaps = event.getSucceededMaps();
+    info.succeededReduces = event.getSucceededReduces();
     info.failedMaps = event.getFailedMaps();
     info.failedReduces = event.getFailedReduces();
+    info.killedMaps = event.getKilledMaps();
+    info.killedReduces = event.getKilledReduces();
     info.totalCounters = event.getTotalCounters();
     info.mapCounters = event.getMapCounters();
     info.reduceCounters = event.getReduceCounters();
@@ -456,8 +462,10 @@
     int totalReduces;
     int failedMaps;
     int failedReduces;
-    int finishedMaps;
-    int finishedReduces;
+    int succeededMaps;
+    int succeededReduces;
+    int killedMaps;
+    int killedReduces;
     String jobStatus;
     Counters totalCounters;
     Counters mapCounters;
@@ -477,7 +485,7 @@
     public JobInfo() {
       submitTime = launchTime = finishTime = -1;
       totalMaps = totalReduces = failedMaps = failedReduces = 0;
-      finishedMaps = finishedReduces = 0;
+      succeededMaps = succeededReduces = 0;
       username = jobname = jobConfPath = jobQueueName = "";
       tasksMap = new HashMap<TaskID, TaskInfo>();
       completedTaskAttemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
@@ -540,10 +548,14 @@
     public long getFailedMaps() { return failedMaps; }
     /** @return the number of failed reduces */
     public long getFailedReduces() { return failedReduces; }
-    /** @return the number of finished maps */
-    public long getFinishedMaps() { return finishedMaps; }
-    /** @return the number of finished reduces */
-    public long getFinishedReduces() { return finishedReduces; }
+    /** @return the number of killed maps */
+    public long getKilledMaps() { return killedMaps; }
+    /** @return the number of killed reduces */
+    public long getKilledReduces() { return killedReduces; }
+    /** @return the number of succeeded maps */
+    public long getSucceededMaps() { return succeededMaps; }
+    /** @return the number of succeeded reduces */
+    public long getSucceededReduces() { return succeededReduces; }
     /** @return the job status */
     public String getJobStatus() { return jobStatus; }
     public String getErrorInfo() { return errorInfo; }
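
For consumers, JobHistoryParser.JobInfo now splits the old catch-all finished counts into succeeded and killed, with failed reported separately as before. A hedged usage sketch (pass the location of a real .jhist file as the argument):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

    public class HistoryCountsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        JobHistoryParser parser = new JobHistoryParser(fs, new Path(args[0]));
        JobInfo info = parser.parse();
        System.out.println("succeeded maps: " + info.getSucceededMaps()
            + ", killed maps: " + info.getKilledMaps()    // newly exposed
            + ", failed maps: " + info.getFailedMaps());
      }
    }
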
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
index ce6fa32..da31591 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
@@ -49,34 +49,58 @@
    * Create an event to record unsuccessful completion (killed/failed) of jobs
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps Number of finished maps
-   * @param finishedReduces Number of finished reduces
+   * @param succeededMaps Number of succeeded maps
+   * @param succeededReduces Number of succeeded reduces
+   * @param failedMaps Number of failed maps
+   * @param failedReduces Number of failed reduces
+   * @param killedMaps Number of killed maps
+   * @param killedReduces Number of killed reduces
    * @param status Status of the job
    */
   public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
-      int finishedMaps,
-      int finishedReduces, String status) {
-    this(id, finishTime, finishedMaps, finishedReduces, status, NODIAGS_LIST);
+      int succeededMaps,
+      int succeededReduces,
+      int failedMaps,
+      int failedReduces,
+      int killedMaps,
+      int killedReduces,
+      String status) {
+    this(id, finishTime, succeededMaps, succeededReduces, failedMaps,
+            failedReduces, killedMaps, killedReduces, status, NODIAGS_LIST);
   }
 
   /**
    * Create an event to record unsuccessful completion (killed/failed) of jobs
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps Number of finished maps
-   * @param finishedReduces Number of finished reduces
+   * @param succeededMaps Number of succeeded maps
+   * @param succeededReduces Number of succeeded reduces
+   * @param failedMaps Number of failed maps
+   * @param failedReduces Number of failed reduces
+   * @param killedMaps Number of killed maps
+   * @param killedReduces Number of killed reduces
    * @param status Status of the job
    * @param diagnostics job runtime diagnostics
    */
   public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
-      int finishedMaps,
-      int finishedReduces,
+      int succeededMaps,
+      int succeededReduces,
+      int failedMaps,
+      int failedReduces,
+      int killedMaps,
+      int killedReduces,
       String status,
       Iterable<String> diagnostics) {
     datum.setJobid(new Utf8(id.toString()));
     datum.setFinishTime(finishTime);
-    datum.setFinishedMaps(finishedMaps);
-    datum.setFinishedReduces(finishedReduces);
+    // using finishedMaps & finishedReduces in the Avro schema for backward
+    // compatibility
+    datum.setFinishedMaps(succeededMaps);
+    datum.setFinishedReduces(succeededReduces);
+    datum.setFailedMaps(failedMaps);
+    datum.setFailedReduces(failedReduces);
+    datum.setKilledMaps(killedMaps);
+    datum.setKilledReduces(killedReduces);
     datum.setJobStatus(new Utf8(status));
     if (diagnostics == null) {
       diagnostics = NODIAGS_LIST;
@@ -98,10 +122,19 @@
   }
   /** Get the job finish time */
   public long getFinishTime() { return datum.getFinishTime(); }
-  /** Get the number of finished maps */
-  public int getFinishedMaps() { return datum.getFinishedMaps(); }
-  /** Get the number of finished reduces */
-  public int getFinishedReduces() { return datum.getFinishedReduces(); }
+  /** Get the number of succeeded maps */
+  public int getSucceededMaps() { return datum.getFinishedMaps(); }
+  /** Get the number of succeeded reduces */
+  public int getSucceededReduces() { return datum.getFinishedReduces(); }
+  /** Get the number of failed maps */
+  public int getFailedMaps() { return datum.getFailedMaps(); }
+  /** Get the number of failed reduces */
+  public int getFailedReduces() { return datum.getFailedReduces(); }
+  /** Get the number of killed maps */
+  public int getKilledMaps() { return datum.getKilledMaps(); }
+  /** Get the number of killed reduces */
+  public int getKilledReduces() { return datum.getKilledReduces(); }
+
   /** Get the status */
   public String getStatus() { return datum.getJobStatus().toString(); }
   /** Get the event type */
@@ -129,12 +162,19 @@
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setId(StringUtils.toUpperCase(getEventType().name()));
     tEvent.addInfo("FINISH_TIME", getFinishTime());
-    tEvent.addInfo("NUM_MAPS", getFinishedMaps());
-    tEvent.addInfo("NUM_REDUCES", getFinishedReduces());
+    tEvent.addInfo("NUM_MAPS", getSucceededMaps() + getFailedMaps()
+        + getKilledMaps());
+    tEvent.addInfo("NUM_REDUCES", getSucceededReduces() + getFailedReduces()
+        + getKilledReduces());
     tEvent.addInfo("JOB_STATUS", getStatus());
     tEvent.addInfo("DIAGNOSTICS", getDiagnostics());
-    tEvent.addInfo("FINISHED_MAPS", getFinishedMaps());
-    tEvent.addInfo("FINISHED_REDUCES", getFinishedReduces());
+    tEvent.addInfo("SUCCESSFUL_MAPS", getSucceededMaps());
+    tEvent.addInfo("SUCCESSFUL_REDUCES", getSucceededReduces());
+    tEvent.addInfo("FAILED_MAPS", getFailedMaps());
+    tEvent.addInfo("FAILED_REDUCES", getFailedReduces());
+    tEvent.addInfo("KILLED_MAPS", getKilledMaps());
+    tEvent.addInfo("KILLED_REDUCES", getKilledReduces());
+
     return tEvent;
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
index 3121c4e..2b1357e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
@@ -32,9 +32,10 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
- * Event to record successful completion of a map attempt
+ * Event to record successful completion of a map attempt.
  *
  */
 @InterfaceAudience.Private
@@ -58,9 +59,10 @@
   int[] cpuUsages;
   int[] vMemKbytes;
   int[] physMemKbytes;
+  private long startTime;
 
   /** 
-   * Create an event for successful completion of map attempts
+   * Create an event for successful completion of map attempts.
    * @param id Task Attempt ID
    * @param taskType Type of the task
    * @param taskStatus Status of the task
@@ -77,12 +79,13 @@
    *        virtual memory and physical memory. 
    *
    *        If you have no splits data, code {@code null} for this
-   *        parameter. 
+   *        parameter.
+   * @param startTs Task start time to be used for writing entity to ATSv2.
    */
-  public MapAttemptFinishedEvent
-      (TaskAttemptID id, TaskType taskType, String taskStatus, 
-       long mapFinishTime, long finishTime, String hostname, int port, 
-       String rackName, String state, Counters counters, int[][] allSplits) {
+  public MapAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long mapFinishTime, long finishTime, String hostname,
+      int port, String rackName, String state, Counters counters,
+      int[][] allSplits, long startTs) {
     this.attemptId = id;
     this.taskType = taskType;
     this.taskStatus = taskStatus;
@@ -98,6 +101,16 @@
     this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
     this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
     this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
+    this.startTime = startTs;
+  }
+
+  public MapAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long mapFinishTime, long finishTime, String hostname,
+      int port, String rackName, String state, Counters counters,
+      int[][] allSplits) {
+    this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, port,
+        rackName, state, counters, allSplits,
+        SystemClock.getInstance().getTime());
   }
 
   /** 
@@ -117,15 +130,13 @@
    * @param counters Counters for the attempt
    */
   @Deprecated
-  public MapAttemptFinishedEvent
-      (TaskAttemptID id, TaskType taskType, String taskStatus, 
-       long mapFinishTime, long finishTime, String hostname,
-       String state, Counters counters) {
+  public MapAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long mapFinishTime, long finishTime, String hostname,
+      String state, Counters counters) {
     this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, -1, "",
         state, counters, null);
   }
-  
-  
+
   MapAttemptFinishedEvent() {}
 
   public Object getDatum() {
@@ -175,38 +186,56 @@
     this.physMemKbytes = AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
   }
 
-  /** Get the task ID */
-  public TaskID getTaskId() { return attemptId.getTaskID(); }
-  /** Get the attempt id */
+  /** Gets the task ID. */
+  public TaskID getTaskId() {
+    return attemptId.getTaskID();
+  }
+  /** Gets the attempt id. */
   public TaskAttemptID getAttemptId() {
     return attemptId;
   }
 
-  /** Get the task type */
+  /** Gets the task type. */
   public TaskType getTaskType() {
     return taskType;
   }
-  /** Get the task status */
+  /** Gets the task status. */
   public String getTaskStatus() { return taskStatus.toString(); }
-  /** Get the map phase finish time */
+  /** Gets the map phase finish time. */
   public long getMapFinishTime() { return mapFinishTime; }
-  /** Get the attempt finish time */
+  /** Gets the attempt finish time. */
   public long getFinishTime() { return finishTime; }
-  /** Get the host name */
+  /**
+   * Gets the task attempt start time.
+   * @return task attempt start time.
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets the host name. */
   public String getHostname() { return hostname.toString(); }
-  /** Get the tracker rpc port */
+  /** Gets the tracker rpc port. */
   public int getPort() { return port; }
   
-  /** Get the rack name */
+  /** Gets the rack name. */
   public String getRackName() {
     return rackName == null ? null : rackName.toString();
   }
-  
-  /** Get the state string */
-  public String getState() { return state.toString(); }
-  /** Get the counters */
-  Counters getCounters() { return counters; }
-  /** Get the event type */
+  /**
+   * Gets the attempt state string.
+   * @return map attempt state
+   */
+  public String getState() {
+    return state.toString();
+  }
+  /**
+   * Gets the counters.
+   * @return counters
+   */
+  Counters getCounters() {
+    return counters;
+  }
+  /** Gets the event type. */
-   public EventType getEventType() {
+  public EventType getEventType() {
     return EventType.MAP_ATTEMPT_FINISHED;
   }
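
MapAttemptFinishedEvent above, and the reduce and task events that follow, each gain a startTime field for ATSv2: a new primary constructor takes the timestamp explicitly, and a delegating overload preserves the old signature by defaulting to the current SystemClock time, so existing callers keep compiling. The pattern reduced to its essentials (class and member names here are illustrative):

    import org.apache.hadoop.yarn.util.SystemClock;

    class AttemptEventSketch {
      private final long startTime;

      // New primary constructor: the caller supplies the start timestamp.
      AttemptEventSketch(long startTs) {
        this.startTime = startTs;
      }

      // Old signature kept for source compatibility; defaults to "now".
      AttemptEventSketch() {
        this(SystemClock.getInstance().getTime());
      }

      long getStartTime() {
        return startTime;
      }
    }
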
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
index 9c0f09b..5a16f83 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * Event to record successful completion of a reduce attempt
@@ -59,6 +60,7 @@
   int[] cpuUsages;
   int[] vMemKbytes;
   int[] physMemKbytes;
+  private long startTime;
 
   /**
    * Create an event to record completion of a reduce attempt
@@ -76,13 +78,13 @@
    * @param allSplits the "splits", or a pixelated graph of various
    *        measurable worker node state variables against progress.
    *        Currently there are four; wallclock time, CPU time,
-   *        virtual memory and physical memory.  
+   *        virtual memory and physical memory.
+   * @param startTs Task start time to be used for writing entity to ATSv2.
    */
-  public ReduceAttemptFinishedEvent
-    (TaskAttemptID id, TaskType taskType, String taskStatus, 
-     long shuffleFinishTime, long sortFinishTime, long finishTime,
-     String hostname, int port,  String rackName, String state, 
-     Counters counters, int[][] allSplits) {
+  public ReduceAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long shuffleFinishTime, long sortFinishTime,
+      long finishTime, String hostname, int port, String rackName,
+      String state, Counters counters, int[][] allSplits, long startTs) {
     this.attemptId = id;
     this.taskType = taskType;
     this.taskStatus = taskStatus;
@@ -99,6 +101,16 @@
     this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
     this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
     this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
+    this.startTime = startTs;
+  }
+
+  public ReduceAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long shuffleFinishTime, long sortFinishTime,
+      long finishTime, String hostname, int port, String rackName,
+      String state, Counters counters, int[][] allSplits) {
+    this(id, taskType, taskStatus, shuffleFinishTime, sortFinishTime,
+        finishTime, hostname, port, rackName, state, counters, allSplits,
+        SystemClock.getInstance().getTime());
   }
 
   /**
@@ -118,13 +130,12 @@
    * @param state State of the attempt
    * @param counters Counters for the attempt
    */
-  public ReduceAttemptFinishedEvent
-    (TaskAttemptID id, TaskType taskType, String taskStatus, 
-     long shuffleFinishTime, long sortFinishTime, long finishTime,
-     String hostname, String state, Counters counters) {
+  public ReduceAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long shuffleFinishTime, long sortFinishTime,
+      long finishTime, String hostname, String state, Counters counters) {
     this(id, taskType, taskStatus,
-         shuffleFinishTime, sortFinishTime, finishTime,
-         hostname, -1, "", state, counters, null);
+        shuffleFinishTime, sortFinishTime, finishTime,
+        hostname, -1, "", state, counters, null);
   }
 
   ReduceAttemptFinishedEvent() {}
@@ -178,39 +189,55 @@
     this.physMemKbytes = AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
   }
 
-  /** Get the Task ID */
+  /** Gets the Task ID. */
   public TaskID getTaskId() { return attemptId.getTaskID(); }
-  /** Get the attempt id */
+  /** Gets the attempt id. */
   public TaskAttemptID getAttemptId() {
     return attemptId;
   }
-  /** Get the task type */
+  /** Gets the task type. */
   public TaskType getTaskType() {
     return taskType;
   }
-  /** Get the task status */
+  /** Gets the task status. */
   public String getTaskStatus() { return taskStatus.toString(); }
-  /** Get the finish time of the sort phase */
+  /** Gets the finish time of the sort phase. */
   public long getSortFinishTime() { return sortFinishTime; }
-  /** Get the finish time of the shuffle phase */
+  /** Gets the finish time of the shuffle phase. */
   public long getShuffleFinishTime() { return shuffleFinishTime; }
-  /** Get the finish time of the attempt */
+  /** Gets the finish time of the attempt. */
   public long getFinishTime() { return finishTime; }
-  /** Get the name of the host where the attempt ran */
+  /**
+   * Gets the start time.
+   * @return task attempt start time.
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets the name of the host where the attempt ran. */
   public String getHostname() { return hostname.toString(); }
-  /** Get the tracker rpc port */
+  /** Gets the tracker rpc port. */
   public int getPort() { return port; }
   
-  /** Get the rack name of the node where the attempt ran */
+  /** Gets the rack name of the node where the attempt ran. */
   public String getRackName() {
     return rackName == null ? null : rackName.toString();
   }
-  
-  /** Get the state string */
-  public String getState() { return state.toString(); }
-  /** Get the counters for the attempt */
-  Counters getCounters() { return counters; }
-  /** Get the event type */
+  /**
+   * Gets the state string.
+   * @return reduce attempt state
+   */
+  public String getState() {
+    return state.toString();
+  }
+  /**
+   * Gets the counters.
+   * @return counters
+   */
+  Counters getCounters() {
+    return counters;
+  }
+  /** Gets the event type. */
   public EventType getEventType() {
     return EventType.REDUCE_ATTEMPT_FINISHED;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
index a931ca2..c28c216 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * Event to record successful task completion
@@ -50,10 +51,11 @@
   private String hostname;
   private String state;
   private Counters counters;
+  private long startTime;
 
   /**
-   * Create an event to record successful finishes for setup and cleanup 
-   * attempts
+   * Create an event to record successful finishes for setup and cleanup
+   * attempts.
    * @param id Attempt ID
    * @param taskType Type of task
    * @param taskStatus Status of task
@@ -61,11 +63,12 @@
    * @param hostname Host where the attempt executed
    * @param state State string
    * @param counters Counters for the attempt
+   * @param startTs Task start time to be used for writing entity to ATSv2.
    */
   public TaskAttemptFinishedEvent(TaskAttemptID id, 
       TaskType taskType, String taskStatus, 
       long finishTime, String rackName,
-      String hostname, String state, Counters counters) {
+      String hostname, String state, Counters counters, long startTs) {
     this.attemptId = id;
     this.taskType = taskType;
     this.taskStatus = taskStatus;
@@ -74,6 +77,14 @@
     this.hostname = hostname;
     this.state = state;
     this.counters = counters;
+    this.startTime = startTs;
+  }
+
+  public TaskAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
+      String taskStatus, long finishTime, String rackName, String hostname,
+      String state, Counters counters) {
+    this(id, taskType, taskStatus, finishTime, rackName, hostname, state,
+        counters, SystemClock.getInstance().getTime());
   }
 
   TaskAttemptFinishedEvent() {}
@@ -107,33 +118,43 @@
     this.counters = EventReader.fromAvro(datum.getCounters());
   }
 
-  /** Get the task ID */
+  /** Gets the task ID. */
   public TaskID getTaskId() { return attemptId.getTaskID(); }
-  /** Get the task attempt id */
+  /** Gets the task attempt id. */
   public TaskAttemptID getAttemptId() {
     return attemptId;
   }
-  /** Get the task type */
+  /** Gets the task type. */
   public TaskType getTaskType() {
     return taskType;
   }
-  /** Get the task status */
+  /** Gets the task status. */
   public String getTaskStatus() { return taskStatus.toString(); }
-  /** Get the attempt finish time */
+  /** Gets the attempt finish time. */
   public long getFinishTime() { return finishTime; }
-  /** Get the host where the attempt executed */
+  /**
+   * Gets the task attempt start time to be used while publishing to ATSv2.
+   * @return task attempt start time.
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets the host where the attempt executed. */
   public String getHostname() { return hostname.toString(); }
   
-  /** Get the rackname where the attempt executed */
+  /** Gets the rackname where the attempt executed. */
   public String getRackName() {
     return rackName == null ? null : rackName.toString();
   }
   
-  /** Get the state string */
+  /**
+   * Gets the state string.
+   * @return task attempt state.
+   */
   public String getState() { return state.toString(); }
-  /** Get the counters for the attempt */
+  /** Gets the counters for the attempt. */
   Counters getCounters() { return counters; }
-  /** Get the event type */
+  /** Gets the event type. */
   public EventType getEventType() {
     // Note that the task type can be setup/map/reduce/cleanup but the 
     // attempt-type can only be map/reduce.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 1732d91..9afa093 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * Event to record unsuccessful (Killed/Failed) completion of task attempts
@@ -58,10 +59,11 @@
   int[] cpuUsages;
   int[] vMemKbytes;
   int[] physMemKbytes;
+  private long startTime;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
-   * Create an event to record the unsuccessful completion of attempts
+  /**
+   * Create an event to record the unsuccessful completion of attempts.
    * @param id Attempt ID
    * @param taskType Type of the task
    * @param status Status of the attempt
@@ -74,13 +76,14 @@
    * @param allSplits the "splits", or a pixelated graph of various
    *        measurable worker node state variables against progress.
    *        Currently there are four; wallclock time, CPU time,
-   *        virtual memory and physical memory.  
+   *        virtual memory and physical memory.
+   * @param startTs Task start time to be used for writing entity to ATSv2.
    */
   public TaskAttemptUnsuccessfulCompletionEvent
        (TaskAttemptID id, TaskType taskType,
         String status, long finishTime,
         String hostname, int port, String rackName,
-        String error, Counters counters, int[][] allSplits) {
+        String error, Counters counters, int[][] allSplits, long startTs) {
     this.attemptId = id;
     this.taskType = taskType;
     this.status = status;
@@ -99,9 +102,18 @@
         ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
     this.physMemKbytes =
         ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
+    this.startTime = startTs;
   }
 
-  /** 
+  public TaskAttemptUnsuccessfulCompletionEvent(TaskAttemptID id,
+      TaskType taskType, String status, long finishTime, String hostname,
+      int port, String rackName, String error, Counters counters,
+      int[][] allSplits) {
+    this(id, taskType, status, finishTime, hostname, port, rackName, error,
+        counters, allSplits, SystemClock.getInstance().getTime());
+  }
+
+  /**
    * @deprecated please use the constructor with an additional
    *              argument, an array of splits arrays instead.  See
    *              {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
@@ -117,19 +129,19 @@
    */
   public TaskAttemptUnsuccessfulCompletionEvent
        (TaskAttemptID id, TaskType taskType,
-        String status, long finishTime, 
+        String status, long finishTime,
         String hostname, String error) {
     this(id, taskType, status, finishTime, hostname, -1, "",
         error, EMPTY_COUNTERS, null);
   }
-  
+
   public TaskAttemptUnsuccessfulCompletionEvent
       (TaskAttemptID id, TaskType taskType,
        String status, long finishTime,
        String hostname, int port, String rackName,
        String error, int[][] allSplits) {
     this(id, taskType, status, finishTime, hostname, port,
-        rackName, error, EMPTY_COUNTERS, null);
+        rackName, error, EMPTY_COUNTERS, allSplits);
   }
 
   TaskAttemptUnsuccessfulCompletionEvent() {}
@@ -162,9 +174,9 @@
     }
     return datum;
   }
-  
-  
-  
+
+
+
   public void setDatum(Object odatum) {
     this.datum =
         (TaskAttemptUnsuccessfulCompletion)odatum;
@@ -190,46 +202,56 @@
         AvroArrayUtils.fromAvro(datum.getPhysMemKbytes());
   }
 
-  /** Get the task id */
+  /** Gets the task id. */
   public TaskID getTaskId() {
     return attemptId.getTaskID();
   }
-  /** Get the task type */
+  /** Gets the task type. */
   public TaskType getTaskType() {
     return TaskType.valueOf(taskType.toString());
   }
-  /** Get the attempt id */
+  /** Gets the attempt id. */
   public TaskAttemptID getTaskAttemptId() {
     return attemptId;
   }
-  /** Get the finish time */
+  /** Gets the finish time. */
   public long getFinishTime() { return finishTime; }
-  /** Get the name of the host where the attempt executed */
+  /**
+   * Gets the task attempt start time to be used while publishing to ATSv2.
+   * @return task attempt start time.
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets the name of the host where the attempt executed. */
   public String getHostname() { return hostname; }
-  /** Get the rpc port for the host where the attempt executed */
+  /** Gets the rpc port for the host where the attempt executed. */
   public int getPort() { return port; }
-  
-  /** Get the rack name of the node where the attempt ran */
+
+  /** Gets the rack name of the node where the attempt ran. */
   public String getRackName() {
     return rackName == null ? null : rackName.toString();
   }
-  
-  /** Get the error string */
+
+  /** Gets the error string. */
   public String getError() { return error.toString(); }
-  /** Get the task status */
+  /**
+   * Gets the task attempt status.
+   * @return task attempt status.
+   */
   public String getTaskStatus() {
     return status.toString();
   }
-  /** Get the counters */
+  /** Gets the counters. */
   Counters getCounters() { return counters; }
-  /** Get the event type */
+  /** Gets the event type. */
   public EventType getEventType() {
-    // Note that the task type can be setup/map/reduce/cleanup but the 
+    // Note that the task type can be setup/map/reduce/cleanup but the
     // attempt-type can only be map/reduce.
     // find out if the task failed or got killed
     boolean failed = TaskStatus.State.FAILED.toString().equals(getTaskStatus());
-    return getTaskId().getTaskType() == TaskType.MAP 
-           ? (failed 
+    return getTaskId().getTaskType() == TaskType.MAP
+           ? (failed
               ? EventType.MAP_ATTEMPT_FAILED
               : EventType.MAP_ATTEMPT_KILLED)
            : (failed
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
index d14350d..b4d9e41 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFailedEvent.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * Event to record the failure of a task
@@ -49,11 +50,12 @@
   private String status;
   private String error;
   private Counters counters;
+  private long startTime;
 
   private static final Counters EMPTY_COUNTERS = new Counters();
 
   /**
-   * Create an event to record task failure
+   * Create an event to record task failure.
    * @param id Task ID
    * @param finishTime Finish time of the task
    * @param taskType Type of the task
@@ -61,10 +63,11 @@
    * @param status Status
    * @param failedDueToAttempt The attempt id due to which the task failed
    * @param counters Counters for the task
+   * @param startTs task start time.
    */
   public TaskFailedEvent(TaskID id, long finishTime, 
       TaskType taskType, String error, String status,
-      TaskAttemptID failedDueToAttempt, Counters counters) {
+      TaskAttemptID failedDueToAttempt, Counters counters, long startTs) {
     this.id = id;
     this.finishTime = finishTime;
     this.taskType = taskType;
@@ -72,15 +75,23 @@
     this.status = status;
     this.failedDueToAttempt = failedDueToAttempt;
     this.counters = counters;
+    this.startTime = startTs;
+  }
+
+  public TaskFailedEvent(TaskID id, long finishTime, TaskType taskType,
+      String error, String status, TaskAttemptID failedDueToAttempt,
+      Counters counters) {
+    this(id, finishTime, taskType, error, status, failedDueToAttempt, counters,
+        SystemClock.getInstance().getTime());
   }
 
   public TaskFailedEvent(TaskID id, long finishTime, 
-	      TaskType taskType, String error, String status,
-	      TaskAttemptID failedDueToAttempt) {
-    this(id, finishTime, taskType, error, status,
-        failedDueToAttempt, EMPTY_COUNTERS);
+      TaskType taskType, String error, String status,
+      TaskAttemptID failedDueToAttempt) {
+    this(id, finishTime, taskType, error, status, failedDueToAttempt,
+        EMPTY_COUNTERS);
   }
-  
+
   TaskFailedEvent() {}
 
   public Object getDatum() {
@@ -118,27 +129,37 @@
         EventReader.fromAvro(datum.getCounters());
   }
 
-  /** Get the task id */
+  /** Gets the task id. */
   public TaskID getTaskId() { return id; }
-  /** Get the error string */
+  /** Gets the error string. */
   public String getError() { return error; }
-  /** Get the finish time of the attempt */
+  /** Gets the finish time of the attempt. */
   public long getFinishTime() {
     return finishTime;
   }
-  /** Get the task type */
+  /**
+   * Gets the task start time to be reported to ATSv2.
+   * @return task start time.
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets the task type. */
   public TaskType getTaskType() {
     return taskType;
   }
-  /** Get the attempt id due to which the task failed */
+  /** Gets the attempt id due to which the task failed. */
   public TaskAttemptID getFailedAttemptID() {
     return failedDueToAttempt;
   }
-  /** Get the task status */
+  /**
+   * Gets the task status.
+   * @return task status
+   */
   public String getTaskStatus() { return status; }
-  /** Get task counters */
+  /** Gets task counters. */
   public Counters getCounters() { return counters; }
-  /** Get the event type */
+  /** Gets the event type. */
   public EventType getEventType() {
     return EventType.TASK_FAILED;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
index 0bc4383..97557c7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.util.SystemClock;
 
 /**
  * Event to record the successful completion of a task
@@ -49,27 +50,36 @@
   private TaskType taskType;
   private String status;
   private Counters counters;
-  
+  private long startTime;
+
   /**
-   * Create an event to record the successful completion of a task
+   * Create an event to record the successful completion of a task.
    * @param id Task ID
    * @param attemptId Task Attempt ID of the successful attempt for this task
    * @param finishTime Finish time of the task
    * @param taskType Type of the task
    * @param status Status string
    * @param counters Counters for the task
+   * @param startTs task start time.
    */
   public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
                            TaskType taskType,
-                           String status, Counters counters) {
+                           String status, Counters counters, long startTs) {
     this.taskid = id;
     this.successfulAttemptId = attemptId;
     this.finishTime = finishTime;
     this.taskType = taskType;
     this.status = status;
     this.counters = counters;
+    this.startTime = startTs;
   }
-  
+
+  public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
+      TaskType taskType, String status, Counters counters) {
+    this(id, attemptId, finishTime, taskType, status, counters,
+        SystemClock.getInstance().getTime());
+  }
+
   TaskFinishedEvent() {}
 
   public Object getDatum() {
@@ -101,23 +111,33 @@
     this.counters = EventReader.fromAvro(datum.getCounters());
   }
 
-  /** Get task id */
+  /** Gets task id. */
   public TaskID getTaskId() { return taskid; }
-  /** Get successful task attempt id */
+  /** Gets successful task attempt id. */
   public TaskAttemptID getSuccessfulTaskAttemptId() {
     return successfulAttemptId;
   }
-  /** Get the task finish time */
+  /** Gets the task finish time. */
   public long getFinishTime() { return finishTime; }
-  /** Get task counters */
+  /**
+   * Gets the task start time to be reported to ATSv2.
+   * @return task start time
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+  /** Gets task counters. */
   public Counters getCounters() { return counters; }
-  /** Get task type */
+  /** Gets task type. */
   public TaskType getTaskType() {
     return taskType;
   }
-  /** Get task status */
+  /**
+   * Gets task status.
+   * @return task status
+   */
   public String getTaskStatus() { return status.toString(); }
-  /** Get event type */
+  /** Gets event type. */
   public EventType getEventType() {
     return EventType.TASK_FINISHED;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
index e15f7ab..999561a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
@@ -23,7 +23,8 @@
 import java.io.IOException;
 import java.net.URI;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -38,7 +39,7 @@
 
 
 @SuppressWarnings("unchecked")
-public class TestFileOutputCommitter extends TestCase {
+public class TestFileOutputCommitter {
   private static Path outDir = new Path(System.getProperty("test.build.data",
       "/tmp"), "output");
 
@@ -153,14 +154,18 @@
     validateContent(outDir);
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
+
+  @Test
   public void testRecoveryV1() throws Exception {
     testRecoveryInternal(1, 1);
   }
 
+  @Test
   public void testRecoveryV2() throws Exception {
     testRecoveryInternal(2, 2);
   }
 
+  @Test
   public void testRecoveryUpgradeV1V2() throws Exception {
     testRecoveryInternal(1, 2);
   }
@@ -203,11 +208,13 @@
     assert(dataFileFound && indexFileFound);
   }
 
+  @Test
   public void testCommitterWithFailureV1() throws Exception {
     testCommitterWithFailureInternal(1, 1);
     testCommitterWithFailureInternal(1, 2);
   }
 
+  @Test
   public void testCommitterWithFailureV2() throws Exception {
     testCommitterWithFailureInternal(2, 1);
     testCommitterWithFailureInternal(2, 2);
@@ -256,10 +263,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV1() throws Exception {
     testCommitterWithDuplicatedCommitInternal(1);
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV2() throws Exception {
     testCommitterWithDuplicatedCommitInternal(2);
   }
@@ -340,10 +349,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterV1() throws Exception {
     testCommitterInternal(1);
   }
 
+  @Test
   public void testCommitterV2() throws Exception {
     testCommitterInternal(2);
   }
@@ -380,18 +391,22 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testMapFileOutputCommitterV1() throws Exception {
     testMapFileOutputCommitterInternal(1);
   }
 
+  @Test
   public void testMapFileOutputCommitterV2() throws Exception {
     testMapFileOutputCommitterInternal(2);
   }
 
+  @Test
   public void testMapOnlyNoOutputV1() throws Exception {
     testMapOnlyNoOutputInternal(1);
   }
 
+  @Test
   public void testMapOnlyNoOutputV2() throws Exception {
     testMapOnlyNoOutputInternal(2);
   }
@@ -456,10 +471,12 @@
     FileUtil.fullyDelete(out);
   }
 
+  @Test
   public void testAbortV1() throws Exception {
     testAbortInternal(1);
   }
 
+  @Test
   public void testAbortV2() throws Exception {
     testAbortInternal(2);
   }
@@ -537,10 +554,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testFailAbortV1() throws Exception {
     testFailAbortInternal(1);
   }
 
+  @Test
   public void testFailAbortV2() throws Exception {
     testFailAbortInternal(2);
   }
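
TestFileOutputCommitter above, and the test classes that follow, migrate from JUnit 3 to JUnit 4. Dropping extends TestCase means the runner no longer discovers tests by the test* naming convention, so every test method needs an explicit @Test annotation, and lifecycle hooks move from overriding setUp and tearDown to public methods marked @Before and @After. The shape of the migration on a hypothetical minimal test:

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestMigrationSketch {    // no longer "extends TestCase"
      private int fixture;

      @Before   // replaces the overridden setUp()
      public void setUp() {
        fixture = 42;
      }

      @Test     // required now that name-based discovery is gone
      public void testFixture() {
        assertEquals(42, fixture);
      }

      @After    // replaces the overridden tearDown()
      public void tearDown() {
        fixture = 0;
      }
    }
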
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
index b6a2df0..0cc3c66 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java
@@ -32,14 +32,16 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestIndexCache extends TestCase {
+public class TestIndexCache {
   private JobConf conf;
   private FileSystem fs;
   private Path p;
 
-  @Override
+  @Before
   public void setUp() throws IOException {
     conf = new JobConf();
     fs = FileSystem.getLocal(conf).getRaw();
@@ -47,6 +49,7 @@
         "cache").makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
+  @Test
   public void testLRCPolicy() throws Exception {
     Random r = new Random();
     long seed = r.nextLong();
@@ -120,6 +123,7 @@
     checkRecord(rec, totalsize);
   }
 
+  @Test
   public void testBadIndex() throws Exception {
     final int parts = 30;
     fs.delete(p, true);
@@ -152,6 +156,7 @@
     }
   }
 
+  @Test
   public void testInvalidReduceNumberOrLength() throws Exception {
     fs.delete(p, true);
     conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
@@ -192,6 +197,7 @@
     }
   }
 
+  @Test
   public void testRemoveMap() throws Exception {
     // This test case use two thread to call getIndexInformation and 
     // removeMap concurrently, in order to construct race condition.
@@ -241,7 +247,8 @@
       assertEquals(true, cache.checkTotalMemoryUsed());
     }      
   }
-  
+
+  @Test
   public void testCreateRace() throws Exception {
     fs.delete(p, true);
     conf.setInt(TTConfig.TT_INDEX_CACHE, 1);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
index 7d3e2ed..75893f5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
@@ -31,12 +31,15 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.junit.Test;
 
-public class TestJobEndNotifier extends TestCase {
+public class TestJobEndNotifier {
   HttpServer2 server;
   URL baseUrl;
 
@@ -99,6 +102,7 @@
     }
   }
 
+  @Before
   public void setUp() throws Exception {
     new File(System.getProperty("build.webapps", "build/webapps") + "/test"
         ).mkdirs();
@@ -118,6 +122,7 @@
     FailServlet.calledTimes = 0;
   }
 
+  @After
   public void tearDown() throws Exception {
     server.stop();
   }
@@ -125,6 +130,7 @@
   /**
    * Basic validation for localRunnerNotification.
    */
+  @Test
   public void testLocalJobRunnerUriSubstitution() throws InterruptedException {
     JobStatus jobStatus = createTestJobStatus(
         "job_20130313155005308_0001", JobStatus.SUCCEEDED);
@@ -145,6 +151,7 @@
   /**
    * Validate job.end.retry.attempts for the localJobRunner.
    */
+  @Test
   public void testLocalJobRunnerRetryCount() throws InterruptedException {
     int retryAttempts = 3;
     JobStatus jobStatus = createTestJobStatus(
@@ -161,6 +168,7 @@
    * Validate that the notification times out after reaching
    * mapreduce.job.end-notification.timeout.
    */
+  @Test
   public void testNotificationTimeout() throws InterruptedException {
     Configuration conf = new Configuration();
     // Reduce the timeout to 1 second
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMapFileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMapFileOutputFormat.java
index 7e315ae..b82cd70 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMapFileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMapFileOutputFormat.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.MapFile.Reader;
+import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -40,9 +41,10 @@
     assertTrue(!MyPartitioner.isGetPartitionCalled());
   }
 
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     MyPartitioner.setGetPartitionCalled(false);
-  };
+  }
   private static class MyPartitioner
       implements
         Partitioner<WritableComparable, Writable> {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index 1b533e7..fabe5f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -33,7 +33,9 @@
 import java.io.LineNumberReader;
 import java.io.StringReader;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.TaskReport;
@@ -43,8 +45,6 @@
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.WriterAppender;
-import org.junit.Before;
-import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -53,7 +53,7 @@
  * job monitoring is correct and prints 100% for map and reduce before 
  * successful completion.
  */
-public class TestJobMonitorAndPrint extends TestCase {
+public class TestJobMonitorAndPrint {
   private Job job;
   private Configuration conf;
   private ClientProtocol clientProtocol;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 358e9b2..2e2dbe1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
@@ -883,8 +883,8 @@
     job.totalReduces = 1;
     job.failedMaps = 1;
     job.failedReduces = 0;
-    job.finishedMaps = 5;
-    job.finishedReduces = 1;
+    job.succeededMaps = 5;
+    job.succeededReduces = 1;
     job.jobStatus = JobStatus.State.SUCCEEDED.name();
     job.totalCounters = createCounters();
     job.mapCounters = createCounters();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index 20d8ab5..abbfcb2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -27,7 +27,10 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.junit.Assert;
 
@@ -55,7 +58,7 @@
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 
 @SuppressWarnings("unchecked")
-public class TestFileOutputCommitter extends TestCase {
+public class TestFileOutputCommitter {
   private static final Path outDir = new Path(
       System.getProperty("test.build.data",
           System.getProperty("java.io.tmpdir")),
@@ -87,12 +90,12 @@
     fs.delete(outDir, true);
   }
   
-  @Override
+  @Before
   public void setUp() throws IOException {
     cleanup();
   }
-  
-  @Override
+
+  @After
   public void tearDown() throws IOException {
     cleanup();
   }
@@ -195,14 +198,17 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testRecoveryV1() throws Exception {
     testRecoveryInternal(1, 1);
   }
 
+  @Test
   public void testRecoveryV2() throws Exception {
     testRecoveryInternal(2, 2);
   }
 
+  @Test
   public void testRecoveryUpgradeV1V2() throws Exception {
     testRecoveryInternal(1, 2);
   }
@@ -278,18 +284,22 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterV1() throws Exception {
     testCommitterInternal(1);
   }
 
+  @Test
   public void testCommitterV2() throws Exception {
     testCommitterInternal(2);
   }
-  
+
+  @Test
   public void testCommitterWithDuplicatedCommitV1() throws Exception {
     testCommitterWithDuplicatedCommitInternal(1);
   }
 
+  @Test
   public void testCommitterWithDuplicatedCommitV2() throws Exception {
     testCommitterWithDuplicatedCommitInternal(2);
   }
@@ -336,11 +346,13 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterWithFailureV1() throws Exception {
     testCommitterWithFailureInternal(1, 1);
     testCommitterWithFailureInternal(1, 2);
   }
 
+  @Test
   public void testCommitterWithFailureV2() throws Exception {
     testCommitterWithFailureInternal(2, 1);
     testCommitterWithFailureInternal(2, 2);
@@ -390,10 +402,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testCommitterRepeatableV1() throws Exception {
     testCommitterRetryInternal(1);
   }
 
+  @Test
   public void testCommitterRepeatableV2() throws Exception {
     testCommitterRetryInternal(2);
   }
@@ -493,14 +507,17 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testMapFileOutputCommitterV1() throws Exception {
     testMapFileOutputCommitterInternal(1);
   }
-  
+
+  @Test
   public void testMapFileOutputCommitterV2() throws Exception {
     testMapFileOutputCommitterInternal(2);
   }
 
+  @Test
   public void testInvalidVersionNumber() throws IOException {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -552,10 +569,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testAbortV1() throws IOException, InterruptedException {
     testAbortInternal(1);
   }
 
+  @Test
   public void testAbortV2() throws IOException, InterruptedException {
     testAbortInternal(2);
   }
@@ -575,7 +594,7 @@
     }
   }
 
-  
+
   private void testFailAbortInternal(int version)
       throws IOException, InterruptedException {
     Job job = Job.getInstance();
@@ -631,10 +650,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testFailAbortV1() throws Exception {
     testFailAbortInternal(1);
   }
 
+  @Test
   public void testFailAbortV2() throws Exception {
     testFailAbortInternal(2);
   }
@@ -732,10 +753,12 @@
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
 
+  @Test
   public void testConcurrentCommitTaskWithSubDirV1() throws Exception {
     testConcurrentCommitTaskWithSubDir(1);
   }
 
+  @Test
   public void testConcurrentCommitTaskWithSubDirV2() throws Exception {
     testConcurrentCommitTaskWithSubDir(2);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
index a48fe3b..a5a8eb7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputFormat.java
@@ -19,7 +19,8 @@
 package org.apache.hadoop.mapreduce.lib.output;
 
 import java.io.IOException;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -28,8 +29,9 @@
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
-public class TestFileOutputFormat extends TestCase {
+public class TestFileOutputFormat {
 
+  @Test
   public void testSetOutputPathException() throws Exception {
     Job job = Job.getInstance();
     try {
@@ -42,6 +44,7 @@
     }
   }
 
+  @Test
   public void testCheckOutputSpecsException() throws Exception {
     Job job = Job.getInstance();
     Path outDir = new Path(System.getProperty("test.build.data", "/tmp"),
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMapFileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMapFileOutputFormat.java
index 82758f1..47220be 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMapFileOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMapFileOutputFormat.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.MapFile.Reader;
 import org.apache.hadoop.mapreduce.Partitioner;
+import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -41,6 +42,7 @@
     assertTrue(!MyPartitioner.isGetPartitionCalled());
   }
 
+  @After
   public void tearDown() throws Exception {
     MyPartitioner.setGetPartitionCalled(false);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index bbb126d..5afb645 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -56,7 +56,6 @@
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
-import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -71,7 +70,11 @@
  * Data from job history file is loaded lazily.
  */
 public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
-  
+  // Backward compatibility: if the failed or killed map/reduce
+  // count is -1, the value was not recorded, so we count it as 0.
+  private static final int UNDEFINED_VALUE = -1;
+
   static final Log LOG = LogFactory.getLog(CompletedJob.class);
   private final Configuration conf;
   private final JobId jobId; //Can be picked from JobInfo with a conversion.
@@ -104,12 +107,36 @@
 
   @Override
   public int getCompletedMaps() {
-    return (int) jobInfo.getFinishedMaps();
+    int killedMaps = (int) jobInfo.getKilledMaps();
+    int failedMaps = (int) jobInfo.getFailedMaps();
+
+    if (killedMaps == UNDEFINED_VALUE) {
+      killedMaps = 0;
+    }
+
+    if (failedMaps == UNDEFINED_VALUE) {
+      failedMaps = 0;
+    }
+
+    return (int) (jobInfo.getSucceededMaps() +
+        killedMaps + failedMaps);
   }
 
   @Override
   public int getCompletedReduces() {
-    return (int) jobInfo.getFinishedReduces();
+    int killedReduces = (int) jobInfo.getKilledReduces();
+    int failedReduces = (int) jobInfo.getFailedReduces();
+
+    if (killedReduces == UNDEFINED_VALUE) {
+      killedReduces = 0;
+    }
+
+    if (failedReduces == UNDEFINED_VALUE) {
+      failedReduces = 0;
+    }
+
+    return (int) (jobInfo.getSucceededReduces() +
+        killedReduces + failedReduces);
   }
 
   @Override
@@ -481,4 +508,24 @@
     throw new UnsupportedOperationException(
         "Can't set job's priority in history");
   }
+
+  @Override
+  public int getFailedMaps() {
+    return (int) jobInfo.getFailedMaps();
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return (int) jobInfo.getFailedReduces();
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return (int) jobInfo.getKilledMaps();
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return (int) jobInfo.getKilledReduces();
+  }
 }
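
getCompletedMaps() and getCompletedReduces() above now count succeeded + killed + failed attempts instead of the raw "finished" field, treating -1 as "not recorded"; the PartialJob and UnparsedJob stubs further down return that same -1 sentinel. A standalone sketch of the arithmetic (values are hypothetical):

    // Sketch of the backward-compatible counting logic above. A history
    // file written before killed/failed counts existed reports them as -1
    // (UNDEFINED_VALUE); treating -1 as 0 keeps old files readable without
    // deflating the completed total.
    static int completed(long succeeded, int killed, int failed) {
      if (killed == -1) {
        killed = 0;
      }
      if (failed == -1) {
        failed = 0;
      }
      return (int) (succeeded + killed + failed);
    }

    // completed(5, -1, -1) == 5  -- old file: only succeeded maps recorded
    // completed(5,  1,  2) == 8  -- new file: all three counts recorded
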
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
index b3b181c..b14f0c3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
@@ -203,4 +203,23 @@
         "Can't set job's priority in history");
   }
 
+  @Override
+  public int getFailedMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return -1;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
index cea336c..ecc4945 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
@@ -208,4 +208,24 @@
     throw new UnsupportedOperationException(
         "Can't set job's priority in history");
   }
+
+  @Override
+  public int getFailedMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return -1;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
index 84b1c6d..e881b37 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
@@ -79,9 +79,12 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
 import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -292,7 +295,7 @@
     Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps,
         numFinishedMaps);
     Assert.assertEquals("incorrect finishedReduces ", numReduces,
-        jobInfo.getFinishedReduces());
+        jobInfo.getSucceededReduces());
     Assert.assertEquals("incorrect uberized ", job.isUber(),
         jobInfo.getUberized());
     Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
@@ -379,7 +382,7 @@
   private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
       int numSuccessfulMaps) {
     if (numMaps == numSuccessfulMaps) {
-      return jobInfo.getFinishedMaps();
+      return jobInfo.getSucceededMaps();
     }
 
     long numFinishedMaps = 0;
@@ -458,6 +461,76 @@
     }
   }
 
+  @Test(timeout = 30000)
+  public void testHistoryParsingForKilledAndFailedAttempts() throws Exception {
+    MRApp app = null;
+    JobHistory jobHistory = null;
+    LOG.info("STARTING testHistoryParsingForKilledAndFailedAttempts");
+    try {
+      Configuration conf = new Configuration();
+      conf.setClass(
+          NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+          MyResolver.class, DNSToSwitchMapping.class);
+      conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
+      // "CommitterEventHandler" thread could be slower in some cases,
+      // which might cause a failed map/reduce task to fail the job
+      // immediately (see JobImpl.checkJobAfterTaskCompletion()). If there are
+      // killed events in progress, those will not be counted. Instead,
+      // we allow a 50% failure rate, so the job will always succeed and kill
+      // events will not be ignored.
+      conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);
+      conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 50);
+      RackResolver.init(conf);
+      app = new MRAppWithHistoryWithFailedAndKilledTask(3, 3, true, this
+          .getClass().getName(), true);
+      app.submit(conf);
+      Job job = app.getContext().getAllJobs().values().iterator().next();
+      JobId jobId = job.getID();
+      app.waitForState(job, JobState.SUCCEEDED);
+
+      // make sure all events are flushed
+      app.waitForState(Service.STATE.STOPPED);
+
+      jobHistory = new JobHistory();
+      jobHistory.init(conf);
+      HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
+
+      JobHistoryParser parser;
+      JobInfo jobInfo;
+      synchronized (fileInfo) {
+        Path historyFilePath = fileInfo.getHistoryFile();
+        FSDataInputStream in = null;
+        FileContext fc = null;
+        try {
+          fc = FileContext.getFileContext(conf);
+          in = fc.open(fc.makeQualified(historyFilePath));
+        } catch (IOException ioe) {
+          LOG.info("Can not open history file: " + historyFilePath, ioe);
+          throw (new Exception("Can not open History File"));
+        }
+
+        parser = new JobHistoryParser(in);
+        jobInfo = parser.parse();
+      }
+      Exception parseException = parser.getParseException();
+      Assert.assertNull("Caught an expected exception " + parseException,
+          parseException);
+
+      assertEquals("FailedMaps", 1, jobInfo.getFailedMaps());
+      assertEquals("KilledMaps", 1, jobInfo.getKilledMaps());
+      assertEquals("FailedReduces", 1, jobInfo.getFailedReduces());
+      assertEquals("KilledReduces", 1, jobInfo.getKilledReduces());
+    } finally {
+      LOG.info("FINISHED testHistoryParsingForKilledAndFailedAttempts");
+      if (app != null) {
+        app.close();
+      }
+      if (jobHistory != null) {
+        jobHistory.close();
+      }
+    }
+  }
+
   @Test(timeout = 60000)
   public void testCountersForFailedTask() throws Exception {
     LOG.info("STARTING testCountersForFailedTask");
@@ -666,6 +739,40 @@
     }
   }
 
+  static class MRAppWithHistoryWithFailedAndKilledTask
+    extends MRAppWithHistory {
+
+    MRAppWithHistoryWithFailedAndKilledTask(int maps, int reduces,
+        boolean autoComplete, String testName, boolean cleanOnStart) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart);
+    }
+
+    @Override
+    protected void attemptLaunched(TaskAttemptId attemptID) {
+      final int taskId = attemptID.getTaskId().getId();
+      final TaskType taskType = attemptID.getTaskId().getTaskType();
+
+      // map #0 --> kill,  map #1 --> fail
+      // reduce #0 --> fail, reduce #1 --> kill
+      if (taskType == TaskType.MAP && taskId == 0) {
+        getContext().getEventHandler().handle(
+            new TaskEvent(attemptID.getTaskId(), TaskEventType.T_KILL));
+      } else if (taskType == TaskType.MAP && taskId == 1) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else if (taskType == TaskType.REDUCE && taskId == 0) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else if (taskType == TaskType.REDUCE && taskId == 1) {
+        getContext().getEventHandler().handle(
+            new TaskEvent(attemptID.getTaskId(), TaskEventType.T_KILL));
+      } else {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+      }
+    }
+  }
+
   static class MRAppWithHistoryWithJobKilled extends MRAppWithHistory {
 
     public MRAppWithHistoryWithJobKilled(int maps, int reduces,
@@ -864,6 +971,7 @@
             if (eventId < 5) {
               JobUnsuccessfulCompletionEvent juce =
                   new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
+                          0, 0, 0, 0,
                       "JOB_FAILED", Collections.singletonList(
                           "Task failed: " + tids[0].toString()));
               return juce;
@@ -907,9 +1015,9 @@
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " " 
-        + jobInfo.getTotalMaps() + " " 
-        + jobInfo.getJobId() ) ;
+          + jobInfo.getSucceededMaps() + " "
+          + jobInfo.getTotalMaps() + " "
+          + jobInfo.getJobId() ) ;
     }
   
   /**
@@ -925,7 +1033,7 @@
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " "
+        + jobInfo.getSucceededMaps() + " "
         + jobInfo.getTotalMaps() + " "
         + jobInfo.getJobId() );
     }
@@ -943,7 +1051,7 @@
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " " 
+        + jobInfo.getSucceededMaps() + " "
         + jobInfo.getTotalMaps() + " " 
         + jobInfo.getJobId() ) ;
       }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
index 14961d2..867c661 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
@@ -424,5 +424,25 @@
     @Override
     public void setJobPriority(Priority priority) {
     }
+
+    @Override
+    public int getFailedMaps() {
+      return mockJob.getFailedMaps();
+    }
+
+    @Override
+    public int getFailedReduces() {
+      return mockJob.getFailedReduces();
+    }
+
+    @Override
+    public int getKilledMaps() {
+      return mockJob.getKilledMaps();
+    }
+
+    @Override
+    public int getKilledReduces() {
+      return mockJob.getKilledReduces();
+    }
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 12fbdad..1d495c4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -855,7 +855,7 @@
       long tStart = System.currentTimeMillis();
       sequentialTest(fs, testType, nrBytes, nrFiles);
       long execTime = System.currentTimeMillis() - tStart;
-      String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
+      String resultLine = "Seq Test exec time sec: " + msToSecs(execTime);
       LOG.info(resultLine);
       return 0;
     }
@@ -919,6 +919,10 @@
     return ((float)bytes)/MEGA;
   }
 
+  static float msToSecs(long timeMillis) {
+    return timeMillis / 1000.0f;
+  }
+
   private boolean checkErasureCodePolicy(String erasureCodePolicyName,
       FileSystem fs, TestType testType) throws IOException {
     Collection<ErasureCodingPolicy> list =
@@ -1051,11 +1055,10 @@
         "            Date & time: " + new Date(System.currentTimeMillis()),
         "        Number of files: " + tasks,
         " Total MBytes processed: " + df.format(toMB(size)),
-        "      Throughput mb/sec: " + df.format(size * 1000.0 / (time * MEGA)),
-        "Total Throughput mb/sec: " + df.format(toMB(size) / ((float)execTime)),
+        "      Throughput mb/sec: " + df.format(toMB(size) / msToSecs(time)),
         " Average IO rate mb/sec: " + df.format(med),
         "  IO rate std deviation: " + df.format(stdDev),
-        "     Test exec time sec: " + df.format((float)execTime / 1000),
+        "     Test exec time sec: " + df.format(msToSecs(execTime)),
         "" };
 
     PrintStream res = null;
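
The TestDFSIO change above is a pure refactor: size * 1000.0 / (time * MEGA) and toMB(size) / msToSecs(time) are algebraically the same quantity, MB processed per second. A quick sanity sketch with made-up numbers (assuming MEGA is 2^20, as defined in TestDFSIO):

    class ThroughputCheck {                        // hypothetical helper
      static final float MEGA = 0x100000;          // 2^20 bytes per MB

      static float toMB(long bytes)      { return bytes / MEGA; }
      static float msToSecs(long millis) { return millis / 1000.0f; }

      public static void main(String[] args) {
        long size = 512L * 1024 * 1024;            // 512 MB processed
        long time = 8000;                          // in 8000 ms
        double oldStyle = size * 1000.0 / (time * MEGA);
        float  newStyle = toMB(size) / msToSecs(time);
        System.out.println(oldStyle + " == " + newStyle);  // 64.0 == 64.0
      }
    }
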
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index cbca3c8..19313d3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -298,10 +298,10 @@
         " does not exist.",
         jobEventFile.exists());
     verifyEntity(jobEventFile, EventType.JOB_FINISHED.name(),
-        true, false, null);
+        true, false, null, false);
     Set<String> cfgsToCheck = Sets.newHashSet("dummy_conf1", "dummy_conf2",
         "huge_dummy_conf1", "huge_dummy_conf2");
-    verifyEntity(jobEventFile, null, false, true, cfgsToCheck);
+    verifyEntity(jobEventFile, null, false, true, cfgsToCheck, false);
 
     // for this test, we expect MR job metrics are published in YARN_APPLICATION
     String outputAppDir =
@@ -322,8 +322,8 @@
         "appEventFilePath: " + appEventFilePath +
         " does not exist.",
         appEventFile.exists());
-    verifyEntity(appEventFile, null, true, false, null);
-    verifyEntity(appEventFile, null, false, true, cfgsToCheck);
+    verifyEntity(appEventFile, null, true, false, null, false);
+    verifyEntity(appEventFile, null, false, true, cfgsToCheck, false);
 
     // check for task event file
     String outputDirTask =
@@ -344,7 +344,7 @@
         " does not exist.",
         taskEventFile.exists());
     verifyEntity(taskEventFile, EventType.TASK_FINISHED.name(),
-        true, false, null);
+        true, false, null, true);
 
     // check for task attempt event file
     String outputDirTaskAttempt =
@@ -363,7 +363,7 @@
     Assert.assertTrue("taskAttemptEventFileName: " + taskAttemptEventFilePath +
         " does not exist.", taskAttemptEventFile.exists());
     verifyEntity(taskAttemptEventFile, EventType.MAP_ATTEMPT_FINISHED.name(),
-        true, false, null);
+        true, false, null, true);
   }
 
   /**
@@ -380,12 +380,13 @@
    * @throws IOException
    */
   private void verifyEntity(File entityFile, String eventId,
-      boolean chkMetrics, boolean chkCfg, Set<String> cfgsToVerify)
-      throws IOException {
+      boolean chkMetrics, boolean chkCfg, Set<String> cfgsToVerify,
+      boolean checkIdPrefix) throws IOException {
     BufferedReader reader = null;
     String strLine;
     try {
       reader = new BufferedReader(new FileReader(entityFile));
+      long idPrefix = -1;
       while ((strLine = reader.readLine()) != null) {
         if (strLine.trim().length() > 0) {
           org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
@@ -394,6 +395,19 @@
                       strLine.trim(),
                       org.apache.hadoop.yarn.api.records.timelineservice.
                           TimelineEntity.class);
+
+          LOG.info("strLine.trim()= " + strLine.trim());
+          if (checkIdPrefix) {
+            Assert.assertTrue("Entity ID prefix expected to be > 0",
+                entity.getIdPrefix() > 0);
+            if (idPrefix == -1) {
+              idPrefix = entity.getIdPrefix();
+            } else {
+              Assert.assertEquals("Entity ID prefix should be same across " +
+                  "each publish of same entity",
+                      idPrefix, entity.getIdPrefix());
+            }
+          }
           if (eventId == null) {
             // Job metrics are published without any events for
             // ApplicationEntity. There is also the possibility that some other
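
The new checkIdPrefix argument to verifyEntity above asserts a timeline service v2 invariant: each task and task-attempt entity carries a positive ID prefix, and republishing the same entity must reuse the same prefix. The check reduces to the following sketch (the entity source is hypothetical):

    // Invariant enforced above when checkIdPrefix is true: the first entity
    // seen fixes the prefix, and every later publish of the same entity
    // must match it exactly.
    long idPrefix = -1;                            // -1: nothing seen yet
    for (TimelineEntity entity : entitiesFromFile) {  // hypothetical source
      Assert.assertTrue("Entity ID prefix expected to be > 0",
          entity.getIdPrefix() > 0);
      if (idPrefix == -1) {
        idPrefix = entity.getIdPrefix();
      } else {
        Assert.assertEquals("Entity ID prefix should be same across " +
            "each publish of same entity", idPrefix, entity.getIdPrefix());
      }
    }
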
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
index 768448f..96954d5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java
@@ -38,7 +38,7 @@
  * This class performs unit test for Job/JobControl classes.
  *  
  */
-public class TestJobControl extends junit.framework.TestCase {
+public class TestJobControl {
 
   /**
    * This is a main function for testing JobControl class.
@@ -263,13 +263,13 @@
     JobConf jc = new JobConf();
     Job j = new Job(jc);
     //Just make sure no exception is thrown
-    assertNull(j.getAssignedJobID());
+    Assert.assertNull(j.getAssignedJobID());
     org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
     org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test",0);
     when(mockjob.getJobID()).thenReturn(jid);
     j.setJob(mockjob);
     JobID expected = new JobID("test",0);
-    assertEquals(expected, j.getAssignedJobID());
+    Assert.assertEquals(expected, j.getAssignedJobID());
     verify(mockjob).getJobID();
   }
   
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
index 447ea4e..d553596 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/JobHistoryFileReplayMapperV1.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
 
@@ -54,7 +53,7 @@
 
   public void map(IntWritable key, IntWritable val, Context context) throws IOException {
     // collect the apps it needs to process
-    TimelineClient tlc = new TimelineClientImpl();
+    TimelineClient tlc = TimelineClient.createTimelineClient();
     TimelineEntityConverterV1 converter = new TimelineEntityConverterV1();
     JobHistoryFileReplayHelper helper = new JobHistoryFileReplayHelper(context);
     int replayMode = helper.getReplayMode();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
index 16d14a1..6d6151f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/SimpleEntityWriterV1.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 
 /**
    * Adds simple entities with random string payload, events, metrics, and
@@ -46,7 +45,7 @@
 
   public void map(IntWritable key, IntWritable val, Context context)
       throws IOException {
-    TimelineClient tlc = new TimelineClientImpl();
+    TimelineClient tlc = TimelineClient.createTimelineClient();
     Configuration conf = context.getConfiguration();
 
     final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);
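
Both mappers above switch from constructing TimelineClientImpl directly to the TimelineClient.createTimelineClient() factory, removing the test code's dependency on the implementation class. The returned client is a YARN service, so normal usage still goes through the standard service lifecycle; a minimal sketch (the lifecycle calls are the usual service pattern, not part of this patch):

    TimelineClient tlc = TimelineClient.createTimelineClient();
    tlc.init(conf);   // conf: an org.apache.hadoop.conf.Configuration
    tlc.start();
    try {
      // tlc.putEntities(entity);  // publish timeline entities
    } finally {
      tlc.stop();
    }
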
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
index ecc01db..afe4a10 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapCollection.java
@@ -17,9 +17,6 @@
  */
 
 package org.apache.hadoop.mapreduce;
-
-import junit.framework.TestCase;
-
 import java.io.IOException;
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -27,9 +24,6 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
index 1428e47..194cdeb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestDelegatingInputFormat.java
@@ -20,8 +20,8 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.List;
-
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -30,9 +30,10 @@
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 
-public class TestDelegatingInputFormat extends TestCase {
+public class TestDelegatingInputFormat {
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testSplitting() throws Exception {
     Job job = Job.getInstance();
     MiniDFSCluster dfs = null;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
index d86ddd0..da011a2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
@@ -95,7 +95,7 @@
     cjob2 = new ControlledJob(job2, dependingJobs);
 
     Job job3 = MapReduceTestUtil.createCopyJob(conf, outdir_3, 
-	                                   outdir_1, outdir_2);
+                                     outdir_1, outdir_2);
     dependingJobs = new ArrayList<ControlledJob>();
     dependingJobs.add(cjob1);
     dependingJobs.add(cjob2);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
index ae06812..14f123a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestMRCJCFileOutputCommitter.java
@@ -21,7 +21,10 @@
 import java.io.*;
 import java.net.URI;
 
-import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
@@ -38,7 +41,7 @@
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 
 
-public class TestMRCJCFileOutputCommitter extends TestCase {
+public class TestMRCJCFileOutputCommitter {
   private static Path outDir = new Path(System.getProperty("test.build.data",
       "/tmp"), "output");
 
@@ -76,17 +79,18 @@
     fs.delete(outDir, true);
   }
   
-  @Override
+  @Before
   public void setUp() throws IOException {
     cleanup();
   }
   
-  @Override
+  @After
   public void tearDown() throws IOException {
     cleanup();
   }
   
   @SuppressWarnings("unchecked")
+  @Test
   public void testCommitter() throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -122,7 +126,8 @@
     assertEquals(output, expectedOutput.toString());
     FileUtil.fullyDelete(new File(outDir.toString()));
   }
-  
+
+  @Test
   public void testEmptyOutput() throws Exception {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -146,6 +151,7 @@
   }
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testAbort() throws IOException, InterruptedException {
     Job job = Job.getInstance();
     FileOutputFormat.setOutputPath(job, outDir);
@@ -195,6 +201,7 @@
   }
 
   @SuppressWarnings("unchecked")
+  @Test
   public void testFailAbort() throws IOException, InterruptedException {
     Job job = Job.getInstance();
     Configuration conf = job.getConfiguration();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
index bbeece9..ae3b9c6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../hadoop-common-project/hadoop-common/)
 include(HadoopCommon)
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
index 508ded3..471c68f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/TestTaskContext.java
@@ -22,23 +22,30 @@
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestTaskContext extends TestCase {
-  
+public class TestTaskContext {
+
+  @Test
   public void testTaskContext() {
-    TaskContext context = new TaskContext(null, null, null, null, null, null, null);
+    TaskContext context = new TaskContext(null, null, null, null, null, null,
+        null);
     
     context.setInputKeyClass(IntWritable.class);
-    assertEquals(IntWritable.class.getName(), context.getInputKeyClass().getName());
+    Assert.assertEquals(IntWritable.class.getName(),
+        context.getInputKeyClass().getName());
  
     context.setInputValueClass(Text.class);
-    assertEquals(Text.class.getName(), context.getInputValueClass().getName()); 
+    Assert.assertEquals(Text.class.getName(), context.getInputValueClass()
+        .getName());
    
     context.setOutputKeyClass(LongWritable.class);
-    assertEquals(LongWritable.class.getName(), context.getOutputKeyClass().getName()); 
+    Assert.assertEquals(LongWritable.class.getName(), context
+        .getOutputKeyClass().getName());
 
     context.setOutputValueClass(FloatWritable.class);
-    assertEquals(FloatWritable.class.getName(), context.getOutputValueClass().getName()); 
+    Assert.assertEquals(FloatWritable.class.getName(), context
+        .getOutputValueClass().getName());
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
index 7eb6467..fa68364 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestInputBuffer.java
@@ -19,11 +19,12 @@
 
 import java.io.IOException;
 
-import junit.framework.TestCase;
-
+import org.junit.Test;
 import org.junit.Assert;
 
-public class TestInputBuffer extends TestCase {
+public class TestInputBuffer {
+
+  @Test
   public void testInputBuffer() throws IOException {
     final int size = 100;
     final InputBuffer input1 = new InputBuffer(BufferType.DIRECT_BUFFER, size);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
index 39c25a6..af6693e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestOutputBuffer.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.mapred.nativetask.buffer;
 
-import junit.framework.TestCase;
-
+import org.junit.Test;
 import org.junit.Assert;
 
-public class TestOutputBuffer extends TestCase {
+public class TestOutputBuffer {
+
+  @Test
   public void testOutputBuffer() {
     final int size = 100;
     final OutputBuffer output1 = new OutputBuffer(BufferType.DIRECT_BUFFER, size);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
index fd5b100..1a7dace 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/serde/TestKVSerializer.java
@@ -20,7 +20,8 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
 
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.nativetask.Constants;
@@ -30,12 +31,11 @@
 import org.apache.hadoop.mapred.nativetask.testutil.TestInput.KV;
 import org.apache.hadoop.mapred.nativetask.util.SizedWritable;
 import org.junit.Assert;
-import org.junit.Before;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 
 @SuppressWarnings({ "rawtypes", "unchecked" })
-public class TestKVSerializer extends TestCase {
+public class TestKVSerializer {
 
   int inputArraySize = 1000; // 1000 bytesWriable elements
   int bufferSize = 100; // bytes
@@ -46,7 +46,6 @@
   private SizedWritable value;
   private KVSerializer serializer;
 
-  @Override
   @Before
   public void setUp() throws IOException {
     this.inputArray = TestInput.getMapInputs(inputArraySize);
@@ -60,6 +59,7 @@
     serializer.updateLength(key, value);
   }
 
+  @Test
   public void testUpdateLength() throws IOException {
     Mockito.mock(DataOutputStream.class);
 
@@ -75,6 +75,7 @@
     }
   }
 
+  @Test
   public void testSerializeKV() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -92,6 +93,7 @@
     Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH);
   }
 
+  @Test
   public void testSerializeNoFlush() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -109,6 +111,7 @@
     Assert.assertEquals(written, key.length + value.length + Constants.SIZEOF_KV_LENGTH);
   }
 
+  @Test
   public void testSerializePartitionKV() throws IOException {
     final DataOutputStream dataOut = Mockito.mock(DataOutputStream.class);
 
@@ -130,12 +133,14 @@
         + Constants.SIZEOF_PARTITION_LENGTH);
   }
 
+  @Test
   public void testDeserializerNoData() throws IOException {
     final DataInputStream in = Mockito.mock(DataInputStream.class);
     Mockito.when(in.hasUnReadData()).thenReturn(false);
     Assert.assertEquals(0, serializer.deserializeKV(in, key, value));
   }
 
+  @Test
   public void testDeserializer() throws IOException {
     final DataInputStream in = Mockito.mock(DataInputStream.class);
     Mockito.when(in.hasUnReadData()).thenReturn(true);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
index 6ea8092..584aedd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestReadWriteBuffer.java
@@ -17,15 +17,16 @@
  */
 package org.apache.hadoop.mapred.nativetask.utils;
 
-import junit.framework.TestCase;
-
-import org.apache.hadoop.mapred.nativetask.util.ReadWriteBuffer;
+import org.junit.Test;
 import org.junit.Assert;
 
-public class TestReadWriteBuffer extends TestCase {
+import org.apache.hadoop.mapred.nativetask.util.ReadWriteBuffer;
+
+public class TestReadWriteBuffer {
 
   private static byte[] bytes = new byte[] { '0', 'a', 'b', 'c', 'd', '9' };
 
+  @Test
   public void testReadWriteBuffer() {
 
     final ReadWriteBuffer buffer = new ReadWriteBuffer();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
index 7b82eff..a6e43ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/utils/TestSizedWritable.java
@@ -17,15 +17,16 @@
  */
 package org.apache.hadoop.mapred.nativetask.utils;
 
-import junit.framework.TestCase;
+import org.junit.Test;
 
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.mapred.nativetask.util.SizedWritable;
 import org.junit.Assert;
 
 @SuppressWarnings({ "rawtypes", "unchecked" })
-public class TestSizedWritable extends TestCase {
+public class TestSizedWritable {
 
+  @Test
   public void testSizedWritable() {
     final SizedWritable w = new SizedWritable(BytesWritable.class);
     Assert.assertTrue(w.length == SizedWritable.INVALID_LENGTH);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
index 3215bfe..2df2df0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestBaileyBorweinPlouffe.java
@@ -18,29 +18,35 @@
 package org.apache.hadoop.examples;
 
 import java.math.BigInteger;
+import org.junit.Test;
+import org.junit.Assert;
 
 /** Tests for BaileyBorweinPlouffe */
-public class TestBaileyBorweinPlouffe extends junit.framework.TestCase {
+public class TestBaileyBorweinPlouffe {
 
+  @Test
   public void testMod() {
     final BigInteger TWO = BigInteger.ONE.add(BigInteger.ONE);
     for(long n = 3; n < 100; n++) {
       for (long e = 1; e < 100; e++) {
         final long r = TWO.modPow(
             BigInteger.valueOf(e), BigInteger.valueOf(n)).longValue();
-        assertEquals("e=" + e + ", n=" + n, r, BaileyBorweinPlouffe.mod(e, n));
+        Assert.assertEquals("e=" + e + ", n=" + n, r, BaileyBorweinPlouffe
+            .mod(e, n));
       }
     }
   }
 
+  @Test
   public void testHexDigit() {
     final long[] answers = {0x43F6, 0xA308, 0x29B7, 0x49F1, 0x8AC8, 0x35EA};
     long d = 1;
     for(int i = 0; i < answers.length; i++) {
-      assertEquals("d=" + d, answers[i], BaileyBorweinPlouffe.hexDigits(d));
+      Assert.assertEquals("d=" + d, answers[i], BaileyBorweinPlouffe
+          .hexDigits(d));
       d *= 10;
     }
 
-    assertEquals(0x243FL, BaileyBorweinPlouffe.hexDigits(0));
+    Assert.assertEquals(0x243FL, BaileyBorweinPlouffe.hexDigits(0));
  }
 }
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
index 991121f..d6f284e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestLongLong.java
@@ -19,24 +19,30 @@
 
 import java.math.BigInteger;
 import java.util.Random;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestLongLong extends junit.framework.TestCase {
-  static final Random RAN = new Random(); 
+public class TestLongLong {
+
+  static final Random RAN = new Random();
   static final long MASK = (1L << (LongLong.SIZE >> 1)) - 1;
 
   static long nextPositiveLong() {
     return RAN.nextLong() & MASK;
   }
-  
+
   static void verifyMultiplication(long a, long b) {
     final LongLong ll = LongLong.multiplication(new LongLong(), a, b);
     final BigInteger bi = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));
 
-    final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
+    final String s = String.format(
+        "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+        b);
     //System.out.println(s);
-    assertEquals(s, bi, ll.toBigInteger());
+    Assert.assertEquals(s, bi, ll.toBigInteger());
   }
 
+  @Test
   public void testMultiplication() {
     for(int i = 0; i < 100; i++) {
       final long a = nextPositiveLong();
@@ -50,19 +56,24 @@
   static void verifyRightShift(long a, long b) {
     final LongLong ll = new LongLong().set(a, b);
     final BigInteger bi = ll.toBigInteger();
-    
-    for(int i = 0; i < LongLong.SIZE >> 1; i++) {
+
+    for (int i = 0; i < LongLong.SIZE >> 1; i++) {
       final long result = ll.shiftRight(i) & MASK;
       final long expected = bi.shiftRight(i).longValue() & MASK;
-      final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
-      assertEquals(s, expected, result);
+      final String s = String.format(
+          "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+          b);
+      Assert.assertEquals(s, expected, result);
     }
 
-    final String s = String.format("\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a, b);
+    final String s = String.format(
+        "\na = %x\nb = %x\nll= " + ll + "\nbi= " + bi.toString(16) + "\n", a,
+        b);
     //System.out.println(s);
-    assertEquals(s, bi, ll.toBigInteger());
+    Assert.assertEquals(s, bi, ll.toBigInteger());
   }
 
+  @Test
   public void testRightShift() {
     for(int i = 0; i < 1000; i++) {
       final long a = nextPositiveLong();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
index 079eb7f..a75ec29 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestModular.java
@@ -21,14 +21,16 @@
 import java.util.Random;
 
 import org.apache.hadoop.examples.pi.Util.Timer;
+import org.junit.Assert;
+import org.junit.Test;
 
-public class TestModular extends junit.framework.TestCase { 
-  private static final Random RANDOM = new Random(); 
+public class TestModular {
+  private static final Random RANDOM = new Random();
   private static final BigInteger TWO = BigInteger.valueOf(2);
 
 
   static final int DIV_VALID_BIT = 32;
-  static final long DIV_LIMIT = 1L << DIV_VALID_BIT; 
+  static final long DIV_LIMIT = 1L << DIV_VALID_BIT;
 
   // return r/n for n > r > 0
   static long div(long sum, long r, long n) {
@@ -36,7 +38,7 @@
     int i = DIV_VALID_BIT - 1;
     for(r <<= 1; r < n; r <<= 1) i--;
 //System.out.printf("  r=%d, n=%d, q=%d\n", r, n, q);
-    
+
     for(; i >= 0 ;) {
       r -= n;
       q |= (1L << i);
@@ -48,14 +50,15 @@
     sum += q;
     return sum < DIV_LIMIT? sum: sum - DIV_LIMIT;
   }
- 
+
+  @Test
   public void testDiv() {
     for(long n = 2; n < 100; n++)
       for(long r = 1; r < n; r++) {
         final long a = div(0, r, n);
         final long b = (long)((r*1.0/n) * (1L << DIV_VALID_BIT));
         final String s = String.format("r=%d, n=%d, a=%X, b=%X", r, n, a, b);
-        assertEquals(s, b, a);
+        Assert.assertEquals(s, b, a);
       }
   }
 
@@ -64,16 +67,16 @@
 
     for(int i = 0; i < rn.length; i++) {
       rn[i] = new long[rsize + 1][];
-      long n = RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL; 
+      long n = RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL;
       if (n <= 1) n = 0xFFFFFFFFFFFFFFFL - n;
       rn[i][0] = new long[]{n};
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
 
       for(int j = 1; j < rn[i].length; j++) {
         long r = RANDOM.nextLong();
         if (r < 0) r = -r;
         if (r >= n) r %= n;
-        final BigInteger R = BigInteger.valueOf(r); 
+        final BigInteger R = BigInteger.valueOf(r);
         rn[i][j] = new long[]{r, R.multiply(R).mod(N).longValue()};
       }
     }
@@ -102,20 +105,20 @@
     } else {
       final int HALF = (63 - Long.numberOfLeadingZeros(n)) >> 1;
       final int FULL = HALF << 1;
-      final long ONES = (1 << HALF) - 1; 
-  
+      final long ONES = (1 << HALF) - 1;
+
       final long high = r >>> HALF;
       final long low  = r &= ONES;
 
       r *= r;
       if (r >= n) r %= n;
-  
+
       if (high != 0) {
         long s = high * high;
         if (s >= n) s %= n;
         for(int i = 0; i < FULL; i++)
           if ((s <<= 1) >= n) s -= n;
-        
+
         if (low == 0)
           r = s;
         else {
@@ -123,7 +126,7 @@
           if (t >= n) t %= n;
           for(int i = -1; i < HALF; i++)
             if ((t <<= 1) >= n) t -= n;
-          
+
           r += s;
           if (r >= n) r -= n;
           r += t;
@@ -133,7 +136,7 @@
     }
     return r;
   }
-  
+
   static void squareBenchmarks() {
     final Timer t = new Timer(false);
     t.tick("squareBenchmarks(), MAX_SQRT=" + Modular.MAX_SQRT_LONG);
@@ -147,8 +150,11 @@
         final long r = rn[i][j][0];
         final long answer = rn[i][j][1];
         final long s = square_slow(r, n);
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("square_slow");
@@ -161,8 +167,11 @@
         final long r = rn[i][j][0];
         final long answer = rn[i][j][1];
         final long s = square(r, n, r2p64);
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("square");
@@ -175,8 +184,11 @@
         final long answer = rn[i][j][1];
         final BigInteger R = BigInteger.valueOf(r);
         final long s = R.multiply(R).mod(N).longValue();
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("R.multiply(R).mod(N)");
@@ -189,8 +201,11 @@
         final long answer = rn[i][j][1];
         final BigInteger R = BigInteger.valueOf(r);
         final long s = R.modPow(TWO, N).longValue();
-        if (s != answer)
-          assertEquals("r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "r=" + r + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("R.modPow(TWO, N)");
@@ -201,15 +216,15 @@
 
     for(int i = 0; i < en.length; i++) {
       en[i] = new long[esize + 1][];
-      long n = (RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL) | 1L; 
+      long n = (RANDOM.nextLong() & 0xFFFFFFFFFFFFFFFL) | 1L;
       if (n == 1) n = 3;
       en[i][0] = new long[]{n};
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
 
       for(int j = 1; j < en[i].length; j++) {
         long e = RANDOM.nextLong();
         if (e < 0) e = -e;
-        final BigInteger E = BigInteger.valueOf(e); 
+        final BigInteger E = BigInteger.valueOf(e);
         en[i][j] = new long[]{e, TWO.modPow(E, N).longValue()};
       }
     }
@@ -253,10 +268,10 @@
   static class Montgomery2 extends Montgomery {
     /** Compute 2^y mod N for N odd. */
     long mod2(final long y) {
-      long r0 = R - N; 
+      long r0 = R - N;
       long r1 = r0 << 1;
       if (r1 >= N) r1 -= N;
-      
+
       for(long mask = Long.highestOneBit(y); mask > 0; mask >>>= 1) {
         if ((mask & y) == 0) {
           r1 = product.m(r0, r1);
@@ -269,7 +284,7 @@
       return product.m(r0, 1);
     }
   }
-  
+
   static void modBenchmarks() {
     final Timer t = new Timer(false);
     t.tick("modBenchmarks()");
@@ -283,12 +298,15 @@
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = Modular.mod(e, n);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("Modular.mod");
-    
+
     final Montgomery2 m2 = new Montgomery2();
     for(int i = 0; i < en.length; i++) {
       final long n = en[i][0][0];
@@ -297,8 +315,11 @@
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = m2.mod(e);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("montgomery.mod");
@@ -310,21 +331,27 @@
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = m2.mod2(e);
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("montgomery.mod2");
 
     for(int i = 0; i < en.length; i++) {
       final long n = en[i][0][0];
-      final BigInteger N = BigInteger.valueOf(n); 
+      final BigInteger N = BigInteger.valueOf(n);
       for(int j = 1; j < en[i].length; j++) {
         final long e = en[i][j][0];
         final long answer = en[i][j][1];
         final long s = TWO.modPow(BigInteger.valueOf(e), N).longValue();
-        if (s != answer)
-          assertEquals("e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s, answer, s);
+        if (s != answer) {
+          Assert.assertEquals(
+              "e=" + e + ", n=" + n + ", answer=" + answer + " but s=" + s,
+              answer, s);
+        }
       }
     }
     t.tick("BigInteger.modPow(e, n)");
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
index e80b9bb..2741962 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java
@@ -28,14 +28,19 @@
 import org.apache.hadoop.examples.pi.Util;
 import org.apache.hadoop.examples.pi.Util.Timer;
 import org.apache.hadoop.examples.pi.math.TestModular.Montgomery2;
+import org.junit.Test;
+import org.junit.Assert;
 
-public class TestSummation extends junit.framework.TestCase {
+public class TestSummation {
   static final Random RANDOM = new Random();
   static final BigInteger TWO = BigInteger.valueOf(2);
+  private static final double DOUBLE_DELTA = 1.0E-9;
 
   private static Summation2 newSummation(final long base, final long range, final long delta) {
-    final ArithmeticProgression N = new ArithmeticProgression('n', base+3, delta, base+3+range);
-    final ArithmeticProgression E = new ArithmeticProgression('e', base+range, -delta, base);
+    final ArithmeticProgression N = new ArithmeticProgression('n', base + 3,
+        delta, base + 3 + range);
+    final ArithmeticProgression E = new ArithmeticProgression('e', base + range,
+        -delta, base);
     return new Summation2(N, E);
   }
 
@@ -53,10 +58,11 @@
 
     final List<Summation> combined = Util.combine(a);
 //    Util.out.println("combined=" + combined);
-    assertEquals(1, combined.size());
-    assertEquals(sigma, combined.get(0));
+    Assert.assertEquals(1, combined.size());
+    Assert.assertEquals(sigma, combined.get(0));
   }
 
+  @Test
   public void testSubtract() {
     final Summation sigma = newSummation(3, 10000, 20);
     final int size = 10;
@@ -112,7 +118,9 @@
       long n = N.value;
       double s = 0;
       for(; e > E.limit; e += E.delta) {
-        s = Modular.addMod(s, TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n)).doubleValue()/n);
+        s = Modular.addMod(s,
+            TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n))
+                .doubleValue() / n);
         n += N.delta;
       }
       return s;
@@ -124,16 +132,16 @@
     t.tick("sigma=" + sigma);
     final double value = sigma.compute();
     t.tick("compute=" + value);
-    assertEquals(value, sigma.compute_modular());
+    Assert.assertEquals(value, sigma.compute_modular(), DOUBLE_DELTA);
     t.tick("compute_modular");
-    assertEquals(value, sigma.compute_montgomery());
+    Assert.assertEquals(value, sigma.compute_montgomery(), DOUBLE_DELTA);
     t.tick("compute_montgomery");
-    assertEquals(value, sigma.compute_montgomery2());
+    Assert.assertEquals(value, sigma.compute_montgomery2(), DOUBLE_DELTA);
     t.tick("compute_montgomery2");
 
-    assertEquals(value, sigma.compute_modBigInteger());
+    Assert.assertEquals(value, sigma.compute_modBigInteger(), DOUBLE_DELTA);
     t.tick("compute_modBigInteger");
-    assertEquals(value, sigma.compute_modPow());
+    Assert.assertEquals(value, sigma.compute_modPow(), DOUBLE_DELTA);
     t.tick("compute_modPow");
   }
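
The DOUBLE_DELTA constant is needed because JUnit 4's Assert.assertEquals for doubles requires an explicit tolerance; the deprecated two-argument form always fails. A minimal sketch:

```
import org.junit.Assert;
import org.junit.Test;

public class DoubleAssertSketch {
  private static final double DELTA = 1.0E-9;

  @Test
  public void doublesNeedATolerance() {
    double computed = 0.1 + 0.2;  // 0.30000000000000004 in IEEE 754
    // without the third argument this assertion is deprecated and fails
    Assert.assertEquals(0.3, computed, DELTA);
  }
}
```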
 
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
index 0196352..9b0bde7 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
@@ -96,8 +96,6 @@
     validatePlatform();
     runCMake();
     runMake();
-    runMake(); // The second make is a workaround for HADOOP-9215.  It can be
-               // removed when cmake 2.6 is no longer supported.
     long end = System.nanoTime();
     getLog().info("cmake compilation finished successfully in " +
           TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS) +
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ac007380..9a52d76 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1151,7 +1151,7 @@
       <dependency>
         <groupId>com.aliyun.oss</groupId>
         <artifactId>aliyun-sdk-oss</artifactId>
-        <version>2.4.1</version>
+        <version>2.8.1</version>
         <exclusions>
           <exclusion>
             <groupId>org.apache.httpcomponents</groupId>
diff --git a/hadoop-project/src/site/markdown/index.md.vm b/hadoop-project/src/site/markdown/index.md.vm
index 62e21b2ed..bb7bda2 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -55,17 +55,15 @@
 YARN Timeline Service v.2
 -------------------
 
-We are introducing an early preview (alpha 1) of a major revision of YARN
+We are introducing an early preview (alpha 2) of a major revision of YARN
 Timeline Service: v.2. YARN Timeline Service v.2 addresses two major
 challenges: improving scalability and reliability of Timeline Service, and
 enhancing usability by introducing flows and aggregation.
 
-YARN Timeline Service v.2 alpha 1 is provided so that users and developers
+YARN Timeline Service v.2 alpha 2 is provided so that users and developers
 can test it and provide feedback and suggestions for making it a ready
 replacement for Timeline Service v.1.x. It should be used only in a test
-capacity. Most importantly, security is not enabled. Do not set up or use
-Timeline Service v.2 until security is implemented if security is a
-critical requirement.
+capacity.
 
 More details are available in the
 [YARN Timeline Service v.2](./hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 0491087..3561b02 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -22,11 +22,13 @@
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -103,6 +105,31 @@
         store, key, progress, statistics), (Statistics)(null));
   }
 
+  /**
+   * {@inheritDoc}
+   * @throws FileNotFoundException if the parent directory is not present.
+   * @throws FileAlreadyExistsException if the parent is not a directory.
+   */
+  @Override
+  public FSDataOutputStream createNonRecursive(Path path,
+      FsPermission permission,
+      EnumSet<CreateFlag> flags,
+      int bufferSize,
+      short replication,
+      long blockSize,
+      Progressable progress) throws IOException {
+    Path parent = path.getParent();
+    if (parent != null) {
+      // expect this to raise an exception if there is no parent
+      if (!getFileStatus(parent).isDirectory()) {
+        throw new FileAlreadyExistsException("Not a directory: " + parent);
+      }
+    }
+    return create(path, permission,
+      flags.contains(CreateFlag.OVERWRITE), bufferSize,
+      replication, blockSize, progress);
+  }
+
   @Override
   public boolean delete(Path path, boolean recursive) throws IOException {
     try {
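
A sketch of the createNonRecursive contract added above, assuming the default FileSystem is configured to point at an OSS bucket (the path is hypothetical): the call must fail if the parent is missing, instead of creating it the way create() does.

```
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNonRecursiveSketch {
  public static void main(String[] args) throws IOException {
    // assumes fs.defaultFS is configured to an oss:// URI
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/existing-dir/part-00000");  // hypothetical path
    // throws FileNotFoundException if /existing-dir is absent, and
    // FileAlreadyExistsException if it exists but is not a directory
    try (FSDataOutputStream out = fs.createNonRecursive(
        file, true /* overwrite */, 4096, (short) 1, 64L * 1024 * 1024, null)) {
      out.writeUTF("payload");
    }
  }
}
```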
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index a944fc1..a85a739 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -129,6 +129,10 @@
     }
 
     String endPoint = conf.getTrimmed(ENDPOINT_KEY, "");
+    if (StringUtils.isEmpty(endPoint)) {
+      throw new IllegalArgumentException("Aliyun OSS endpoint should not be " +
+        "null or empty. Please set proper endpoint with 'fs.oss.endpoint'.");
+    }
     CredentialsProvider provider =
         AliyunOSSUtils.getCredentialsProvider(conf);
     ossClient = new OSSClient(endPoint, provider, clientConf);
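
With the check above, a client that never sets fs.oss.endpoint now fails fast at initialization instead of failing later inside the SDK. A minimal sketch of supplying the endpoint programmatically (the endpoint value is a placeholder):

```
import org.apache.hadoop.conf.Configuration;

public class OssEndpointSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // placeholder region endpoint; substitute the one matching your bucket
    conf.set("fs.oss.endpoint", "oss-cn-hangzhou.aliyuncs.com");
    // leaving the key unset or empty now raises IllegalArgumentException
    // when AliyunOSSFileSystemStore initializes
  }
}
```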
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
index d9e22db9..95c2363 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
@@ -98,4 +98,5 @@
     Assert.assertFalse(fileStatus.hasAcl());
     Assert.assertFalse(fileStatus.getPermission().getAclBit());
   }
+
 }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 554027b..bd8ac68 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2027,24 +2027,30 @@
 
         LOG.debug("Found {} as an explicit blob. Checking if it's a file or folder.", key);
 
-        // The blob exists, so capture the metadata from the blob
-        // properties.
-        blob.downloadAttributes(getInstrumentedContext());
-        BlobProperties properties = blob.getProperties();
+        try {
+          // The blob exists, so capture the metadata from the blob
+          // properties.
+          blob.downloadAttributes(getInstrumentedContext());
+          BlobProperties properties = blob.getProperties();
 
-        if (retrieveFolderAttribute(blob)) {
-          LOG.debug("{} is a folder blob.", key);
-          return new FileMetadata(key, properties.getLastModified().getTime(),
-              getPermissionStatus(blob), BlobMaterialization.Explicit);
-        } else {
+          if (retrieveFolderAttribute(blob)) {
+            LOG.debug("{} is a folder blob.", key);
+            return new FileMetadata(key, properties.getLastModified().getTime(),
+                getPermissionStatus(blob), BlobMaterialization.Explicit);
+          } else {
 
-          LOG.debug("{} is a normal blob.", key);
+            LOG.debug("{} is a normal blob.", key);
 
-          return new FileMetadata(
-              key, // Always return denormalized key with metadata.
-              getDataLength(blob, properties),
-              properties.getLastModified().getTime(),
-              getPermissionStatus(blob));
+            return new FileMetadata(
+                key, // Always return denormalized key with metadata.
+                getDataLength(blob, properties),
+                properties.getLastModified().getTime(),
+                getPermissionStatus(blob));
+          }
+        } catch (StorageException e) {
+          if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
+            throw e;
+          }
         }
       }
 
@@ -2459,8 +2465,11 @@
     try {
       blob.delete(operationContext, lease);
     } catch (StorageException e) {
-      LOG.error("Encountered Storage Exception for delete on Blob: {}, Exception Details: {} Error Code: {}",
-          blob.getUri(), e.getMessage(), e.getErrorCode());
+      if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
+        LOG.error("Encountered Storage Exception for delete on Blob: {}"
+            + ", Exception Details: {} Error Code: {}",
+            blob.getUri(), e.getMessage(), e.getErrorCode());
+      }
       // On exception, check that if:
       // 1. It's a BlobNotFound exception AND
       // 2. It got there after one-or-more retries THEN
@@ -2491,17 +2500,17 @@
         // Container doesn't exist, no need to do anything
         return true;
       }
-
       // Get the blob reference and delete it.
       CloudBlobWrapper blob = getBlobReference(key);
-      if (blob.exists(getInstrumentedContext())) {
-        safeDelete(blob, lease);
-        return true;
-      } else {
+      safeDelete(blob, lease);
+      return true;
+    } catch (Exception e) {
+      if (e instanceof StorageException
+          && NativeAzureFileSystemHelper.isFileNotFoundException(
+              (StorageException) e)) {
+        // the file or directory does not exist
         return false;
       }
-    } catch (Exception e) {
-      // Re-throw as an Azure storage exception.
       throw new AzureException(e);
     }
   }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a7558a3..2abc6c6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2043,7 +2043,12 @@
       AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
         @Override
         public boolean execute(FileMetadata file) throws IOException{
-          return deleteFile(file.getKey(), file.isDir());
+          if (!deleteFile(file.getKey(), file.isDir())) {
+            LOG.warn("Attempt to delete non-existent {} {}",
+                file.isDir() ? "directory" : "file",
+                file.getKey());
+          }
+          return true;
         }
       };
 
@@ -2080,30 +2085,28 @@
     return new AzureFileSystemThreadPoolExecutor(threadCount, threadNamePrefix, operation, key, config);
   }
 
-  // Delete single file / directory from key.
+  /**
+   * Delete the specified file or directory and increment metrics.
+   * If the file or directory does not exist, the operation returns false.
+   * @param path the path to a file or directory.
+   * @param isDir true if the path is a directory; otherwise false.
+   * @return true if delete is successful; otherwise false.
+   * @throws IOException if an IO error occurs while attempting to delete the
+   * path.
+   *
+   */
   @VisibleForTesting
-  boolean deleteFile(String key, boolean isDir) throws IOException {
-    try {
-      if (store.delete(key)) {
-        if (isDir) {
-          instrumentation.directoryDeleted();
-        } else {
-          instrumentation.fileDeleted();
-        }
-        return true;
-      } else {
-        return false;
-      }
-    } catch(IOException e) {
-      Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e);
-
-      if (innerException instanceof StorageException
-          && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
-        return false;
-      }
-
-      throw e;
+  boolean deleteFile(String path, boolean isDir) throws IOException {
+    if (!store.delete(path)) {
+      return false;
     }
+
+    if (isDir) {
+      instrumentation.directoryDeleted();
+    } else {
+      instrumentation.fileDeleted();
+    }
+    return true;
   }
 
   @Override
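
The reworked deleteFile above separates the two outcomes: false now simply means the path was already gone, and the recursive delete treats that as a warning rather than a failure. A minimal self-contained sketch of the pattern, with BlobStore standing in for the real store interface:

```
import java.io.IOException;

public class TolerantDeleteSketch {
  /** Stand-in for a store whose delete returns false when the key is absent. */
  interface BlobStore {
    boolean delete(String key) throws IOException;
  }

  static boolean deleteQuietly(BlobStore store, String key) throws IOException {
    if (!store.delete(key)) {
      // the key was already gone, e.g. removed by a concurrent caller;
      // absence is an acceptable end state for delete, so warn and move on
      System.err.println("Attempt to delete non-existent key " + key);
    }
    return true;
  }
}
```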
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
index 3d33453..5dbb6bc 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
@@ -71,6 +71,13 @@
   private String storageAccount;
   private RetryPolicyFactory retryPolicy;
   private int timeoutIntervalInMs;
+  private boolean useContainerSasKeyForAllAccess;
+
+  /**
+   * Configuration key to specify if the container SAS key is used for all accesses.
+   */
+  public static final String KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS =
+      "fs.azure.saskey.usecontainersaskeyforallaccess";
 
   public SecureStorageInterfaceImpl(boolean useLocalSASKeyMode,
       Configuration conf) throws SecureModeException {
@@ -88,6 +95,7 @@
       }
       this.sasKeyGenerator = remoteSasKeyGenerator;
     }
+    this.useContainerSasKeyForAllAccess = conf.getBoolean(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, true);
   }
 
   @Override
@@ -145,7 +153,9 @@
       if (timeoutIntervalInMs > 0) {
         container.getServiceClient().getDefaultRequestOptions().setTimeoutIntervalInMs(timeoutIntervalInMs);
       }
-      return new SASCloudBlobContainerWrapperImpl(storageAccount, container, sasKeyGenerator);
+      return (useContainerSasKeyForAllAccess)
+          ? new SASCloudBlobContainerWrapperImpl(storageAccount, container, null)
+          : new SASCloudBlobContainerWrapperImpl(storageAccount, container, sasKeyGenerator);
     } catch (SASKeyGenerationException sasEx) {
       String errorMsg = "Encountered SASKeyGeneration exception while "
           + "generating SAS Key for container : " + name
@@ -226,12 +236,12 @@
     public CloudBlobWrapper getBlockBlobReference(String relativePath)
         throws URISyntaxException, StorageException {
       try {
-        CloudBlockBlob blob = new CloudBlockBlob(sasKeyGenerator.getRelativeBlobSASUri(
-                storageAccount, getName(), relativePath));
+        CloudBlockBlob blob = (sasKeyGenerator != null)
+          ? new CloudBlockBlob(sasKeyGenerator.getRelativeBlobSASUri(storageAccount, getName(), relativePath))
+          : container.getBlockBlobReference(relativePath);
         blob.getServiceClient().setDefaultRequestOptions(
                 container.getServiceClient().getDefaultRequestOptions());
-        return new SASCloudBlockBlobWrapperImpl(
-                blob);
+        return new SASCloudBlockBlobWrapperImpl(blob);
       } catch (SASKeyGenerationException sasEx) {
         String errorMsg = "Encountered SASKeyGeneration exception while "
             + "generating SAS Key for relativePath : " + relativePath
@@ -245,12 +255,13 @@
     public CloudBlobWrapper getPageBlobReference(String relativePath)
         throws URISyntaxException, StorageException {
       try {
-        CloudPageBlob blob = new CloudPageBlob(sasKeyGenerator.getRelativeBlobSASUri(
-                storageAccount, getName(), relativePath));
+        CloudPageBlob blob = (sasKeyGenerator != null)
+          ? new CloudPageBlob(sasKeyGenerator.getRelativeBlobSASUri(storageAccount, getName(), relativePath))
+          : container.getPageBlobReference(relativePath);
+
         blob.getServiceClient().setDefaultRequestOptions(
                 container.getServiceClient().getDefaultRequestOptions());
-        return new SASCloudPageBlobWrapperImpl(
-                blob);
+        return new SASCloudPageBlobWrapperImpl(blob);
       } catch (SASKeyGenerationException sasEx) {
         String errorMsg = "Encountered SASKeyGeneration exception while "
             + "generating SAS Key for relativePath : " + relativePath
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 79cb0ea..758650d 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -476,6 +476,15 @@
     </property>
 ```
 
+ Use the container SAS key for access to all blobs within the container.
+ Blob-specific SAS keys are not used when this setting is enabled.
+ This setting provides better performance than blob-specific SAS keys.
+ ```
+    <property>
+      <name>fs.azure.saskey.usecontainersaskeyforallaccess</name>
+      <value>true</value>
+    </property>
+```
 ## Testing the hadoop-azure Module
 
 The hadoop-azure module includes a full suite of unit tests.  Most of the tests
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
index ce3cdee..fd3690c4 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
@@ -39,6 +39,8 @@
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 /**
  * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
@@ -529,30 +531,65 @@
   }
 
   /*
-   * Test case for delete operation with multiple threads and flat listing enabled.
+   * Validate that a recursive directory delete succeeds even if deleting a
+   * child directory fails because that directory no longer exists. This can
+   * happen if a child directory is removed by an external agent while the
+   * parent is being deleted recursively.
    */
   @Test
-  public void testDeleteSingleDeleteFailure() throws Exception {
+  public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted()
+      throws Exception {
+    testRecursiveDirectoryDelete(true);
+  }
 
+  /*
+   * Validate that a recursive directory delete succeeds even if deleting a
+   * child file fails because the file no longer exists. This can happen if a
+   * file is removed by an external agent while the parent directory is being
+   * deleted.
+   */
+  @Test
+  public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse()
+      throws Exception {
+    testRecursiveDirectoryDelete(false);
+  }
+
+  private void testRecursiveDirectoryDelete(boolean useDir) throws Exception {
+    String childPathToBeDeletedByExternalAgent = (useDir)
+        ? "root/0"
+        : "root/0/fileToRename";
     // Spy azure file system object and return false for deleting one file
-    LOG.info("testDeleteSingleDeleteFailure");
     NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
-    String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0")));
-    Mockito.when(mockFs.deleteFile(path, true)).thenReturn(false);
+    String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path(
+        childPathToBeDeletedByExternalAgent)));
+
+    Answer<Boolean> answer = new Answer<Boolean>() {
+      public Boolean answer(InvocationOnMock invocation) throws Throwable {
+        String path = (String) invocation.getArguments()[0];
+        boolean isDir = (boolean) invocation.getArguments()[1];
+        boolean realResult = fs.deleteFile(path, isDir);
+        assertTrue(realResult);
+        // report false, as if an external agent had already removed the path
+        return false;
+      }
+    };
+
+    Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer);
 
     createFolder(mockFs, "root");
     Path sourceFolder = new Path("root");
-    assertFalse(mockFs.delete(sourceFolder, true));
-    assertTrue(mockFs.exists(sourceFolder));
 
-    // Validate from logs that threads are enabled and delete operation failed.
+    assertTrue(mockFs.delete(sourceFolder, true));
+    assertFalse(mockFs.exists(sourceFolder));
+
+    // Validate from logs that threads are enabled, that a child directory was
+    // deleted by an external caller, and the parent delete operation still
+    // succeeds.
     String content = logs.getOutput();
     assertInLog(content,
         "Using thread pool for Delete operation with threads");
-    assertInLog(content, "Delete operation failed for file " + path);
-    assertInLog(content,
-        "Terminating execution of Delete operation now as some other thread already got exception or operation failed");
-    assertInLog(content, "Failed to delete files / subfolders in blob");
+    assertInLog(content, String.format("Attempt to delete non-existent %s %s",
+        useDir ? "directory" : "file", path));
   }
 
   /*
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
new file mode 100644
index 0000000..6149154
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFSAuthWithBlobSpecificKeys.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.fs.azure.SecureStorageInterfaceImpl.KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS;
+
+/**
+ * Test class to hold all WASB authorization tests that use blob-specific keys
+ * to access storage.
+ */
+public class TestNativeAzureFSAuthWithBlobSpecificKeys
+    extends TestNativeAzureFileSystemAuthorizationWithOwner {
+
+  @Override
+  public Configuration getConfiguration() {
+    Configuration conf = super.getConfiguration();
+    conf.set(KEY_USE_CONTAINER_SASKEY_FOR_ALL_ACCESS, "false");
+    return conf;
+  }
+
+  @Override
+  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
+    Configuration conf = getConfiguration();
+    return AzureBlobStorageTestAccount.create(conf);
+  }
+}
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
index ec72cce..7c5899d 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java
@@ -19,101 +19,166 @@
 package org.apache.hadoop.fs.azure;
 
 
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
 /***
  * Test class to hold all Live Azure storage concurrency tests.
  */
 public class TestNativeAzureFileSystemConcurrencyLive
     extends AbstractWasbTestBase {
 
-  private static final int TEST_COUNT = 102;
+  private static final int THREAD_COUNT = 102;
+  private static final int TEST_EXECUTION_TIMEOUT = 5000;
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
     return AzureBlobStorageTestAccount.create();
   }
 
   /**
-   * Test multi-threaded deletes in WASB. Expected behavior is one of the thread
-   * should be to successfully delete the file and return true and all other
-   * threads need to return false.
+   * Validate contract for FileSystem.create when overwrite is true and there
+   * are concurrent callers of FileSystem.delete.  An existing file should be
+   * overwritten, even if the original destination exists but is deleted by an
+   * external agent during the create operation.
    */
-  @Test
-  public void testMultiThreadedDeletes() throws Exception {
+  @Test(timeout = TEST_EXECUTION_TIMEOUT)
+  public void testConcurrentCreateDeleteFile() throws Exception {
+    Path testFile = new Path("test.dat");
+
+    List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT);
+
+    for (int i = 0; i < THREAD_COUNT; i++) {
+      tasks.add(new CreateFileTask(fs, testFile));
+    }
+
+    ExecutorService es = null;
+
+    try {
+      es = Executors.newFixedThreadPool(THREAD_COUNT);
+
+      List<Future<Void>> futures = es.invokeAll(tasks);
+
+      for (Future<Void> future : futures) {
+        Assert.assertTrue(future.isDone());
+
+        // we are using Callable<V>, so if an exception
+        // occurred during the operation, it will be thrown
+        // when we call get
+        Assert.assertNull(future.get());
+      }
+    } finally {
+      if (es != null) {
+        es.shutdownNow();
+      }
+    }
+  }
+
+  /**
+   * Validate contract for FileSystem.delete when invoked concurrently.
+   * One of the threads should successfully delete the file and return true;
+   * all other threads should return false.
+   */
+  @Test(timeout = TEST_EXECUTION_TIMEOUT)
+  public void testConcurrentDeleteFile() throws Exception {
     Path testFile = new Path("test.dat");
     fs.create(testFile).close();
 
-    int threadCount = TEST_COUNT;
-    DeleteHelperThread[] helperThreads = new DeleteHelperThread[threadCount];
+    List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT);
 
-    for (int i = 0; i < threadCount; i++) {
-      helperThreads[i] = new DeleteHelperThread(fs, testFile);
+    for (int i = 0; i < THREAD_COUNT; i++) {
+      tasks.add(new DeleteFileTask(fs, testFile));
     }
 
-    Thread[] threads = new Thread[threadCount];
+    ExecutorService es = null;
+    try {
+      es = Executors.newFixedThreadPool(THREAD_COUNT);
 
-    for (int i = 0; i < threadCount; i++) {
-      threads[i] = new Thread(helperThreads[i]);
-      threads[i].start();
-    }
+      List<Future<Boolean>> futures = es.invokeAll(tasks);
 
-    for (int i = 0; i < threadCount; i++) {
-      threads[i].join();
-    }
+      int successCount = 0;
+      for (Future<Boolean> future : futures) {
+        Assert.assertTrue(future.isDone());
 
-    boolean deleteSuccess = false;
+        // we are using Callable<V>, so if an exception
+        // occurred during the operation, it will be thrown
+        // when we call get
+        Boolean success = future.get();
+        if (success) {
+          successCount++;
+        }
+      }
 
-    for (int i = 0; i < threadCount; i++) {
-
-      Assert.assertFalse("child thread has exception : " + helperThreads[i].getException(),
-          helperThreads[i].getExceptionEncounteredFlag());
-
-      if (deleteSuccess) {
-        Assert.assertFalse("More than one thread delete() retuhelperThreads[i].getDeleteSuccess()",
-            helperThreads[i].getExceptionEncounteredFlag());
-      } else {
-        deleteSuccess = helperThreads[i].getDeleteSuccess();
+      Assert.assertEquals(
+          "Exactly one delete operation should return true.",
+          1,
+          successCount);
+    } finally {
+      if (es != null) {
+        es.shutdownNow();
       }
     }
-
-    Assert.assertTrue("No successfull delete found", deleteSuccess);
   }
 }
 
-class DeleteHelperThread implements Runnable {
+abstract class FileSystemTask<V> implements Callable<V> {
+  private final FileSystem fileSystem;
+  private final Path path;
 
-  private FileSystem fs;
-  private Path p;
-  private boolean deleteSuccess;
-  private boolean exceptionEncountered;
-  private Exception ex;
-
-  public DeleteHelperThread(FileSystem fs, Path p) {
-    this.fs = fs;
-    this.p = p;
+  protected FileSystem getFileSystem() {
+    return this.fileSystem;
   }
 
-  public void run() {
-    try {
-      deleteSuccess = fs.delete(p, false);
-    } catch (Exception ioEx) {
-      exceptionEncountered = true;
-      this.ex = ioEx;
-    }
+  protected Path getFilePath() {
+    return this.path;
   }
 
-  public boolean getDeleteSuccess() {
-    return deleteSuccess;
+  FileSystemTask(FileSystem fs, Path p) {
+    this.fileSystem = fs;
+    this.path = p;
   }
 
-  public boolean getExceptionEncounteredFlag() {
-    return exceptionEncountered;
+  public abstract V call() throws Exception;
+}
+
+class DeleteFileTask extends FileSystemTask<Boolean> {
+
+  DeleteFileTask(FileSystem fs, Path p) {
+    super(fs, p);
   }
 
-  public Exception getException() {
-    return ex;
+  @Override
+  public Boolean call() throws Exception {
+    return this.getFileSystem().delete(this.getFilePath(), false);
+  }
+}
+
+class CreateFileTask extends FileSystemTask<Void> {
+  CreateFileTask(FileSystem fs, Path p) {
+    super(fs, p);
+  }
+
+  public Void call() throws Exception {
+    FileSystem fs = getFileSystem();
+    Path p = getFilePath();
+
+    // Create an empty file and close the stream.
+    FSDataOutputStream stream = fs.create(p, true);
+    stream.close();
+
+    // Delete the file.  We don't care if delete returns true or false.
+    // We just want to ensure the file does not exist.
+    fs.delete(p, false);
+
+    return null;
   }
 }
\ No newline at end of file
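
The comments above rely on a property of Callable worth spelling out: an exception thrown inside call() is captured by the Future and rethrown, wrapped in ExecutionException, when get() is invoked, whereas a Runnable's exception would be lost to the pool's thread. A minimal sketch:

```
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CallableExceptionSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService es = Executors.newSingleThreadExecutor();
    try {
      Future<Void> future = es.submit((Callable<Void>) () -> {
        throw new IllegalStateException("failed inside the task");
      });
      try {
        future.get();  // rethrows the task's exception, wrapped
      } catch (ExecutionException e) {
        System.out.println("caught: " + e.getCause().getMessage());
      }
    } finally {
      es.shutdownNow();
    }
  }
}
```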
diff --git a/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
index dbb8ef0..2daae2e 100644
--- a/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
+++ b/hadoop-tools/hadoop-datajoin/src/test/java/org/apache/hadoop/contrib/utils/join/TestDataJoin.java
@@ -20,10 +20,10 @@
 
 import java.io.IOException;
 
-import junit.framework.Test;
-import junit.framework.TestCase;
-import junit.framework.TestSuite;
-import junit.extensions.TestSetup;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -36,24 +36,27 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.*;
 
-public class TestDataJoin extends TestCase {
-
+/**
+ * Class to test a join between two data
+ * sources.
+ */
+public class TestDataJoin {
   private static MiniDFSCluster cluster = null;
-  public static Test suite() {
-    TestSetup setup = new TestSetup(new TestSuite(TestDataJoin.class)) {
-      protected void setUp() throws Exception {
-        Configuration conf = new Configuration();
-        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      }
-      protected void tearDown() throws Exception {
-        if (cluster != null) {
-          cluster.shutdown();
-        }
-      }
-    };
-    return setup;
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
   }
 
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
   public void testDataJoin() throws Exception {
     final int srcs = 4;
     JobConf job = new JobConf();
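
One semantic note on this conversion: the old TestSetup wrapper ran once around the whole suite, while @Before and @After run around every test method. With a single test the two are equivalent; if more tests were added, the once-per-suite behaviour could be kept with static @BeforeClass/@AfterClass methods, sketched here against a hypothetical expensive fixture:

```
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class SuiteLifecycleSketch {

  @BeforeClass
  public static void startCluster() throws Exception {
    // build the expensive fixture (e.g. a MiniDFSCluster) once per class
  }

  @AfterClass
  public static void stopCluster() throws Exception {
    // tear it down once, after all tests in the class have run
  }
}
```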
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
index 29c59ac..138b491 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
@@ -280,7 +280,7 @@
     out.writeLong(getBlockSize());
     out.writeLong(getModificationTime());
     out.writeLong(getAccessTime());
-    getPermission().write(out);
+    out.writeShort(getPermission().toShort());
     Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
     Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
     if (aclEntries != null) {
@@ -330,7 +330,7 @@
     blocksize = in.readLong();
     modificationTime = in.readLong();
     accessTime = in.readLong();
-    permission.readFields(in);
+    permission.fromShort(in.readShort());
     owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
     group = Text.readString(in, Text.DEFAULT_MAX_LEN);
     byte aclEntriesSize = in.readByte();
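
The writer and reader above stay symmetric: the permission now travels as a single short instead of going through FsPermission's Writable methods. A sketch of the round trip:

```
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionRoundTripSketch {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0644);
    short encoded = perm.toShort();                    // what write() now emits
    FsPermission decoded = new FsPermission(encoded);  // the reader's view
    System.out.println(perm.equals(decoded));          // prints true
  }
}
```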
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index dbe750a..2b3b529 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -403,8 +402,7 @@
     CopyListingFileStatus copyListingFileStatus =
         new CopyListingFileStatus(fileStatus, chunkOffset, chunkLength);
     if (preserveAcls) {
-      FsPermission perm = fileStatus.getPermission();
-      if (perm.getAclBit()) {
+      if (fileStatus.hasAcl()) {
         List<AclEntry> aclEntries = fileSystem.getAclStatus(
           fileStatus.getPath()).getEntries();
         copyListingFileStatus.setAclEntries(aclEntries);
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
index c42e546..f626de4 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.tools.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.EnumSet;
-import java.util.Random;
-import java.util.Stack;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -49,6 +39,16 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.EnumSet;
+import java.util.Random;
+import java.util.Stack;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 public class TestDistCpUtils {
   private static final Log LOG = LogFactory.getLog(TestDistCpUtils.class);
 
@@ -61,7 +61,7 @@
   @BeforeClass
   public static void create() throws IOException {
     config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        "XOR-2-1-64k");
+        "XOR-2-1-1024k");
     cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(2)
         .format(true)
@@ -562,7 +562,7 @@
     fs.mkdirs(srcECDir);
     fs.mkdirs(dstReplDir);
     String[] args = {"-setPolicy", "-path", "/tmp/srcECDir",
-        "-policy", "XOR-2-1-64k"};
+        "-policy", "XOR-2-1-1024k"};
     int res = ToolRunner.run(config, new ECAdmin(config), args);
     assertEquals("Setting EC policy should succeed!", 0, res);
     verifyReplFactorNotPreservedOnErasureCodedFile(srcECFile, true,
@@ -577,7 +577,7 @@
     fs.mkdirs(srcReplDir);
     fs.mkdirs(dstECDir);
     args = new String[]{"-setPolicy", "-path", "/tmp/dstECDir",
-        "-policy", "XOR-2-1-64k"};
+        "-policy", "XOR-2-1-1024k"};
     res = ToolRunner.run(config, new ECAdmin(config), args);
     assertEquals("Setting EC policy should succeed!", 0, res);
     verifyReplFactorNotPreservedOnErasureCodedFile(srcReplFile,
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
index 3704c5b..3e52b3c 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
@@ -38,11 +38,13 @@
 import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
 import org.slf4j.event.Level;
 
 import static org.slf4j.LoggerFactory.getLogger;
 
-public class TestDistCh extends junit.framework.TestCase {
+public class TestDistCh {
   {
     GenericTestUtils.setLogLevel(
         getLogger("org.apache.hadoop.hdfs.StateChange"), Level.ERROR);
@@ -75,20 +77,20 @@
 
     Path createSmallFile(Path dir) throws IOException {
       final Path f = new Path(dir, "f" + ++fcount);
-      assertTrue(!fs.exists(f));
+      Assert.assertTrue(!fs.exists(f));
       final DataOutputStream out = fs.create(f);
       try {
         out.writeBytes("createSmallFile: f=" + f);
       } finally {
         out.close();
       }
-      assertTrue(fs.exists(f));
+      Assert.assertTrue(fs.exists(f));
       return f;
     }
 
     Path mkdir(Path dir) throws IOException {
-      assertTrue(fs.mkdirs(dir));
-      assertTrue(fs.getFileStatus(dir).isDirectory());
+      Assert.assertTrue(fs.mkdirs(dir));
+      Assert.assertTrue(fs.getFileStatus(dir).isDirectory());
       return dir;
     }
     
@@ -127,7 +129,8 @@
       defaultPerm = permission == null || "".equals(permission);
     }
   }
-  
+
+  @Test
   public void testDistCh() throws Exception {
     final Configuration conf = new Configuration();
 
@@ -190,13 +193,13 @@
   }
 
   static void checkFileStatus(ChPermissionStatus expected, FileStatus actual) {
-    assertEquals(expected.getUserName(), actual.getOwner());
-    assertEquals(expected.getGroupName(), actual.getGroup());
+    Assert.assertEquals(expected.getUserName(), actual.getOwner());
+    Assert.assertEquals(expected.getGroupName(), actual.getGroup());
     FsPermission perm = expected.getPermission();
     if (actual.isFile() && expected.defaultPerm) {
       perm = perm.applyUMask(UMASK);
     }
-    assertEquals(perm, actual.getPermission());
+    Assert.assertEquals(perm, actual.getPermission());
   }
 
   private static String runLsr(final FsShell shell, String root, int returnvalue
@@ -210,7 +213,7 @@
     System.setErr(out);
     final String results;
     try {
-      assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
+      Assert.assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
       results = bytes.toString();
     } finally {
       IOUtils.closeStream(out);
diff --git a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
index 3b0b28c..ff660bf 100644
--- a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
+++ b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
index a8497f4..ef32db4 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
@@ -223,7 +223,7 @@
           && finishedReduces != null) {
         return new JobUnsuccessfulCompletionEvent(jobID, Long
             .parseLong(finishTime), Integer.parseInt(finishedMaps), Integer
-            .parseInt(finishedReduces), status);
+            .parseInt(finishedReduces), -1, -1, -1, -1, status);
       }
 
       return null;
@@ -256,8 +256,8 @@
           && finishedReduces != null) {
         return new JobFinishedEvent(jobID, Long.parseLong(finishTime), Integer
             .parseInt(finishedMaps), Integer.parseInt(finishedReduces), Integer
-            .parseInt(failedMaps), Integer.parseInt(failedReduces), null, null,
-            maybeParseCounters(counters));
+            .parseInt(failedMaps), Integer.parseInt(failedReduces), -1, -1,
+            null, null, maybeParseCounters(counters));
       }
 
       return null;
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 477cc4a..59f9c17 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -66,6 +66,7 @@
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
@@ -227,6 +228,9 @@
     if (Class.forName(schedulerClass) == CapacityScheduler.class) {
       rmConf.set(YarnConfiguration.RM_SCHEDULER,
           SLSCapacityScheduler.class.getName());
+      rmConf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+      rmConf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+          ProportionalCapacityPreemptionPolicy.class.getName());
     } else if (Class.forName(schedulerClass) == FairScheduler.class) {
       rmConf.set(YarnConfiguration.RM_SCHEDULER,
           SLSFairScheduler.class.getName());
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 8962aba..e71ddff 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -179,7 +179,7 @@
     }
 
     @Override
-    public void updateNodeHeartbeatResponseForContainersDecreasing(
+    public void updateNodeHeartbeatResponseForUpdatedContainers(
         NodeHeartbeatResponse response) {
       // TODO Auto-generated method stub
       
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index d7b159c..6b7ac3c 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -168,7 +168,7 @@
   }
 
   @Override
-  public void updateNodeHeartbeatResponseForContainersDecreasing(
+  public void updateNodeHeartbeatResponseForUpdatedContainers(
       NodeHeartbeatResponse response) {
     // TODO Auto-generated method stub
   }
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml b/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
index 7b2e674..20146fc 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/yarn-site.xml
@@ -22,16 +22,6 @@
   </property>
 
   <property>
-    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.monitor.policies</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value>
-  </property>
-
-  <property>
     <description>The address of the RM web application.</description>
     <name>yarn.resourcemanager.webapp.address</name>
     <value>localhost:18088</value>
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
index 3a81e42..1ca5436 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestTypedBytesWritable.java
@@ -26,10 +26,12 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
-public class TestTypedBytesWritable extends TestCase {
+public class TestTypedBytesWritable {
 
+  @Test
   public void testToString() {
     TypedBytesWritable tbw = new TypedBytesWritable();
     tbw.setValue(true);
@@ -46,6 +48,7 @@
     assertEquals("random text", tbw.toString());
   }
 
+  @Test
   public void testIO() throws IOException {
     TypedBytesWritable tbw = new TypedBytesWritable();
     tbw.setValue(12345);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index d3ca765..9b254ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -92,21 +93,21 @@
         .preemptionMessage(preempt).nmTokens(nmTokens).build();
   }
 
-  @Public
+  @Private
   @Unstable
   public static AllocateResponse newInstance(int responseId,
       List<ContainerStatus> completedContainers,
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens,
-      List<UpdatedContainer> updatedContainers) {
+      CollectorInfo collectorInfo) {
     return AllocateResponse.newBuilder().numClusterNodes(numClusterNodes)
         .responseId(responseId)
         .completedContainersStatuses(completedContainers)
         .allocatedContainers(allocatedContainers).updatedNodes(updatedNodes)
         .availableResources(availResources).amCommand(command)
         .preemptionMessage(preempt).nmTokens(nmTokens)
-        .updatedContainers(updatedContainers).build();
+        .collectorInfo(collectorInfo).build();
   }
 
   @Private
@@ -133,7 +134,7 @@
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens, Token amRMToken,
-      List<UpdatedContainer> updatedContainers, String collectorAddr) {
+      List<UpdatedContainer> updatedContainers, CollectorInfo collectorInfo) {
     return AllocateResponse.newBuilder().numClusterNodes(numClusterNodes)
         .responseId(responseId)
         .completedContainersStatuses(completedContainers)
@@ -141,7 +142,7 @@
         .availableResources(availResources).amCommand(command)
         .preemptionMessage(preempt).nmTokens(nmTokens)
         .updatedContainers(updatedContainers).amRmToken(amRMToken)
-        .collectorAddr(collectorAddr).build();
+        .collectorInfo(collectorInfo).build();
   }
 
   /**
@@ -333,17 +334,18 @@
   public abstract void setApplicationPriority(Priority priority);
 
   /**
-   * The address of collector that belong to this app
+   * The data associated with the collector that belongs to this app. Contains
+   * the address and token along with identification information.
    *
-   * @return The address of collector that belong to this attempt
+   * @return The data of the collector that belongs to this attempt
    */
   @Public
   @Unstable
-  public abstract String getCollectorAddr();
+  public abstract CollectorInfo getCollectorInfo();
 
   @Private
   @Unstable
-  public abstract void setCollectorAddr(String collectorAddr);
+  public abstract void setCollectorInfo(CollectorInfo info);
 
   /**
    * Get the list of container update errors to inform the
@@ -559,15 +561,17 @@
     }
 
     /**
-     * Set the <code>collectorAddr</code> of the response.
-     * @see AllocateResponse#setCollectorAddr(String)
-     * @param collectorAddr <code>collectorAddr</code> of the response
+     * Set the <code>collectorInfo</code> of the response.
+     * @see AllocateResponse#setCollectorInfo(CollectorInfo)
+     * @param collectorInfo <code>collectorInfo</code> of the response which
+     *    contains the collector address and collector token.
      * @return {@link AllocateResponseBuilder}
      */
     @Private
     @Unstable
-    public AllocateResponseBuilder collectorAddr(String collectorAddr) {
-      allocateResponse.setCollectorAddr(collectorAddr);
+    public AllocateResponseBuilder collectorInfo(
+        CollectorInfo collectorInfo) {
+      allocateResponse.setCollectorInfo(collectorInfo);
       return this;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
new file mode 100644
index 0000000..d22b9fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Collector info containing collector address and collector token passed from
+ * RM to AM in Allocate Response.
+ */
+@Private
+@InterfaceStability.Unstable
+public abstract class CollectorInfo {
+
+  protected static final long DEFAULT_TIMESTAMP_VALUE = -1;
+
+  public static CollectorInfo newInstance(String collectorAddr) {
+    return newInstance(collectorAddr, null);
+  }
+
+  public static CollectorInfo newInstance(String collectorAddr, Token token) {
+    CollectorInfo amCollectorInfo =
+        Records.newRecord(CollectorInfo.class);
+    amCollectorInfo.setCollectorAddr(collectorAddr);
+    amCollectorInfo.setCollectorToken(token);
+    return amCollectorInfo;
+  }
+
+  public abstract String getCollectorAddr();
+
+  public abstract void setCollectorAddr(String addr);
+
+  /**
+   * Get the delegation token for the app collector, which the AM will use
+   * to publish entities.
+   * @return the delegation token for the app collector.
+   */
+  public abstract Token getCollectorToken();
+
+  public abstract void setCollectorToken(Token token);
+}
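
The CollectorInfo record above carries only an address and an optional delegation token. A rough usage sketch follows, with made-up address and token values; a real AM receives the populated record inside an AllocateResponse rather than building one itself:

    import org.apache.hadoop.yarn.api.records.CollectorInfo;
    import org.apache.hadoop.yarn.api.records.Token;

    public class CollectorInfoSketch {
      public static void main(String[] args) {
        // Illustrative values only.
        Token token = Token.newInstance(new byte[] {0}, "TIMELINE",
            new byte[] {0}, "collector-service");
        CollectorInfo info = CollectorInfo.newInstance("host:8188", token);

        // An AM publishes entities to this address and authenticates
        // against the per-app collector with the token.
        System.out.println(info.getCollectorAddr());
        System.out.println(info.getCollectorToken().getKind());
      }
    }
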
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
index 6075ec4..20226aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ApplicationEntity.java
@@ -49,4 +49,32 @@
   public void setQueue(String queue) {
     addInfo(QUEUE_INFO_KEY, queue);
   }
+
+  /**
+   * Checks if the input TimelineEntity object is an ApplicationEntity.
+   *
+   * @param te TimelineEntity object.
+   * @return true if input is an ApplicationEntity, false otherwise
+   */
+  public static boolean isApplicationEntity(TimelineEntity te) {
+    return (te == null ? false
+        : te.getType().equals(TimelineEntityType.YARN_APPLICATION.toString()));
+  }
+
+  /**
+   * @param te TimelineEntity object.
+   * @param eventId event with this id needs to be fetched
+   * @return TimelineEvent if TimelineEntity contains the desired event.
+   */
+  public static TimelineEvent getApplicationEvent(TimelineEntity te,
+      String eventId) {
+    if (isApplicationEntity(te)) {
+      for (TimelineEvent event : te.getEvents()) {
+        if (event.getId().equals(eventId)) {
+          return event;
+        }
+      }
+    }
+    return null;
+  }
 }
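
A quick sketch of the two helpers in use; the entity and event ids are illustrative, and the TestApplicationEntity file added later in this patch exercises the same paths:

    import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;

    public class ApplicationEntitySketch {
      public static void main(String[] args) {
        TimelineEntity te = new TimelineEntity();
        te.setType(TimelineEntityType.YARN_APPLICATION.toString());
        TimelineEvent event = new TimelineEvent();
        event.setId("start_event");
        te.addEvent(event);

        // true: the entity's type matches YARN_APPLICATION
        System.out.println(ApplicationEntity.isApplicationEntity(te));
        // the event above; null for unknown ids or non-application entities
        System.out.println(ApplicationEntity.getApplicationEvent(te, "start_event"));
      }
    }
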
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index e43c3ac..0af5ea47 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -55,6 +55,7 @@
 @InterfaceStability.Unstable
 public class TimelineEntity implements Comparable<TimelineEntity> {
   protected final static String SYSTEM_INFO_KEY_PREFIX = "SYSTEM_INFO_";
+  public final static long DEFAULT_ENTITY_PREFIX = 0L;
 
   /**
    * Identifier of timeline entity(entity id + entity type).
@@ -146,6 +147,7 @@
   private HashMap<String, Set<String>> isRelatedToEntities = new HashMap<>();
   private HashMap<String, Set<String>> relatesToEntities = new HashMap<>();
   private Long createdTime;
+  private long idPrefix;
 
   public TimelineEntity() {
     identifier = new Identifier();
@@ -548,20 +550,10 @@
   public int compareTo(TimelineEntity other) {
     int comparison = getType().compareTo(other.getType());
     if (comparison == 0) {
-      if (getCreatedTime() == null) {
-        if (other.getCreatedTime() == null) {
-          return getId().compareTo(other.getId());
-        } else {
-          return 1;
-        }
-      }
-      if (other.getCreatedTime() == null) {
+      if (getIdPrefix() > other.getIdPrefix()) {
+        // Descending order by entity id prefix
         return -1;
-      }
-      if (getCreatedTime() > other.getCreatedTime()) {
-        // Order by created time desc
-        return -1;
-      } else if (getCreatedTime() < other.getCreatedTime()) {
+      } else if (getIdPrefix() < other.getIdPrefix()) {
         return 1;
       } else {
         return getId().compareTo(other.getId());
@@ -582,4 +574,38 @@
       return real.toString();
     }
   }
+
+  @XmlElement(name = "idprefix")
+  public long getIdPrefix() {
+    if (real == null) {
+      return idPrefix;
+    } else {
+      return real.getIdPrefix();
+    }
+  }
+
+  /**
+   * Sets idPrefix for an entity.
+   * <p>
+   * <b>Note</b>: Entities will be stored in the order of idPrefix specified.
+   * If users decide to set idPrefix for an entity, they <b>MUST</b> provide
+   * the same prefix for every update of this entity.
+   * </p>
+   * Example: <blockquote><pre>
+   * TimelineEntity entity = new TimelineEntity();
+   * entity.setIdPrefix(value);
+   * </pre></blockquote>
+   * Users can use {@link TimelineServiceHelper#invertLong(long)} to invert
+   * the prefix if necessary.
+   *
+   * @param entityIdPrefix prefix for an entity.
+   */
+  @JsonSetter("idprefix")
+  public void setIdPrefix(long entityIdPrefix) {
+    if (real == null) {
+      this.idPrefix = entityIdPrefix;
+    } else {
+      real.setIdPrefix(entityIdPrefix);
+    }
+  }
 }
\ No newline at end of file
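
The compareTo change replaces created-time ordering with idPrefix ordering: for entities of the same type, the larger idPrefix sorts first, with the entity id as a tiebreaker. A small sketch of the resulting semantics, using made-up ids and start times together with the invertLong helper this patch adds to TimelineServiceHelper:

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.util.TimelineServiceHelper;

    public class IdPrefixOrderingSketch {
      public static void main(String[] args) {
        TimelineEntity a = new TimelineEntity();
        a.setType("DS_CONTAINER");
        a.setId("container_1");
        a.setIdPrefix(TimelineServiceHelper.invertLong(1000L)); // earlier start

        TimelineEntity b = new TimelineEntity();
        b.setType("DS_CONTAINER");
        b.setId("container_2");
        b.setIdPrefix(TimelineServiceHelper.invertLong(2000L)); // later start

        // Ordering is descending by idPrefix; inverting the start time gives
        // the earlier entity the larger prefix, so it sorts first.
        System.out.println(a.compareTo(b) < 0); // true
      }
    }
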
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acaef8..4944821 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -167,6 +167,10 @@
   public static final String RM_APPLICATION_MASTER_SERVICE_PROCESSORS =
       RM_PREFIX + "application-master-service.processors";
 
+  public static final String RM_AUTO_UPDATE_CONTAINERS =
+      RM_PREFIX + "auto-update.containers";
+  public static final boolean DEFAULT_RM_AUTO_UPDATE_CONTAINERS = false;
+
   /** The actual bind address for the RM.*/
   public static final String RM_BIND_HOST =
     RM_PREFIX + "bind-host";
@@ -1060,7 +1064,17 @@
   public static final String LOG_AGGREGATION_ENABLED = YARN_PREFIX
       + "log-aggregation-enable";
   public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false;
-  
+
+  public static final String LOG_AGGREGATION_FILE_FORMATS = YARN_PREFIX
+      + "log-aggregation.file-formats";
+  public static final String LOG_AGGREGATION_FILE_CONTROLLER_FMT =
+      YARN_PREFIX + "log-aggregation.file-controller.%s.class";
+
+  public static final String LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT
+      = YARN_PREFIX + "log-aggregation.%s.remote-app-log-dir";
+  public static final String LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT
+      = YARN_PREFIX + "log-aggregation.%s.remote-app-log-dir-suffix";
+
   /** 
    * How long to wait before deleting aggregated logs, -1 disables.
    * Be careful set this too small and you will spam the name node.
@@ -1768,6 +1782,10 @@
   YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL =
       "security.applicationhistory.protocol.acl";
 
+  public static final String
+      YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL =
+      "security.collector-nodemanager.protocol.acl";
+
   /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL
    * to a running container */
   public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS =
@@ -2085,7 +2103,7 @@
       TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "with-user-dir";
 
   /**
-   * Settings for timeline service v2.0
+   * Settings for timeline service v2.0.
    */
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
       TIMELINE_SERVICE_PREFIX + "writer.class";
@@ -2098,9 +2116,20 @@
       TIMELINE_SERVICE_PREFIX + "reader.class";
 
   public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
-      "org.apache.hadoop.yarn.server.timelineservice" +
-          ".storage.HBaseTimelineReaderImpl";
+      "org.apache.hadoop.yarn.server.timelineservice.storage" +
+          ".HBaseTimelineReaderImpl";
 
+  /**
+   * Default schema prefix for HBase tables.
+   */
+  public static final String DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX =
+      "prod.";
+
+  /**
+   * Config param name to override the schema prefix.
+   */
+  public static final String TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME =
+      TIMELINE_SERVICE_PREFIX + "hbase-schema.prefix";
 
   /** The setting that controls how often the timeline collector flushes the
    * timeline writer.
@@ -2121,6 +2150,58 @@
       + "hbase.coprocessor.app-final-value-retention-milliseconds";
 
   /**
+   * The name of the setting for the location of the coprocessor
+   * jar on HDFS.
+   */
+  public static final String FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION =
+      TIMELINE_SERVICE_PREFIX
+      + "hbase.coprocessor.jar.hdfs.location";
+
+  /** Default HDFS location for the flowrun coprocessor jar. */
+  public static final String DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR =
+      "/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar";
+
+  /**
+   * The name for the setting that points to an optional HBase configuration
+   * (hbase-site.xml file) with settings that will override the ones found on
+   * the classpath.
+   */
+  public static final String TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE =
+      TIMELINE_SERVICE_PREFIX
+      + "hbase.configuration.file";
+
+  /**
+   * The name for the setting that enables or disables authentication checks
+   * for reading timeline service v2 data.
+   */
+  public static final String TIMELINE_SERVICE_READ_AUTH_ENABLED =
+      TIMELINE_SERVICE_PREFIX + "read.authentication.enabled";
+
+  /**
+   * The default setting for authentication checks for reading timeline
+   * service v2 data.
+   */
+  public static final Boolean DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED =
+      false;
+
+  /**
+   * The name for the setting that lists the users and groups who are allowed
+   * to read timeline service v2 data. It is a comma-separated list of users,
+   * followed by a space, then a comma-separated list of groups.
+   * Only the listed users and groups may read the data; everyone else is
+   * rejected.
+   */
+  public static final String TIMELINE_SERVICE_READ_ALLOWED_USERS =
+      TIMELINE_SERVICE_PREFIX + "read.allowed.users";
+
+  /**
+   * The default value for the list of users who are allowed to read
+   * timeline service v2 data.
+   */
+  public static final String DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS =
+      "";
+
+  /**
    * The setting that controls how long the final value of a metric of a
    * completed app is retained before merging into the flow sum. Up to this time
    * after an application is completed out-of-order values that arrive can be
@@ -2140,6 +2221,8 @@
 
   public static final int DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE = 10;
 
+  /** Default version for any flow. */
+  public static final String DEFAULT_FLOW_VERSION = "1";
 
   /**
    * The time period for which timeline v2 client will wait for draining
@@ -2629,6 +2712,14 @@
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
 
+  public static final String FEDERATION_STATESTORE_ZK_PREFIX =
+      FEDERATION_PREFIX + "zk-state-store.";
+  /** Parent znode path under which ZKRMStateStore will create znodes. */
+  public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH =
+      FEDERATION_STATESTORE_ZK_PREFIX + "parent-path";
+  public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH =
+      "/federationstore";
+
   private static final String FEDERATION_STATESTORE_SQL_PREFIX =
       FEDERATION_PREFIX + "state-store.sql.";
 
@@ -2731,6 +2822,15 @@
       "org.apache.hadoop.yarn.server.router.webapp."
           + "DefaultRequestInterceptorREST";
 
+  /**
+   * Whether the interceptor used in FederationInterceptorREST should
+   * return partial AppReports.
+   */
+  public static final String ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
+      ROUTER_WEBAPP_PREFIX + "partial-result.enabled";
+  public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
+      false;
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////
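
A short sketch of reading the new constants at runtime; it assumes nothing beyond a stock YarnConfiguration, and the values printed are simply the defaults declared above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NewConfigKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();

        // Router web app: may partial AppReports be returned?
        boolean partial = conf.getBoolean(
            YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED,
            YarnConfiguration.DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED);

        // Timeline service v2: HBase schema prefix, "prod." unless overridden.
        String prefix = conf.get(
            YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX);

        System.out.println(partial + " " + prefix);
      }
    }
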
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/TimelineServiceHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/TimelineServiceHelper.java
index e0268a6..65ed18a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/TimelineServiceHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/TimelineServiceHelper.java
@@ -46,4 +46,12 @@
         (HashMap<E, V>) originalMap : new HashMap<E, V>(originalMap);
   }
 
+  /**
+   * Inverts the given key.
+   * @param key value to be inverted.
+   * @return the inverted long
+   */
+  public static long invertLong(long key) {
+    return Long.MAX_VALUE - key;
+  }
 }
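
Since invertLong is just Long.MAX_VALUE - key, it reverses the natural order of non-negative keys: if t1 < t2 then invertLong(t1) > invertLong(t2). With the made-up timestamps 1000 and 2000, for instance, it returns 9223372036854774807 and 9223372036854773807 respectively, so the later timestamp maps to the smaller inverted value. This is what lets writers store an inverted start time as the entity id prefix.
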
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 81ebd79..c5f485f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -613,3 +613,8 @@
   optional string key = 1;
   optional bytes value = 2;
 }
+
+message CollectorInfoProto {
+  optional string collector_addr = 1;
+  optional hadoop.common.TokenProto collector_token = 2;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index b92c46e..7a7f035 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -112,7 +112,7 @@
   repeated NMTokenProto nm_tokens = 9;
   optional hadoop.common.TokenProto am_rm_token = 12;
   optional PriorityProto application_priority = 13;
-  optional string collector_addr = 14;
+  optional CollectorInfoProto collector_info = 14;
   repeated UpdateContainerErrorProto update_errors = 15;
   repeated UpdatedContainerProto updated_containers = 16;
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestApplicationEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestApplicationEntity.java
new file mode 100644
index 0000000..c3f2777
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/timelineservice/TestApplicationEntity.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.timelineservice;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import org.junit.Test;
+
+/**
+ * Various tests for the ApplicationEntity class.
+ *
+ */
+public class TestApplicationEntity {
+
+  @Test
+  public void testIsApplicationEntity() {
+    TimelineEntity te = new TimelineEntity();
+    te.setType(TimelineEntityType.YARN_APPLICATION.toString());
+    assertTrue(ApplicationEntity.isApplicationEntity(te));
+
+    te = null;
+    assertEquals(false, ApplicationEntity.isApplicationEntity(te));
+
+    te = new TimelineEntity();
+    te.setType(TimelineEntityType.YARN_CLUSTER.toString());
+    assertEquals(false, ApplicationEntity.isApplicationEntity(te));
+  }
+
+  @Test
+  public void testGetApplicationEvent() {
+    TimelineEntity te = null;
+    TimelineEvent tEvent = ApplicationEntity.getApplicationEvent(te,
+        "no event");
+    assertEquals(null, tEvent);
+
+    te = new TimelineEntity();
+    te.setType(TimelineEntityType.YARN_APPLICATION.toString());
+    TimelineEvent event = new TimelineEvent();
+    event.setId("start_event");
+    event.setTimestamp(System.currentTimeMillis());
+    te.addEvent(event);
+    tEvent = ApplicationEntity.getApplicationEvent(te, "start_event");
+    assertEquals(event, tEvent);
+
+    te = new TimelineEntity();
+    te.setType(TimelineEntityType.YARN_CLUSTER.toString());
+    event = new TimelineEvent();
+    event.setId("start_event_cluster");
+    event.setTimestamp(System.currentTimeMillis());
+    te.addEvent(event);
+    tEvent = ApplicationEntity.getApplicationEvent(te, "start_event_cluster");
+    assertEquals(null, tEvent);
+
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 91a8b0a..bd7bf93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -66,6 +66,8 @@
     configurationPropsToSkipCompare
         .add(YarnConfiguration
             .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
+    configurationPropsToSkipCompare.add(YarnConfiguration
+        .YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL);
     configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
 
     // Federation default configs to be ignored
@@ -96,6 +98,10 @@
     configurationPropsToSkipCompare
         .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
 
+    // Federation StateStore ZK implementation configs to be ignored
+    configurationPropsToSkipCompare.add(
+        YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
+
     // Federation StateStore SQL implementation configs to be ignored
     configurationPropsToSkipCompare
         .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);
@@ -155,6 +161,8 @@
 
     configurationPrefixToSkipCompare
         .add(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED);
 
     // Set by container-executor.cfg
     configurationPrefixToSkipCompare.add(YarnConfiguration.NM_USER_HOME_DIR);
@@ -180,6 +188,8 @@
     // Currently defined in RegistryConstants/core-site.xml
     xmlPrefixToSkipCompare.add("hadoop.registry");
 
+    xmlPrefixToSkipCompare.add(
+        "yarn.log-aggregation.file-controller.TFile.class");
     // Add the filters used for checking for collision of default values.
     initDefaultValueCollisionCheck();
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index ab4607a..a02af70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -104,6 +104,8 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.log4j.LogManager;
 
@@ -314,6 +316,17 @@
       Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());
 
   /**
+   * Container start times used to set the id prefix while publishing
+   * entities to ATSv2.
+   */
+  private final ConcurrentMap<ContainerId, Long> containerStartTimes =
+      new ConcurrentHashMap<ContainerId, Long>();
+
+  private ConcurrentMap<ContainerId, Long> getContainerStartTimes() {
+    return containerStartTimes;
+  }
+
+  /**
    * @param args Command line args
    */
   public static void main(String[] args) {
@@ -866,7 +879,15 @@
               + containerStatus.getContainerId());
         }
         if (timelineServiceV2Enabled) {
-          publishContainerEndEventOnTimelineServiceV2(containerStatus);
+          Long containerStartTime =
+              containerStartTimes.get(containerStatus.getContainerId());
+          if (containerStartTime == null) {
+            containerStartTime = SystemClock.getInstance().getTime();
+            containerStartTimes.put(containerStatus.getContainerId(),
+                containerStartTime);
+          }
+          publishContainerEndEventOnTimelineServiceV2(containerStatus,
+              containerStartTime);
         } else if (timelineServiceV1Enabled) {
           publishContainerEndEvent(timelineClient, containerStatus, domainId,
               appSubmitterUgi);
@@ -994,8 +1015,10 @@
             containerId, container.getNodeId());
       }
       if (applicationMaster.timelineServiceV2Enabled) {
-        applicationMaster
-            .publishContainerStartEventOnTimelineServiceV2(container);
+        long startTime = SystemClock.getInstance().getTime();
+        applicationMaster.getContainerStartTimes().put(containerId, startTime);
+        applicationMaster.publishContainerStartEventOnTimelineServiceV2(
+            container, startTime);
       } else if (applicationMaster.timelineServiceV1Enabled) {
         applicationMaster.publishContainerStartEvent(
             applicationMaster.timelineClient, container,
@@ -1356,24 +1379,24 @@
   }
 
   private void publishContainerStartEventOnTimelineServiceV2(
-      Container container) {
+      Container container, long startTime) {
     final org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
         entity =
             new org.apache.hadoop.yarn.api.records.timelineservice.
             TimelineEntity();
     entity.setId(container.getId().toString());
     entity.setType(DSEntity.DS_CONTAINER.toString());
-    long ts = System.currentTimeMillis();
-    entity.setCreatedTime(ts);
+    entity.setCreatedTime(startTime);
     entity.addInfo("user", appSubmitterUgi.getShortUserName());
 
     org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent event =
         new org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent();
-    event.setTimestamp(ts);
+    event.setTimestamp(startTime);
     event.setId(DSEvent.DS_CONTAINER_START.toString());
     event.addInfo("Node", container.getNodeId().toString());
     event.addInfo("Resources", container.getResource().toString());
     entity.addEvent(event);
+    entity.setIdPrefix(TimelineServiceHelper.invertLong(startTime));
 
     try {
       appSubmitterUgi.doAs(new PrivilegedExceptionAction<Object>() {
@@ -1391,7 +1414,7 @@
   }
 
   private void publishContainerEndEventOnTimelineServiceV2(
-      final ContainerStatus container) {
+      final ContainerStatus container, long containerStartTime) {
     final org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
         entity =
             new org.apache.hadoop.yarn.api.records.timelineservice.
@@ -1407,6 +1430,7 @@
     event.addInfo("State", container.getState().name());
     event.addInfo("Exit Status", container.getExitStatus());
     entity.addEvent(event);
+    entity.setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime));
 
     try {
       appSubmitterUgi.doAs(new PrivilegedExceptionAction<Object>() {
@@ -1441,6 +1465,8 @@
     event.setId(appEvent.toString());
     event.setTimestamp(ts);
     entity.addEvent(event);
+    entity.setIdPrefix(
+        TimelineServiceHelper.invertLong(appAttemptID.getAttemptId()));
 
     try {
       appSubmitterUgi.doAs(new PrivilegedExceptionAction<Object>() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index ef21c87..fc270cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -64,7 +64,9 @@
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.DSEvent;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.impl.DirectTimelineWriter;
 import org.apache.hadoop.yarn.client.api.impl.TestTimelineClient;
@@ -81,6 +83,7 @@
 import org.apache.hadoop.yarn.server.timeline.TimelineVersion;
 import org.apache.hadoop.yarn.server.timeline.TimelineVersionWatcher;
 import org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
@@ -523,15 +526,31 @@
           "appattempt_" + appId.getClusterTimestamp() + "_000" + appId.getId()
               + "_000001"
               + FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_EXTENSION;
-      verifyEntityTypeFileExists(basePath, "DS_APP_ATTEMPT",
-          appTimestampFileName);
+      File dsAppAttemptEntityFile = verifyEntityTypeFileExists(basePath,
+          "DS_APP_ATTEMPT", appTimestampFileName);
+      // Check that the required events are published and that the same
+      // idprefix is sent on each publish.
+      verifyEntityForTimelineV2(dsAppAttemptEntityFile,
+          DSEvent.DS_APP_ATTEMPT_START.toString(), 1, 1, 0, true);
+      // To avoid a race condition in the test case, check at least 40 times
+      // with a sleep of 50ms.
+      verifyEntityForTimelineV2(dsAppAttemptEntityFile,
+          DSEvent.DS_APP_ATTEMPT_END.toString(), 1, 40, 50, true);
 
-      // Verify DS_CONTAINER entities posted by the client
+      // Verify DS_CONTAINER entities posted by the client.
       String containerTimestampFileName =
           "container_" + appId.getClusterTimestamp() + "_000" + appId.getId()
               + "_01_000002.thist";
-      verifyEntityTypeFileExists(basePath, "DS_CONTAINER",
-          containerTimestampFileName);
+      File dsContainerEntityFile = verifyEntityTypeFileExists(basePath,
+          "DS_CONTAINER", containerTimestampFileName);
+      // Check that the required events are published and that the same
+      // idprefix is sent on each publish.
+      verifyEntityForTimelineV2(dsContainerEntityFile,
+          DSEvent.DS_CONTAINER_START.toString(), 1, 1, 0, true);
+      // To avoid a race condition in the test case, check at least 40 times
+      // with a sleep of 50ms.
+      verifyEntityForTimelineV2(dsContainerEntityFile,
+          DSEvent.DS_CONTAINER_END.toString(), 1, 40, 50, true);
 
       // Verify NM posting container metrics info.
       String containerMetricsTimestampFileName =
@@ -541,29 +560,13 @@
       File containerEntityFile = verifyEntityTypeFileExists(basePath,
           TimelineEntityType.YARN_CONTAINER.toString(),
           containerMetricsTimestampFileName);
-      Assert.assertEquals(
-          "Container created event needs to be published atleast once",
-          1,
-          getNumOfStringOccurrences(containerEntityFile,
-              ContainerMetricsConstants.CREATED_EVENT_TYPE));
+      verifyEntityForTimelineV2(containerEntityFile,
+          ContainerMetricsConstants.CREATED_EVENT_TYPE, 1, 1, 0, true);
 
-      // to avoid race condition of testcase, atleast check 4 times with sleep
-      // of 500ms
-      long numOfContainerFinishedOccurrences = 0;
-      for (int i = 0; i < 4; i++) {
-        numOfContainerFinishedOccurrences =
-            getNumOfStringOccurrences(containerEntityFile,
-                ContainerMetricsConstants.FINISHED_EVENT_TYPE);
-        if (numOfContainerFinishedOccurrences > 0) {
-          break;
-        } else {
-          Thread.sleep(500L);
-        }
-      }
-      Assert.assertEquals(
-          "Container finished event needs to be published atleast once",
-          1,
-          numOfContainerFinishedOccurrences);
+      // To avoid a race condition in the test case, check at least 40 times
+      // with a sleep of 50ms.
+      verifyEntityForTimelineV2(containerEntityFile,
+          ContainerMetricsConstants.FINISHED_EVENT_TYPE, 1, 40, 50, true);
 
       // Verify RM posting Application life cycle Events are getting published
       String appMetricsTimestampFileName =
@@ -573,29 +576,14 @@
           verifyEntityTypeFileExists(basePath,
               TimelineEntityType.YARN_APPLICATION.toString(),
               appMetricsTimestampFileName);
-      Assert.assertEquals(
-          "Application created event should be published atleast once",
-          1,
-          getNumOfStringOccurrences(appEntityFile,
-              ApplicationMetricsConstants.CREATED_EVENT_TYPE));
+      // No need to check idprefix for app.
+      verifyEntityForTimelineV2(appEntityFile,
+          ApplicationMetricsConstants.CREATED_EVENT_TYPE, 1, 1, 0, false);
 
-      // to avoid race condition of testcase, atleast check 4 times with sleep
-      // of 500ms
-      long numOfStringOccurrences = 0;
-      for (int i = 0; i < 4; i++) {
-        numOfStringOccurrences =
-            getNumOfStringOccurrences(appEntityFile,
-                ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
-        if (numOfStringOccurrences > 0) {
-          break;
-        } else {
-          Thread.sleep(500L);
-        }
-      }
-      Assert.assertEquals(
-          "Application finished event should be published atleast once",
-          1,
-          numOfStringOccurrences);
+      // To avoid a race condition in the test case, check at least 40 times
+      // with a sleep of 50ms.
+      verifyEntityForTimelineV2(appEntityFile,
+          ApplicationMetricsConstants.FINISHED_EVENT_TYPE, 1, 40, 50, false);
 
       // Verify RM posting AppAttempt life cycle Events are getting published
       String appAttemptMetricsTimestampFileName =
@@ -606,17 +594,10 @@
           verifyEntityTypeFileExists(basePath,
               TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(),
               appAttemptMetricsTimestampFileName);
-      Assert.assertEquals(
-          "AppAttempt register event should be published atleast once",
-          1,
-          getNumOfStringOccurrences(appAttemptEntityFile,
-              AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE));
-
-      Assert.assertEquals(
-          "AppAttempt finished event should be published atleast once",
-          1,
-          getNumOfStringOccurrences(appAttemptEntityFile,
-              AppAttemptMetricsConstants.FINISHED_EVENT_TYPE));
+      verifyEntityForTimelineV2(appAttemptEntityFile,
+          AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE, 1, 1, 0, true);
+      verifyEntityForTimelineV2(appAttemptEntityFile,
+          AppAttemptMetricsConstants.FINISHED_EVENT_TYPE, 1, 1, 0, true);
     } finally {
       FileUtils.deleteDirectory(tmpRootFolder.getParentFile());
     }
@@ -636,22 +617,64 @@
     return entityFile;
   }
 
-  private long getNumOfStringOccurrences(File entityFile, String searchString)
-      throws IOException {
-    BufferedReader reader = null;
-    String strLine;
+  /**
+   * Checks the events and idprefix published for an entity.
+   *
+   * @param entityFile Entity file.
+   * @param expectedEvent Expected event Id.
+   * @param numOfExpectedEvent Expected number of occurrences of the expected
+   *     event id.
+   * @param checkTimes Number of times to check.
+   * @param sleepTime Sleep time for each iteration.
+   * @param checkIdPrefix Whether to check idprefix.
+   * @throws IOException if entity file reading fails.
+   * @throws InterruptedException if sleep is interrupted.
+   */
+  private void verifyEntityForTimelineV2(File entityFile, String expectedEvent,
+      long numOfExpectedEvent, int checkTimes, long sleepTime,
+      boolean checkIdPrefix) throws IOException, InterruptedException {
     long actualCount = 0;
-    try {
-      reader = new BufferedReader(new FileReader(entityFile));
-      while ((strLine = reader.readLine()) != null) {
-        if (strLine.trim().contains(searchString)) {
-          actualCount++;
+    for (int i = 0; i < checkTimes; i++) {
+      BufferedReader reader = null;
+      String strLine = null;
+      actualCount = 0;
+      try {
+        reader = new BufferedReader(new FileReader(entityFile));
+        long idPrefix = -1;
+        while ((strLine = reader.readLine()) != null) {
+          String entityLine = strLine.trim();
+          if (entityLine.isEmpty()) {
+            continue;
+          }
+          if (entityLine.contains(expectedEvent)) {
+            actualCount++;
+          }
+          if (checkIdPrefix) {
+            TimelineEntity entity = FileSystemTimelineReaderImpl.
+                getTimelineRecordFromJSON(entityLine, TimelineEntity.class);
+            Assert.assertTrue("Entity ID prefix expected to be > 0",
+                entity.getIdPrefix() > 0);
+            if (idPrefix == -1) {
+              idPrefix = entity.getIdPrefix();
+            } else {
+              Assert.assertEquals("Entity ID prefix should be same across " +
+                  "each publish of same entity",
+                      idPrefix, entity.getIdPrefix());
+            }
+          }
         }
+      } finally {
+        reader.close();
       }
-    } finally {
-      reader.close();
+      if (numOfExpectedEvent == actualCount) {
+        break;
+      }
+      if (sleepTime > 0 && i < checkTimes - 1) {
+        Thread.sleep(sleepTime);
+      }
     }
-    return actualCount;
+    Assert.assertEquals("Unexpected number of " +  expectedEvent +
+        " event published.", numOfExpectedEvent, actualCount);
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 69f3777..60e305f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -122,9 +122,10 @@
     private List<String> racks;
     private Priority priority;
     private long allocationRequestId;
-    private boolean relaxLocality;
+    private boolean relaxLocality = true;
     private String nodeLabelsExpression;
-    private ExecutionTypeRequest executionTypeRequest;
+    private ExecutionTypeRequest executionTypeRequest =
+        ExecutionTypeRequest.newInstance();
     
     /**
      * Instantiates a {@link ContainerRequest} with the given constraints and
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 6711da2..d12b108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -68,8 +68,6 @@
   
   private volatile boolean keepRunning;
   private volatile float progress;
-  
-  private volatile String collectorAddr;
 
   /**
    *
@@ -325,17 +323,16 @@
           }
 
           AllocateResponse response = (AllocateResponse) object;
-          String collectorAddress = response.getCollectorAddr();
+          String collectorAddress = null;
+          if (response.getCollectorInfo() != null) {
+            collectorAddress = response.getCollectorInfo().getCollectorAddr();
+          }
+
           TimelineV2Client timelineClient =
               client.getRegisteredTimelineV2Client();
-          if (timelineClient != null && collectorAddress != null
-              && !collectorAddress.isEmpty()) {
-            if (collectorAddr == null
-                || !collectorAddr.equals(collectorAddress)) {
-              collectorAddr = collectorAddress;
-              timelineClient.setTimelineServiceAddress(collectorAddress);
-              LOG.info("collectorAddress " + collectorAddress);
-            }
+          if (timelineClient != null && response.getCollectorInfo() != null) {
+            timelineClient.
+                setTimelineCollectorInfo(response.getCollectorInfo());
           }
 
           List<NodeReport> updatedNodes = response.getUpdatedNodes();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index ac5f559..24c1da0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -113,7 +113,8 @@
               + " be handled by the client or the ResourceManager. The client"
               + "-side tracking is blocking, while the server-side tracking"
               + " is not. Omitting the timeout, or a timeout of -1, indicates"
-              + " an infinite timeout."))
+              + " an infinite timeout. Known Issue: the server-side tracking"
+              + " will immediately decommission if an RM HA failover occurs."))
           .put("-refreshNodesResources", new UsageInfo("",
               "Refresh resources of NodeManagers at the ResourceManager."))
           .put("-refreshSuperUserGroupsConfiguration", new UsageInfo("",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java
new file mode 100644
index 0000000..4521018
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java
@@ -0,0 +1,72 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.junit.After;
+
+/**
+ * Test Base for Application Master Service Protocol.
+ */
+public abstract class ApplicationMasterServiceProtoTestBase
+    extends ProtocolHATestBase {
+
+  private ApplicationMasterProtocol amClient;
+  private ApplicationAttemptId attemptId;
+
+  protected void startupHAAndSetupClient() throws Exception {
+    attemptId = this.cluster.createFakeApplicationAttemptId();
+
+    Token<AMRMTokenIdentifier> appToken =
+        this.cluster.getResourceManager().getRMContext()
+          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
+    appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf));
+    UserGroupInformation.setLoginUser(UserGroupInformation
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
+    UserGroupInformation.getCurrentUser().addToken(appToken);
+    syncToken(appToken);
+    amClient = ClientRMProxy
+        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
+  }
+
+  @After
+  public void shutDown() {
+    if (this.amClient != null) {
+      RPC.stopProxy(this.amClient);
+    }
+  }
+
+  protected ApplicationMasterProtocol getAMClient() {
+    return amClient;
+  }
+
+  private void syncToken(Token<AMRMTokenIdentifier> token) throws IOException {
+    for (int i = 0; i < this.cluster.getNumOfResourceManager(); i++) {
+      this.cluster.getResourceManager(i).getRMContext()
+          .getAMRMTokenSecretManager().addPersistedPassword(token);
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index a8e9132..f4005e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -804,11 +805,20 @@
     }
 
     public AllocateResponse createFakeAllocateResponse() {
-      return AllocateResponse.newInstance(-1,
-          new ArrayList<ContainerStatus>(),
-          new ArrayList<Container>(), new ArrayList<NodeReport>(),
-          Resource.newInstance(1024, 2), null, 1,
-          null, new ArrayList<NMToken>());
+      if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
+        return AllocateResponse.newInstance(-1,
+            new ArrayList<ContainerStatus>(), new ArrayList<Container>(),
+            new ArrayList<NodeReport>(), Resource.newInstance(1024, 2), null, 1,
+            null, new ArrayList<NMToken>(), CollectorInfo.newInstance(
+            "host:port", Token.newInstance(new byte[] {0}, "TIMELINE",
+            new byte[] {0}, "rm")));
+      } else {
+        return AllocateResponse.newInstance(-1,
+            new ArrayList<ContainerStatus>(),
+            new ArrayList<Container>(), new ArrayList<NodeReport>(),
+            Resource.newInstance(1024, 2), null, 1,
+            null, new ArrayList<NMToken>());
+      }
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java
new file mode 100644
index 0000000..be8c302
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java
@@ -0,0 +1,71 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests Application Master Protocol with timeline service v2 enabled.
+ */
+public class TestApplicationMasterServiceProtocolForTimelineV2
+    extends ApplicationMasterServiceProtoTestBase {
+
+  @Before
+  public void initialize() throws Exception {
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE + 200, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE + 200, conf);
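+    // Enable timeline service v2 with the file system writer so that
+    // allocate responses carry collector info.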
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+        FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+    startHACluster(0, false, false, true);
+    super.startupHAAndSetupClient();
+  }
+
+  @Test(timeout = 15000)
+  public void testAllocateForTimelineV2OnHA()
+      throws YarnException, IOException {
+    AllocateRequest request = AllocateRequest.newInstance(0, 50f,
+        new ArrayList<ResourceRequest>(),
+        new ArrayList<ContainerId>(),
+        ResourceBlacklistRequest.newInstance(new ArrayList<String>(),
+            new ArrayList<String>()));
+    AllocateResponse response = getAMClient().allocate(request);
+    Assert.assertEquals(response, this.cluster.createFakeAllocateResponse());
+    Assert.assertNotNull(response.getCollectorInfo());
+    Assert.assertEquals("host:port",
+        response.getCollectorInfo().getCollectorAddr());
+    Assert.assertNotNull(response.getCollectorInfo().getCollectorToken());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
index ad86fb3..c2f39a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
@@ -23,10 +23,6 @@
 
 import org.junit.Assert;
 
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
@@ -34,45 +30,20 @@
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 
 public class TestApplicationMasterServiceProtocolOnHA
-    extends ProtocolHATestBase {
-  private ApplicationMasterProtocol amClient;
-  private ApplicationAttemptId attemptId ;
-
+    extends ApplicationMasterServiceProtoTestBase {
   @Before
   public void initialize() throws Exception {
     startHACluster(0, false, false, true);
-    attemptId = this.cluster.createFakeApplicationAttemptId();
-
-    Token<AMRMTokenIdentifier> appToken =
-        this.cluster.getResourceManager().getRMContext()
-          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
-    appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf));
-    UserGroupInformation.setLoginUser(UserGroupInformation
-        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
-    UserGroupInformation.getCurrentUser().addToken(appToken);
-    syncToken(appToken);
-
-    amClient = ClientRMProxy
-        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
-  }
-
-  @After
-  public void shutDown() {
-    if(this.amClient != null) {
-      RPC.stopProxy(this.amClient);
-    }
+    super.startupHAAndSetupClient();
   }
 
   @Test(timeout = 15000)
@@ -81,7 +52,7 @@
     RegisterApplicationMasterRequest request =
         RegisterApplicationMasterRequest.newInstance("localhost", 0, "");
     RegisterApplicationMasterResponse response =
-        amClient.registerApplicationMaster(request);
+        getAMClient().registerApplicationMaster(request);
     Assert.assertEquals(response,
         this.cluster.createFakeRegisterApplicationMasterResponse());
   }
@@ -93,7 +64,7 @@
         FinishApplicationMasterRequest.newInstance(
             FinalApplicationStatus.SUCCEEDED, "", "");
     FinishApplicationMasterResponse response =
-        amClient.finishApplicationMaster(request);
+        getAMClient().finishApplicationMaster(request);
     Assert.assertEquals(response,
         this.cluster.createFakeFinishApplicationMasterResponse());
   }
@@ -105,14 +76,7 @@
         new ArrayList<ContainerId>(),
         ResourceBlacklistRequest.newInstance(new ArrayList<String>(),
             new ArrayList<String>()));
-    AllocateResponse response = amClient.allocate(request);
+    AllocateResponse response = getAMClient().allocate(request);
     Assert.assertEquals(response, this.cluster.createFakeAllocateResponse());
   }
-
-  private void syncToken(Token<AMRMTokenIdentifier> token) throws IOException {
-    for (int i = 0; i < this.cluster.getNumOfResourceManager(); i++) {
-      this.cluster.getResourceManager(i).getRMContext()
-          .getAMRMTokenSecretManager().addPersistedPassword(token);
-    }
-  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
index ba38340..56826c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
@@ -426,7 +426,7 @@
     }
     AllocateResponse response =
         AllocateResponse.newInstance(0, completed, allocated,
-            new ArrayList<NodeReport>(), null, null, 1, null, nmTokens,
+            new ArrayList<NodeReport>(), null, null, 1, null, nmTokens, null,
             updatedContainers);
     return response;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
index 0b62054..fcc3c7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
@@ -128,7 +128,7 @@
     ResourceBlacklistRequest resourceBlacklistRequest = ResourceBlacklistRequest
         .newInstance(new ArrayList<>(), new ArrayList<>());
 
-    int responseId = 1;
+    int responseId = 0;
 
     return AllocateRequest.newInstance(responseId, 0, resourceAsk,
         new ArrayList<>(), resourceBlacklistRequest);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
index ad18da3..9603539 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
@@ -51,8 +51,10 @@
 
     Resource capability = Resource.newInstance(1024, 1);
     ContainerRequest request =
-        new ContainerRequest(capability, new String[] {"host1", "host2"},
-            new String[] {"/rack2"}, Priority.newInstance(1));
+        ContainerRequest.newBuilder().capability(capability)
+            .nodes(new String[] { "host1", "host2" })
+            .racks(new String[] { "/rack2" }).priority(Priority.newInstance(1))
+            .build();
     client.addContainerRequest(request);
     verifyResourceRequest(client, request, "host1", true);
     verifyResourceRequest(client, request, "host2", true);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 14df94a..6a063e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -151,13 +151,13 @@
            YarnClient rmClient = YarnClient.createYarnClient()) {
       Configuration conf = new YarnConfiguration();
       conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-      conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-      conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-      conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+      conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+      conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+      conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
       // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
       // RM_AM_EXPIRY_INTERVAL_MS * 1.5 * 3
       conf.setInt(
-          YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+          YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 20);
       cluster.init(conf);
       cluster.start();
       final Configuration yarnConf = cluster.getConfig();
@@ -198,7 +198,7 @@
         lastToken = response.getAMRMToken();
 
         // Time slot to be sure the AMRMProxy renew the token
-        Thread.sleep(1500);
+        Thread.sleep(4500);
 
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index c054209..26e0319 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -36,7 +36,6 @@
 import com.sun.jersey.api.client.ClientResponse.Status;
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileWriter;
@@ -78,6 +77,9 @@
 import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
 import org.junit.Before;
@@ -1345,42 +1347,55 @@
     Path path =
         new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
             + System.currentTimeMillis());
-    try (AggregatedLogFormat.LogWriter writer =
-             new AggregatedLogFormat.LogWriter()) {
-      writer.initialize(configuration, path, ugi);
-      writer.writeApplicationOwner(ugi.getUserName());
-
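+    // Write through the configured log aggregation file controller instead
+    // of using AggregatedLogFormat.LogWriter directly.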
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(configuration);
+    LogAggregationFileController fileFormat = factory
+        .getFileControllerForWrite();
+    try {
       Map<ApplicationAccessType, String> appAcls = new HashMap<>();
       appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
-      writer.writeApplicationACLs(appAcls);
-      writer.append(new AggregatedLogFormat.LogKey(containerId),
+      LogAggregationFileControllerContext context
+          = new LogAggregationFileControllerContext(
+              path, path, true, 1000,
+              containerId.getApplicationAttemptId().getApplicationId(),
+              appAcls, nodeId, ugi);
+      fileFormat.initializeWriter(context);
+      fileFormat.write(new AggregatedLogFormat.LogKey(containerId),
           new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
               UserGroupInformation.getCurrentUser().getShortUserName()));
+    } finally {
+      fileFormat.closeWriter();
     }
   }
 
   private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
       Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
       ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
-    Path path =
-        new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
-            + System.currentTimeMillis());
-    try (AggregatedLogFormat.LogWriter writer =
-             new AggregatedLogFormat.LogWriter()) {
-      writer.initialize(configuration, path, ugi);
-      writer.writeApplicationOwner(ugi.getUserName());
-
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(configuration);
+    LogAggregationFileController fileFormat = factory
+        .getFileControllerForWrite();
+    try {
       Map<ApplicationAccessType, String> appAcls = new HashMap<>();
       appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
-      writer.writeApplicationACLs(appAcls);
-      DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
-      new AggregatedLogFormat.LogKey(containerId).write(out);
-      out.close();
-      out = writer.getWriter().prepareAppendValue(-1);
-      new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
-          UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
-              new HashSet<>());
-      out.close();
+      ApplicationId appId = containerId.getApplicationAttemptId()
+          .getApplicationId();
+      Path path = fileFormat.getRemoteNodeLogFileForApp(
+          appId, UserGroupInformation.getCurrentUser().getShortUserName(),
+          nodeId);
+      LogAggregationFileControllerContext context
+          = new LogAggregationFileControllerContext(
+              path, path, true, 1000,
+              appId, appAcls, nodeId, ugi);
+      fileFormat.initializeWriter(context);
+      AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey(
+          containerId);
+      AggregatedLogFormat.LogValue value = new AggregatedLogFormat.LogValue(
+          rootLogDirs, containerId, UserGroupInformation.getCurrentUser()
+              .getShortUserName());
+      fileFormat.write(key, value);
+    } finally {
+      fileFormat.closeWriter();
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f17cf8c..c2a5c67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -108,6 +108,15 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-annotations</artifactId>
     </dependency>
+    <!--
+    junit must be before mockito-all on the classpath.  mockito-all bundles its
+    own copy of the hamcrest classes, but they don't match our junit version.
+    -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
@@ -129,11 +138,6 @@
       <artifactId>protobuf-java</artifactId>
     </dependency>
     <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.bouncycastle</groupId>
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 1455d6a..ff35da8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.records.impl.pb.CollectorInfoPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
@@ -48,6 +50,7 @@
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdatedContainerPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
@@ -80,6 +83,7 @@
   private PreemptionMessage preempt;
   private Token amrmToken = null;
   private Priority appPriority = null;
+  private CollectorInfo collectorInfo = null;
 
   public AllocateResponsePBImpl() {
     builder = AllocateResponseProto.newBuilder();
@@ -162,6 +166,9 @@
     if (this.amrmToken != null) {
       builder.setAmRmToken(convertToProtoFormat(this.amrmToken));
     }
+    if (this.collectorInfo != null) {
+      builder.setCollectorInfo(convertToProtoFormat(this.collectorInfo));
+    }
     if (this.appPriority != null) {
       builder.setApplicationPriority(convertToProtoFormat(this.appPriority));
     }
@@ -398,19 +405,25 @@
 
 
   @Override
-  public synchronized String getCollectorAddr() {
+  public synchronized CollectorInfo getCollectorInfo() {
     AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
-    return p.getCollectorAddr();
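+    // Convert the proto field lazily and cache the result.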
+    if (this.collectorInfo != null) {
+      return this.collectorInfo;
+    }
+    if (!p.hasCollectorInfo()) {
+      return null;
+    }
+    this.collectorInfo = convertFromProtoFormat(p.getCollectorInfo());
+    return this.collectorInfo;
   }
 
   @Override
-  public synchronized void setCollectorAddr(String collectorAddr) {
+  public synchronized void setCollectorInfo(CollectorInfo info) {
     maybeInitBuilder();
-    if (collectorAddr == null) {
-      builder.clearCollectorAddr();
-      return;
+    if (info == null) {
+      builder.clearCollectorInfo();
     }
-    builder.setCollectorAddr(collectorAddr);
+    this.collectorInfo = info;
   }
 
   @Override
@@ -718,6 +731,16 @@
     return ((NodeReportPBImpl)t).getProto();
   }
 
+  private synchronized CollectorInfoPBImpl convertFromProtoFormat(
+      CollectorInfoProto p) {
+    return new CollectorInfoPBImpl(p);
+  }
+
+  private synchronized CollectorInfoProto convertToProtoFormat(
+      CollectorInfo t) {
+    return ((CollectorInfoPBImpl)t).getProto();
+  }
+
   private synchronized ContainerPBImpl convertFromProtoFormat(
       ContainerProto p) {
     return new ContainerPBImpl(p);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
new file mode 100644
index 0000000..5835d1a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
@@ -0,0 +1,152 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProtoOrBuilder;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol record implementation of {@link CollectorInfo}.
+ */
+public class CollectorInfoPBImpl extends CollectorInfo {
+
+  private CollectorInfoProto proto = CollectorInfoProto.getDefaultInstance();
+
+  private CollectorInfoProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private String collectorAddr = null;
+  private Token collectorToken = null;
+
+
+  public CollectorInfoPBImpl() {
+    builder = CollectorInfoProto.newBuilder();
+  }
+
+  public CollectorInfoPBImpl(CollectorInfoProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public CollectorInfoProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
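+  // Standard PB record pattern: reads prefer locally cached fields and fall
+  // back to the proto; writes go through a builder that mergeLocalToProto()
+  // folds back into the proto.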
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = CollectorInfoProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public String getCollectorAddr() {
+    CollectorInfoProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorAddr == null && p.hasCollectorAddr()) {
+      this.collectorAddr = p.getCollectorAddr();
+    }
+    return this.collectorAddr;
+  }
+
+  @Override
+  public void setCollectorAddr(String addr) {
+    maybeInitBuilder();
+    if (addr == null) {
+      builder.clearCollectorAddr();
+    }
+    this.collectorAddr = addr;
+  }
+
+  @Override
+  public Token getCollectorToken() {
+    CollectorInfoProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorToken != null) {
+      return this.collectorToken;
+    }
+    if (!p.hasCollectorToken()) {
+      return null;
+    }
+    this.collectorToken = convertFromProtoFormat(p.getCollectorToken());
+    return this.collectorToken;
+  }
+
+  @Override
+  public void setCollectorToken(Token token) {
+    maybeInitBuilder();
+    if (token == null) {
+      builder.clearCollectorToken();
+    }
+    this.collectorToken = token;
+  }
+
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.collectorAddr != null) {
+      builder.setCollectorAddr(this.collectorAddr);
+    }
+    if (this.collectorToken != null) {
+      builder.setCollectorToken(convertToProtoFormat(this.collectorToken));
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 32cf1e9..423c059 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -83,10 +84,13 @@
 
   /**
    * <p>
-   * Update the timeline service address where the request will be sent to.
+   * Update the collector info received in an AllocateResponse, which contains
+   * the timeline service address to which requests will be sent and the
+   * timeline delegation token used to authenticate them.
    * </p>
    *
-   * @param address the timeline service address
+   * @param collectorInfo Collector info which contains the timeline service
+   * address and timeline delegation token.
    */
-  public abstract void setTimelineServiceAddress(String address);
+  public abstract void setTimelineCollectorInfo(CollectorInfo collectorInfo);
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index 5d88f70..220d6af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -19,7 +19,11 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -36,15 +40,22 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.core.util.MultivaluedMapImpl;
 
@@ -59,6 +70,8 @@
 
   private TimelineEntityDispatcher entityDispatcher;
   private volatile String timelineServiceAddress;
+  @VisibleForTesting
+  volatile Token currentTimelineToken = null;
 
   // Retry parameters for identifying new timeline service
   // TODO consider to merge with connection retry
@@ -69,6 +82,8 @@
 
   private ApplicationId contextAppId;
 
+  private UserGroupInformation authUgi;
+
   public TimelineV2ClientImpl(ApplicationId appId) {
     super(TimelineV2ClientImpl.class.getName());
     this.contextAppId = appId;
@@ -88,7 +103,6 @@
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     UserGroupInformation realUgi = ugi.getRealUser();
     String doAsUser = null;
-    UserGroupInformation authUgi = null;
     if (realUgi != null) {
       authUgi = realUgi;
       doAsUser = ugi.getShortUserName();
@@ -96,7 +110,6 @@
       authUgi = ugi;
       doAsUser = null;
     }
-
     // TODO need to add/cleanup filter retry later for ATSV2. similar to V1
     DelegationTokenAuthenticatedURL.Token token =
         new DelegationTokenAuthenticatedURL.Token();
@@ -140,8 +153,72 @@
   }
 
   @Override
-  public void setTimelineServiceAddress(String address) {
-    this.timelineServiceAddress = address;
+  public void setTimelineCollectorInfo(CollectorInfo collectorInfo) {
+    if (collectorInfo == null) {
+      LOG.warn("Not setting collector info as it is null.");
+      return;
+    }
+    // First update the token so that it is available when collector address is
+    // used.
+    if (collectorInfo.getCollectorToken() != null) {
+      // Use the collector address to update the token service if it's not
+      // available.
+      setTimelineDelegationToken(
+          collectorInfo.getCollectorToken(), collectorInfo.getCollectorAddr());
+    }
+    // Update timeline service address.
+    if (collectorInfo.getCollectorAddr() != null &&
+        !collectorInfo.getCollectorAddr().isEmpty() &&
+        !collectorInfo.getCollectorAddr().equals(timelineServiceAddress)) {
+      this.timelineServiceAddress = collectorInfo.getCollectorAddr();
+      LOG.info("Updated timeline service address to " + timelineServiceAddress);
+    }
+  }
+
+  private void setTimelineDelegationToken(Token delegationToken,
+      String collectorAddr) {
+    // Checks below are to ensure that an invalid token is not updated in UGI.
+    // This is required because timeline token is set via a public API.
+    if (!delegationToken.getKind().equals(
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString())) {
+      LOG.warn("Timeline token to be updated should be of kind " +
+          TimelineDelegationTokenIdentifier.KIND_NAME);
+      return;
+    }
+    if (collectorAddr == null || collectorAddr.isEmpty()) {
+      collectorAddr = timelineServiceAddress;
+    }
+    // The token need not be updated if neither the collector address nor the
+    // token service is set.
+    String service = delegationToken.getService();
+    if ((service == null || service.isEmpty()) &&
+        (collectorAddr == null || collectorAddr.isEmpty())) {
+      LOG.warn("Timeline token does not have service and timeline service " +
+          "address is not yet set. Not updating the token");
+      return;
+    }
+    // No need to update a duplicate token.
+    if (currentTimelineToken != null &&
+        currentTimelineToken.equals(delegationToken)) {
+      return;
+    }
+    currentTimelineToken = delegationToken;
+    // Convert the token, sanitize the token service and add it to UGI.
+    org.apache.hadoop.security.token.
+        Token<TimelineDelegationTokenIdentifier> timelineToken =
+            new org.apache.hadoop.security.token.
+            Token<TimelineDelegationTokenIdentifier>(
+                delegationToken.getIdentifier().array(),
+                delegationToken.getPassword().array(),
+                new Text(delegationToken.getKind()),
+                service == null ? new Text() : new Text(service));
+    // Prefer timeline service address over service coming in the token for
+    // updating the token service.
+    InetSocketAddress serviceAddr =
+        (collectorAddr != null && !collectorAddr.isEmpty()) ?
+        NetUtils.createSocketAddr(collectorAddr) :
+        SecurityUtil.getTokenServiceAddr(timelineToken);
+    SecurityUtil.setTokenService(timelineToken, serviceAddr);
+    authUgi.addToken(timelineToken);
+    LOG.info("Updated timeline delegation token " + timelineToken);
   }
 
   @Private
@@ -192,19 +269,33 @@
     }
   }
 
+  private ClientResponse doPutObjects(URI base, String path,
+      MultivaluedMap<String, String> params, Object obj) {
+    return connector.getClient().resource(base).path(path).queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
+        .put(ClientResponse.class, obj);
+  }
+
   protected void putObjects(URI base, String path,
       MultivaluedMap<String, String> params, Object obj)
       throws IOException, YarnException {
-    ClientResponse resp;
+    ClientResponse resp = null;
     try {
-      resp = connector.getClient().resource(base).path(path).queryParams(params)
-          .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
-          .put(ClientResponse.class, obj);
-    } catch (RuntimeException re) {
-      // runtime exception is expected if the client cannot connect the server
-      String msg = "Failed to get the response from the timeline server.";
-      LOG.error(msg, re);
-      throw new IOException(re);
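+      // Run the put as the authenticated UGI so that credentials added to
+      // it (such as the timeline delegation token) are available to the
+      // connection.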
+      resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
+        @Override
+        public ClientResponse run() throws Exception {
+          return doPutObjects(base, path, params, obj);
+        }
+      });
+    } catch (UndeclaredThrowableException ue) {
+      Throwable cause = ue.getCause();
+      if (cause instanceof IOException) {
+        throw (IOException)cause;
+      } else {
+        throw new IOException(cause);
+      }
+    } catch (InterruptedException ie) {
+      throw (IOException) new InterruptedIOException().initCause(ie);
     }
     if (resp == null || resp.getStatusInfo()
         .getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index d806b12..3c1dcdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -44,7 +44,7 @@
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.regex.Pattern;
-
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.commons.io.output.WriterOutputStream;
 import org.apache.commons.logging.Log;
@@ -61,7 +61,6 @@
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.file.tfile.TFile;
@@ -71,7 +70,6 @@
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -249,7 +247,7 @@
           in = secureOpenFile(logFile);
         } catch (IOException e) {
           logErrorMessage(logFile, e);
-          IOUtils.cleanup(LOG, in);
+          IOUtils.closeQuietly(in);
           continue;
         }
 
@@ -287,7 +285,7 @@
           String message = logErrorMessage(logFile, e);
           out.write(message.getBytes(Charset.forName("UTF-8")));
         } finally {
-          IOUtils.cleanup(LOG, in);
+          IOUtils.closeQuietly(in);
         }
       }
     }
@@ -557,7 +555,7 @@
       } catch (Exception e) {
         LOG.warn("Exception closing writer", e);
       } finally {
-        IOUtils.closeStream(this.fsDataOStream);
+        IOUtils.closeQuietly(this.fsDataOStream);
       }
     }
   }
@@ -605,7 +603,7 @@
         }
         return null;
       } finally {
-        IOUtils.cleanup(LOG, ownerScanner);
+        IOUtils.closeQuietly(ownerScanner);
       }
     }
 
@@ -651,7 +649,7 @@
         }
         return acls;
       } finally {
-        IOUtils.cleanup(LOG, aclScanner);
+        IOUtils.closeQuietly(aclScanner);
       }
     }
 
@@ -775,8 +773,8 @@
           }
         }
       } finally {
-        IOUtils.cleanup(LOG, ps);
-        IOUtils.cleanup(LOG, os);
+        IOUtils.closeQuietly(ps);
+        IOUtils.closeQuietly(os);
       }
     }
 
@@ -1001,7 +999,9 @@
     }
 
     public void close() {
-      IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
+      IOUtils.closeQuietly(scanner);
+      IOUtils.closeQuietly(reader);
+      IOUtils.closeQuietly(fsDataIStream);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
index 24baaab..e8a28de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
@@ -133,6 +133,23 @@
         new org.apache.hadoop.fs.Path(conf.get(
             YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
             YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
+    return getRemoteAppLogDir(conf, appId, appOwner, remoteRootLogDir, suffix);
+  }
+
+  /**
+   * Return the remote application log directory.
+   * @param conf the configuration
+   * @param appId the application
+   * @param appOwner the application owner
+   * @param remoteRootLogDir the remote root log directory
+   * @param suffix the log directory suffix
+   * @return the remote application log directory path
+   * @throws IOException if we cannot find the remote application log directory
+   */
+  public static org.apache.hadoop.fs.Path getRemoteAppLogDir(
+      Configuration conf, ApplicationId appId, String appOwner,
+      org.apache.hadoop.fs.Path remoteRootLogDir, String suffix)
+      throws IOException {
     org.apache.hadoop.fs.Path remoteAppDir = null;
     if (appOwner == null) {
       org.apache.hadoop.fs.Path qualifiedRemoteRootLogDir =
@@ -159,6 +176,30 @@
    * @param conf the configuration
    * @param appId the applicationId
    * @param appOwner the application owner
+   * @param remoteRootLogDir the remote root log directory
+   * @param suffix the log directory suffix
+   * @return the iterator of available log files
+   * @throws IOException if there is no log file available
+   */
+  public static RemoteIterator<FileStatus> getRemoteNodeFileDir(
+      Configuration conf, ApplicationId appId, String appOwner,
+      org.apache.hadoop.fs.Path remoteRootLogDir, String suffix)
+      throws IOException {
+    Path remoteAppLogDir = getRemoteAppLogDir(conf, appId, appOwner,
+        remoteRootLogDir, suffix);
+    Path qualifiedLogDir =
+        FileContext.getFileContext(conf).makeQualified(remoteAppLogDir);
+    return FileContext.getFileContext(qualifiedLogDir.toUri(), conf)
+        .listStatus(remoteAppLogDir);
+  }
+
+  /**
+   * Get all available log files under remote app log directory.
+   * @param conf the configuration
+   * @param appId the applicationId
+   * @param appOwner the application owner
    * @return the iterator of available log files
    * @throws IOException if there is no log file available
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
index ae2517a..74f694e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
@@ -19,9 +19,13 @@
 
 import java.io.DataInputStream;
 import java.io.EOFException;
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.channels.Channels;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
@@ -195,6 +199,54 @@
     os.flush();
   }
 
+  public static void outputContainerLogThroughZeroCopy(String containerId,
+      String nodeId, String fileName, long fileLength, long outputSize,
+      String lastModifiedTime, FileInputStream fis, OutputStream os,
+      ContainerLogAggregationType logType) throws IOException {
+    long toSkip = 0;
+    long totalBytesToRead = fileLength;
+    if (outputSize < 0) {
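+      // A negative outputSize requests the tail of the log: emit only the
+      // last |outputSize| bytes of the file.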
+      long absBytes = Math.abs(outputSize);
+      if (absBytes < fileLength) {
+        toSkip = fileLength - absBytes;
+        totalBytesToRead = absBytes;
+      }
+    } else {
+      if (outputSize < fileLength) {
+        totalBytesToRead = outputSize;
+      }
+    }
+
+    if (totalBytesToRead > 0) {
+      // output log summary
+      StringBuilder sb = new StringBuilder();
+      String containerStr = String.format(
+          LogToolUtils.CONTAINER_ON_NODE_PATTERN,
+          containerId, nodeId);
+      sb.append(containerStr + "\n");
+      sb.append("LogAggregationType: " + logType + "\n");
+      sb.append(StringUtils.repeat("=", containerStr.length()) + "\n");
+      sb.append("LogType:" + fileName + "\n");
+      sb.append("LogLastModifiedTime:" + lastModifiedTime + "\n");
+      sb.append("LogLength:" + Long.toString(fileLength) + "\n");
+      sb.append("LogContents:\n");
+      byte[] b = sb.toString().getBytes(
+          Charset.forName("UTF-8"));
+      os.write(b, 0, b.length);
+      // output log content
+      FileChannel inputChannel = fis.getChannel();
+      WritableByteChannel outputChannel = Channels.newChannel(os);
+      long position = toSkip;
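+      // transferTo may transfer fewer bytes than requested, so loop until
+      // the whole requested range has been written.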
+      while (totalBytesToRead > 0) {
+        long transferred =
+            inputChannel.transferTo(position, totalBytesToRead, outputChannel);
+        totalBytesToRead -= transferred;
+        position += transferred;
+      }
+      os.flush();
+    }
+  }
+
   public static boolean outputAggregatedContainerLog(Configuration conf,
       ApplicationId appId, String appOwner,
       String containerId, String nodeId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
new file mode 100644
index 0000000..5503f8f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -0,0 +1,404 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
+
+/**
+ * Base class to implement Log Aggregation File Controller.
+ */
+@Private
+@Unstable
+public abstract class LogAggregationFileController {
+
+  private static final Log LOG = LogFactory.getLog(
+      LogAggregationFileController.class);
+
+  /*
+   * The expected deployment TLD will be 1777, owner=<NMOwner>,
+   * group=<NMGroup, the group to which NMOwner belongs>. App dirs will be
+   * created as 770, owner=<AppOwner>, group=<NMGroup>, so that both the
+   * owner and <NMOwner> can access and modify the files.
+   * <NMGroup> should obviously be a limited-access group.
+   */
+  /**
+   * Permissions for the top-level directory under which app directories
+   * will be created.
+   */
+  protected static final FsPermission TLDIR_PERMISSIONS = FsPermission
+      .createImmutable((short) 01777);
+
+  /**
+   * Permissions for the Application directory.
+   */
+  protected static final FsPermission APP_DIR_PERMISSIONS = FsPermission
+      .createImmutable((short) 0770);
+
+  // This is a temporary solution. The configuration will be deleted once
+  // we find a more scalable method to only write a single log file per LRS.
+  private static final String NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP
+      = YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app";
+  private static final int
+      DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP = 30;
+
+  protected Configuration conf;
+  protected Path remoteRootLogDir;
+  protected String remoteRootLogDirSuffix;
+  protected int retentionSize;
+  protected String fileControllerName;
+
+  public LogAggregationFileController() {}
+
+  /**
+   * Initialize the log file controller.
+   * @param conf the Configuration
+   * @param controllerName the log controller class name
+   */
+  public void initialize(Configuration conf, String controllerName) {
+    this.conf = conf;
+    int configuredRetentionSize =
+        conf.getInt(NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
+            DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
+    if (configuredRetentionSize <= 0) {
+      this.retentionSize =
+          DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
+    } else {
+      this.retentionSize = configuredRetentionSize;
+    }
+    this.fileControllerName = controllerName;
+    initInternal(conf);
+  }
+
+  /**
+   * Derived classes initialize themselves using this method.
+   * @param conf the Configuration
+   */
+  protected abstract void initInternal(Configuration conf);
+
+  /**
+   * Get the remote root log directory.
+   * @return the remote root log directory path
+   */
+  public Path getRemoteRootLogDir() {
+    return this.remoteRootLogDir;
+  }
+
+  /**
+   * Get the log aggregation directory suffix.
+   * @return the log aggregation directory suffix
+   */
+  public String getRemoteRootLogDirSuffix() {
+    return this.remoteRootLogDirSuffix;
+  }
+
+  /**
+   * Initialize the writer.
+   * @param context the {@link LogAggregationFileControllerContext}
+   * @throws IOException if fails to initialize the writer
+   */
+  public abstract void initializeWriter(
+      LogAggregationFileControllerContext context) throws IOException;
+
+  /**
+   * Close the writer.
+   */
+  public abstract void closeWriter();
+
+  /**
+   * Write the log content.
+   * @param logKey the log key
+   * @param logValue the log content
+   * @throws IOException if fails to write the logs
+   */
+  public abstract void write(LogKey logKey, LogValue logValue)
+      throws IOException;
+
+  /**
+   * Operations needed after writing the log content.
+   * @param record the {@link LogAggregationFileControllerContext}
+   * @throws Exception if anything fails
+   */
+  public abstract void postWrite(LogAggregationFileControllerContext record)
+      throws Exception;
+
+  /**
+   * Verify and create the remote log directory.
+   */
+  public void verifyAndCreateRemoteLogDir() {
+    // Checking the existence of the TLD
+    FileSystem remoteFS = null;
+    try {
+      remoteFS = getFileSystem(conf);
+    } catch (IOException e) {
+      throw new YarnRuntimeException(
+          "Unable to get Remote FileSystem instance", e);
+    }
+    boolean remoteExists = true;
+    Path remoteRootLogDir = getRemoteRootLogDir();
+    try {
+      FsPermission perms =
+          remoteFS.getFileStatus(remoteRootLogDir).getPermission();
+      if (!perms.equals(TLDIR_PERMISSIONS)) {
+        LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
+            + "] already exists, but with incorrect permissions. "
+            + "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
+            + "]. The cluster may have problems with multiple users.");
+      }
+    } catch (FileNotFoundException e) {
+      remoteExists = false;
+    } catch (IOException e) {
+      throw new YarnRuntimeException(
+          "Failed to check permissions for dir ["
+              + remoteRootLogDir + "]", e);
+    }
+    if (!remoteExists) {
+      LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
+          + "] does not exist. Attempting to create it.");
+      try {
+        Path qualified =
+            remoteRootLogDir.makeQualified(remoteFS.getUri(),
+                remoteFS.getWorkingDirectory());
+        remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
+        remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+
+        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
+        String primaryGroupName = null;
+        try {
+          primaryGroupName = loginUser.getPrimaryGroupName();
+        } catch (IOException e) {
+          LOG.warn("No primary group found. The remote root log directory" +
+              " will be created with the HDFS superuser being its group " +
+              "owner. JobHistoryServer may be unable to read the directory.");
+        }
+        // set owner on the remote directory only if the primary group exists
+        if (primaryGroupName != null) {
+          remoteFS.setOwner(qualified,
+              loginUser.getShortUserName(), primaryGroupName);
+        }
+      } catch (IOException e) {
+        throw new YarnRuntimeException("Failed to create remoteLogDir ["
+            + remoteRootLogDir + "]", e);
+      }
+    }
+  }
+
+  /**
+   * Create remote Application directory for log aggregation.
+   * @param user the user
+   * @param appId the application ID
+   * @param userUgi the UGI
+   */
+  public void createAppDir(final String user, final ApplicationId appId,
+      UserGroupInformation userUgi) {
+    final Path remoteRootLogDir = getRemoteRootLogDir();
+    final String remoteRootLogDirSuffix = getRemoteRootLogDirSuffix();
+    try {
+      userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          try {
+            // TODO: Reuse FS for user?
+            FileSystem remoteFS = getFileSystem(conf);
+
+            // Only creating directories if they are missing to avoid
+            // unnecessary load on the filesystem from all of the nodes
+            Path appDir = LogAggregationUtils.getRemoteAppLogDir(
+                remoteRootLogDir, appId, user, remoteRootLogDirSuffix);
+
+            appDir = appDir.makeQualified(remoteFS.getUri(),
+                remoteFS.getWorkingDirectory());
+
+            if (!checkExists(remoteFS, appDir, APP_DIR_PERMISSIONS)) {
+              Path suffixDir = LogAggregationUtils.getRemoteLogSuffixedDir(
+                  remoteRootLogDir, user, remoteRootLogDirSuffix);
+              suffixDir = suffixDir.makeQualified(remoteFS.getUri(),
+                  remoteFS.getWorkingDirectory());
+
+              if (!checkExists(remoteFS, suffixDir, APP_DIR_PERMISSIONS)) {
+                Path userDir = LogAggregationUtils.getRemoteLogUserDir(
+                    remoteRootLogDir, user);
+                userDir = userDir.makeQualified(remoteFS.getUri(),
+                    remoteFS.getWorkingDirectory());
+
+                if (!checkExists(remoteFS, userDir, APP_DIR_PERMISSIONS)) {
+                  createDir(remoteFS, userDir, APP_DIR_PERMISSIONS);
+                }
+
+                createDir(remoteFS, suffixDir, APP_DIR_PERMISSIONS);
+              }
+
+              createDir(remoteFS, appDir, APP_DIR_PERMISSIONS);
+            }
+
+          } catch (IOException e) {
+            LOG.error("Failed to setup application log directory for "
+                + appId, e);
+            throw e;
+          }
+          return null;
+        }
+      });
+    } catch (Exception e) {
+      throw new YarnRuntimeException(e);
+    }
+  }
+
+  @VisibleForTesting
+  protected FileSystem getFileSystem(Configuration conf) throws IOException {
+    return getRemoteRootLogDir().getFileSystem(conf);
+  }
+
+  protected void createDir(FileSystem fs, Path path, FsPermission fsPerm)
+      throws IOException {
+    FsPermission dirPerm = new FsPermission(fsPerm);
+    fs.mkdirs(path, dirPerm);
+    FsPermission umask = FsPermission.getUMask(fs.getConf());
+    if (!dirPerm.equals(dirPerm.applyUMask(umask))) {
+      fs.setPermission(path, new FsPermission(fsPerm));
+    }
+  }
+
+  protected boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
+      throws IOException {
+    boolean exists = true;
+    try {
+      FileStatus appDirStatus = fs.getFileStatus(path);
+      if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
+        fs.setPermission(path, APP_DIR_PERMISSIONS);
+      }
+    } catch (FileNotFoundException fnfe) {
+      exists = false;
+    }
+    return exists;
+  }
+
+  /**
+   * Get the remote aggregated log path.
+   * @param appId the ApplicationId
+   * @param user the Application Owner
+   * @param nodeId the NodeManager Id
+   * @return the remote aggregated log path
+   */
+  public Path getRemoteNodeLogFileForApp(ApplicationId appId, String user,
+      NodeId nodeId) {
+    return LogAggregationUtils.getRemoteNodeLogFileForApp(
+        getRemoteRootLogDir(), appId, user, nodeId,
+        getRemoteRootLogDirSuffix());
+  }
+
+  /**
+   * Get the remote application directory for log aggregation.
+   * @param appId the Application ID
+   * @param appOwner the Application Owner
+   * @return the remote application directory
+   * @throws IOException if the remote application directory cannot be found
+   */
+  public Path getRemoteAppLogDir(ApplicationId appId, String appOwner)
+      throws IOException {
+    return LogAggregationUtils.getRemoteAppLogDir(conf, appId, appOwner,
+        this.remoteRootLogDir, this.remoteRootLogDirSuffix);
+  }
+
+  protected void cleanOldLogs(Path remoteNodeLogFileForApp,
+      final NodeId nodeId, UserGroupInformation userUgi) {
+    try {
+      final FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
+      Path appDir = remoteNodeLogFileForApp.getParent().makeQualified(
+          remoteFS.getUri(), remoteFS.getWorkingDirectory());
+      Set<FileStatus> status =
+          new HashSet<FileStatus>(Arrays.asList(remoteFS.listStatus(appDir)));
+
+      Iterable<FileStatus> mask =
+          Iterables.filter(status, new Predicate<FileStatus>() {
+            @Override
+            public boolean apply(FileStatus next) {
+              return next.getPath().getName()
+                .contains(LogAggregationUtils.getNodeString(nodeId))
+                && !next.getPath().getName().endsWith(
+                    LogAggregationUtils.TMP_FILE_SUFFIX);
+            }
+          });
+      status = Sets.newHashSet(mask);
+      // Normally we only need to delete the single oldest log
+      // before we upload a new one. If we cannot delete the
+      // older logs in this cycle, we will delete them in the
+      // next cycle.
+      if (status.size() >= this.retentionSize) {
+        // sort by the lastModificationTime ascending
+        List<FileStatus> statusList = new ArrayList<FileStatus>(status);
+        Collections.sort(statusList, new Comparator<FileStatus>() {
+          @Override
+          public int compare(FileStatus s1, FileStatus s2) {
+            return Long.compare(s1.getModificationTime(),
+                s2.getModificationTime());
+          }
+        });
+        for (int i = 0; i <= statusList.size() - this.retentionSize; i++) {
+          final FileStatus remove = statusList.get(i);
+          try {
+            userUgi.doAs(new PrivilegedExceptionAction<Object>() {
+              @Override
+              public Object run() throws Exception {
+                remoteFS.delete(remove.getPath(), false);
+                return null;
+              }
+            });
+          } catch (Exception e) {
+            LOG.error("Failed to delete " + remove.getPath(), e);
+          }
+        }
+      }
+    } catch (Exception e) {
+      LOG.error("Failed to clean old logs", e);
+    }
+  }
+}
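For illustration, the retention window in cleanOldLogs works like this: with N matching files already aggregated and a retention size R, the loop deletes indexes 0 through N - R of the ascending-sorted list, so R - 1 files survive and the upcoming upload brings the total back to R. A minimal standalone sketch of that arithmetic (the modification times and retention size are made-up stand-ins for the real remote listing):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class RetentionWindowSketch {
  public static void main(String[] args) {
    // Hypothetical modification times of already-aggregated files, unsorted.
    List<Long> modTimes =
        new ArrayList<>(Arrays.asList(50L, 10L, 40L, 20L, 30L));
    int retentionSize = 3;

    // Mirrors cleanOldLogs: only delete once the count reaches the limit.
    if (modTimes.size() >= retentionSize) {
      // Sort ascending so the oldest files come first.
      Collections.sort(modTimes);
      // Delete indexes 0 .. size - retentionSize inclusive; retentionSize - 1
      // files survive, and the new upload brings the total back to
      // retentionSize.
      for (int i = 0; i <= modTimes.size() - retentionSize; i++) {
        System.out.println("would delete file with modTime=" + modTimes.get(i));
      }
    }
  }
}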
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerContext.java
new file mode 100644
index 0000000..32128bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerContext.java
@@ -0,0 +1,130 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import java.util.Map;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+/**
+ * {@code LogAggregationFileControllerContext} is a record used in
+ * the log aggregation process.
+ */
+@Private
+@Unstable
+public class LogAggregationFileControllerContext {
+  private final boolean logAggregationInRolling;
+  private final long rollingMonitorInterval;
+  private final Path remoteNodeLogFileForApp;
+  private final NodeId nodeId;
+  private final UserGroupInformation userUgi;
+  private final ApplicationId appId;
+  private final Path remoteNodeTmpLogFileForApp;
+  private final Map<ApplicationAccessType, String> appAcls;
+  private int logAggregationTimes = 0;
+  private int cleanOldLogsTimes = 0;
+
+  private boolean uploadedLogsInThisCycle;
+  private long logUploadedTimeStamp;
+
+  public LogAggregationFileControllerContext(Path remoteNodeLogFileForApp,
+      Path remoteNodeTmpLogFileForApp,
+      boolean logAggregationInRolling,
+      long rollingMonitorInterval,
+      ApplicationId appId,
+      Map<ApplicationAccessType, String> appAcls,
+      NodeId nodeId, UserGroupInformation userUgi) {
+    this.remoteNodeLogFileForApp = remoteNodeLogFileForApp;
+    this.remoteNodeTmpLogFileForApp = remoteNodeTmpLogFileForApp;
+    this.logAggregationInRolling = logAggregationInRolling;
+    this.rollingMonitorInterval = rollingMonitorInterval;
+    this.nodeId = nodeId;
+    this.appId = appId;
+    this.appAcls = appAcls;
+    this.userUgi = userUgi;
+  }
+
+  public boolean isUploadedLogsInThisCycle() {
+    return uploadedLogsInThisCycle;
+  }
+
+  public void setUploadedLogsInThisCycle(boolean uploadedLogsInThisCycle) {
+    this.uploadedLogsInThisCycle = uploadedLogsInThisCycle;
+  }
+
+  public Path getRemoteNodeLogFileForApp() {
+    return remoteNodeLogFileForApp;
+  }
+
+  public long getRollingMonitorInterval() {
+    return rollingMonitorInterval;
+  }
+
+  public boolean isLogAggregationInRolling() {
+    return logAggregationInRolling;
+  }
+
+  public long getLogUploadTimeStamp() {
+    return logUploadedTimeStamp;
+  }
+
+  public void setLogUploadTimeStamp(long uploadTimeStamp) {
+    this.logUploadedTimeStamp = uploadTimeStamp;
+  }
+
+  public NodeId getNodeId() {
+    return nodeId;
+  }
+
+  public UserGroupInformation getUserUgi() {
+    return userUgi;
+  }
+
+  public ApplicationId getAppId() {
+    return appId;
+  }
+
+  public Path getRemoteNodeTmpLogFileForApp() {
+    return remoteNodeTmpLogFileForApp;
+  }
+
+  public void increLogAggregationTimes() {
+    this.logAggregationTimes++;
+  }
+
+  public void increcleanupOldLogTimes() {
+    this.cleanOldLogsTimes++;
+  }
+
+  public int getLogAggregationTimes() {
+    return logAggregationTimes;
+  }
+
+  public int getCleanOldLogsTimes() {
+    return cleanOldLogsTimes;
+  }
+
+  public Map<ApplicationAccessType, String> getAppAcls() {
+    return appAcls;
+  }
+}
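A usage sketch for this record, with placeholder paths, ports, and interval values (none of them come from this patch); a writer typically assembles the context once per aggregation cycle and then updates the upload state as it goes:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;

public class ContextSketch {
  public static void main(String[] args) throws Exception {
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    NodeId nodeId = NodeId.newInstance("localhost", 8041);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    Map<ApplicationAccessType, String> acls = new HashMap<>();
    acls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());

    // Final and temporary log paths; the ".tmp" suffix is illustrative.
    Path logFile = new Path("/tmp/logs/user/logs/app/node_8041");
    Path tmpLogFile = new Path(logFile + ".tmp");

    LogAggregationFileControllerContext context =
        new LogAggregationFileControllerContext(logFile, tmpLogFile,
            true /* rolling aggregation */, 3600 /* illustrative interval */,
            appId, acls, nodeId, ugi);
    // After a successful upload the writer records the cycle state.
    context.setUploadedLogsInThisCycle(true);
    context.setLogUploadTimeStamp(System.currentTimeMillis());
  }
}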
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
new file mode 100644
index 0000000..746bf5a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Use {@code LogAggregationFileControllerFactory} to get the correct
+ * {@link LogAggregationFileController} for writing and reading.
+ */
+@Private
+@Unstable
+public class LogAggregationFileControllerFactory {
+
+  private static final Log LOG = LogFactory.getLog(
+      LogAggregationFileControllerFactory.class);
+  private static final Pattern CONTROLLER_NAME_PATTERN = Pattern.compile(
+      "^[A-Za-z_]+[A-Za-z0-9_]*$");
+  private LinkedList<LogAggregationFileController> controllers
+      = new LinkedList<>();
+  private Configuration conf;
+
+  /**
+   * Construct the LogAggregationFileControllerFactory object.
+   * @param conf the Configuration
+   */
+  public LogAggregationFileControllerFactory(Configuration conf) {
+    this.conf = conf;
+    Collection<String> fileControllers = conf.getStringCollection(
+        YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS);
+
+    Map<String, String> controllerChecker = new HashMap<>();
+
+    for (String fileController : fileControllers) {
+      Preconditions.checkArgument(validateAggregatedFileControllerName(
+          fileController), "The FileControllerName: " + fileController
+          + " set in " + YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS
+          +" is invalid." + "The valid File Controller name should only "
+          + "contain a-zA-Z0-9_ and can not start with numbers");
+
+      String remoteDirStr = String.format(
+          YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT,
+          fileController);
+      String remoteDir = conf.get(remoteDirStr);
+      boolean defaultRemoteDir = false;
+      if (remoteDir == null || remoteDir.isEmpty()) {
+        remoteDir = conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR);
+        defaultRemoteDir = true;
+      }
+      String suffixStr = String.format(
+          YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT,
+          fileController);
+      String suffix = conf.get(suffixStr);
+      boolean defaultSuffix = false;
+      if (suffix == null || suffix.isEmpty()) {
+        suffix = conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
+            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
+        defaultSuffix = true;
+      }
+      String dirSuffix = remoteDir + "-" + suffix;
+      if (controllerChecker.containsKey(dirSuffix)) {
+        if (defaultRemoteDir && defaultSuffix) {
+          String fileControllerStr = controllerChecker.get(dirSuffix);
+          List<String> controllersList = new ArrayList<>();
+          controllersList.add(fileControllerStr);
+          controllersList.add(fileController);
+          fileControllerStr = StringUtils.join(controllersList, ",");
+          controllerChecker.put(dirSuffix, fileControllerStr);
+        } else {
+          String conflictController = controllerChecker.get(dirSuffix);
+          throw new RuntimeException("The combined value of " + remoteDirStr
+              + " and " + suffixStr + " should not be the same as the value"
+              + " set for " + conflictController);
+        }
+      } else {
+        controllerChecker.put(dirSuffix, fileController);
+      }
+      String classKey = String.format(
+          YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT,
+          fileController);
+      String className = conf.get(classKey);
+      if (className == null || className.isEmpty()) {
+        throw new RuntimeException("No class configured for "
+            + fileController);
+      }
+      Class<? extends LogAggregationFileController> sClass = conf.getClass(
+          classKey, null, LogAggregationFileController.class);
+      if (sClass == null) {
+        throw new RuntimeException("No class defined for " + fileController);
+      }
+      LogAggregationFileController s = ReflectionUtils.newInstance(
+          sClass, conf);
+      s.initialize(conf, fileController);
+      controllers.add(s);
+    }
+  }
+
+  /**
+   * Get {@link LogAggregationFileController} to write.
+   * @return the LogAggregationFileController instance
+   */
+  public LogAggregationFileController getFileControllerForWrite() {
+    return controllers.getFirst();
+  }
+
+  /**
+   * Get {@link LogAggregationFileController} to read the aggregated logs
+   * for this application.
+   * @param appId the ApplicationId
+   * @param appOwner the Application Owner
+   * @return the LogAggregationFileController instance
+   * @throws IOException if no log aggregation file controller can read
+   *         the application's logs
+   */
+  public LogAggregationFileController getFileControllerForRead(
+      ApplicationId appId, String appOwner) throws IOException {
+    StringBuilder diagnosis = new StringBuilder();
+    for (LogAggregationFileController fileController : controllers) {
+      try {
+        Path remoteAppLogDir = fileController.getRemoteAppLogDir(
+            appId, appOwner);
+        Path qualifiedLogDir = FileContext.getFileContext(conf).makeQualified(
+            remoteAppLogDir);
+        RemoteIterator<FileStatus> nodeFiles = FileContext.getFileContext(
+            qualifiedLogDir.toUri(), conf).listStatus(remoteAppLogDir);
+        if (nodeFiles.hasNext()) {
+          return fileController;
+        }
+      } catch (Exception ex) {
+        diagnosis.append(ex.getMessage()).append("\n");
+      }
+    }
+    throw new IOException("Can not find any log aggregation file controller"
+        + " that can read the logs of application " + appId + ":\n"
+        + diagnosis);
+  }
+
+  private boolean validateAggregatedFileControllerName(String name) {
+    if (name == null || name.trim().isEmpty()) {
+      return false;
+    }
+    return CONTROLLER_NAME_PATTERN.matcher(name).matches();
+  }
+
+  @Private
+  @VisibleForTesting
+  public LinkedList<LogAggregationFileController>
+      getConfiguredLogAggregationFileControllerList() {
+    return this.controllers;
+  }
+}
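A minimal sketch of the write-side lookup, assuming only the defaults shipped in yarn-default.xml (so the TFile controller is the single configured format):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;

public class FactorySketch {
  public static void main(String[] args) {
    // With the stock yarn-default.xml, "TFile" is the only configured
    // format, so it is both the write controller and the read fallback.
    Configuration conf = new YarnConfiguration();
    LogAggregationFileControllerFactory factory =
        new LogAggregationFileControllerFactory(conf);
    LogAggregationFileController writer = factory.getFileControllerForWrite();
    System.out.println("writing with "
        + writer.getClass().getSimpleName()
        + " under " + writer.getRemoteRootLogDir());
  }
}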
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationTFileController.java
new file mode 100644
index 0000000..9e0c66d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationTFileController.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
+import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
+import org.apache.hadoop.yarn.util.Times;
+
+/**
+ * The TFile log aggregation file Controller implementation.
+ */
+@Private
+@Unstable
+public class LogAggregationTFileController
+    extends LogAggregationFileController {
+
+  private static final Log LOG = LogFactory.getLog(
+      LogAggregationTFileController.class);
+
+  private LogWriter writer;
+
+  public LogAggregationTFileController() {}
+
+  @Override
+  public void initInternal(Configuration conf) {
+    this.remoteRootLogDir = new Path(
+        conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
+    this.remoteRootLogDirSuffix =
+        conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
+            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
+  }
+
+  @Override
+  public void initializeWriter(LogAggregationFileControllerContext context)
+      throws IOException {
+    this.writer = new LogWriter();
+    writer.initialize(this.conf, context.getRemoteNodeTmpLogFileForApp(),
+        context.getUserUgi());
+    // Write ACLs once when the writer is created.
+    writer.writeApplicationACLs(context.getAppAcls());
+    writer.writeApplicationOwner(context.getUserUgi().getShortUserName());
+  }
+
+  @Override
+  public void closeWriter() {
+    this.writer.close();
+  }
+
+  @Override
+  public void write(LogKey logKey, LogValue logValue) throws IOException {
+    this.writer.append(logKey, logValue);
+  }
+
+  @Override
+  public void postWrite(final LogAggregationFileControllerContext record)
+      throws Exception {
+    // Before uploading logs, make sure the number of existing logs
+    // is smaller than the configured NM log aggregation retention size.
+    if (record.isUploadedLogsInThisCycle() &&
+        record.isLogAggregationInRolling()) {
+      cleanOldLogs(record.getRemoteNodeLogFileForApp(), record.getNodeId(),
+          record.getUserUgi());
+      record.increcleanupOldLogTimes();
+    }
+
+    final Path renamedPath = record.getRollingMonitorInterval() <= 0
+        ? record.getRemoteNodeLogFileForApp() : new Path(
+            record.getRemoteNodeLogFileForApp().getParent(),
+            record.getRemoteNodeLogFileForApp().getName() + "_"
+            + record.getLogUploadTimeStamp());
+    final boolean rename = record.isUploadedLogsInThisCycle();
+    try {
+      record.getUserUgi().doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          FileSystem remoteFS = record.getRemoteNodeLogFileForApp()
+              .getFileSystem(conf);
+          if (rename) {
+            remoteFS.rename(record.getRemoteNodeTmpLogFileForApp(),
+                renamedPath);
+          } else {
+            remoteFS.delete(record.getRemoteNodeTmpLogFileForApp(), false);
+          }
+          return null;
+        }
+      });
+    } catch (Exception e) {
+      LOG.error(
+          "Failed to move temporary log file ["
+          + record.getRemoteNodeTmpLogFileForApp() + "] to final location ["
+          + renamedPath + "]", e);
+      throw new Exception("Log upload failed for Application: "
+          + record.getAppId() + " in NodeManager: "
+          + LogAggregationUtils.getNodeString(record.getNodeId()) + " at "
+          + Times.format(record.getLogUploadTimeStamp()) + "\n");
+    }
+  }
+}
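The final-path computation in postWrite can be read in isolation; a sketch with placeholder values (the path and timestamp are invented for illustration):

import org.apache.hadoop.fs.Path;

public class RenamePathSketch {
  public static void main(String[] args) {
    // Placeholder values; in postWrite these come from the context.
    Path remoteNodeLogFile = new Path("/tmp/logs/user/logs/app_1/localhost_8041");
    long rollingMonitorInterval = 3600;
    long uploadTimeStamp = 1500000000000L;

    // Non-rolling aggregation keeps the plain per-node file name;
    // rolling aggregation appends the upload timestamp so successive
    // cycles do not overwrite each other.
    Path renamedPath = rollingMonitorInterval <= 0
        ? remoteNodeLogFile
        : new Path(remoteNodeLogFile.getParent(),
            remoteNodeLogFile.getName() + "_" + uploadTimeStamp);
    System.out.println(renamedPath);
  }
}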
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/package-info.java
similarity index 83%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/package-info.java
index 5d14c6f..cad238a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+@InterfaceAudience.Public
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+import org.apache.hadoop.classification.InterfaceAudience;
 
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
new file mode 100644
index 0000000..917d696
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A {@link CharSequence} appender that considers its {@link #limit} as upper
+ * bound.
+ * <p>
+ * When an append would push the contents past {@link #limit}, past messages
+ * are truncated from the head, and a header telling the user about the
+ * truncation is prepended, with ellipses in between header and messages.
+ * <p>
+ * Note that header and ellipses are not counted against {@link #limit}.
+ * <p>
+ * An example:
+ *
+ * <pre>
+ * {@code
+ *   // At the beginning it's an empty string
+ *   final BoundedAppender shortAppender = new BoundedAppender(80);
+ *   // The whole message fits into limit
+ *   shortAppender.append(
+ *       "message1 this is a very long message but fitting into limit\n");
+ *   // The first message is truncated, the second not
+ *   shortAppender.append("message2 this is shorter than the previous one\n");
+ *   // The first message is deleted, the second truncated, the third
+ *   // preserved
+ *   shortAppender.append("message3 this is even shorter message, maybe.\n");
+ *   // The first two are deleted, the third one truncated, the last preserved
+ *   shortAppender.append("message4 the shortest one, yet the greatest :)");
+ *   // Current contents are like this:
+ *   // Diagnostic messages truncated, showing last 80 chars out of 199:
+ *   // ...s is even shorter message, maybe.
+ *   // message4 the shortest one, yet the greatest :)
+ * }
+ * </pre>
+ * <p>
+ * Note that <tt>null</tt> values are {@link #append(CharSequence) append}ed
+ * just like in the {@link StringBuilder#append(CharSequence) original
+ * implementation}.
+ * <p>
+ * Note that this class is not thread safe.
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+@VisibleForTesting
+public class BoundedAppender {
+  @VisibleForTesting
+  public static final String TRUNCATED_MESSAGES_TEMPLATE =
+      "Diagnostic messages truncated, showing last "
+          + "%d chars out of %d:%n...%s";
+
+  private final int limit;
+  private final StringBuilder messages = new StringBuilder();
+  private int totalCharacterCount = 0;
+
+  public BoundedAppender(final int limit) {
+    Preconditions.checkArgument(limit > 0, "limit should be positive");
+
+    this.limit = limit;
+  }
+
+  /**
+   * Append a {@link CharSequence} considering {@link #limit}, truncating
+   * from the head of {@code csq} or {@link #messages} when necessary.
+   *
+   * @param csq the {@link CharSequence} to append
+   * @return this
+   */
+  public BoundedAppender append(final CharSequence csq) {
+    appendAndCount(csq);
+    checkAndCut();
+
+    return this;
+  }
+
+  private void appendAndCount(final CharSequence csq) {
+    final int before = messages.length();
+    messages.append(csq);
+    final int after = messages.length();
+    totalCharacterCount += after - before;
+  }
+
+  private void checkAndCut() {
+    if (messages.length() > limit) {
+      final int newStart = messages.length() - limit;
+      messages.delete(0, newStart);
+    }
+  }
+
+  /**
+   * Get current length of messages considering truncates
+   * without header and ellipses.
+   *
+   * @return current length
+   */
+  public int length() {
+    return messages.length();
+  }
+
+  public int getLimit() {
+    return limit;
+  }
+
+  /**
+   * Get a string representation of the actual contents, prepending a
+   * header and ellipses when truncation has occurred.
+   *
+   * @return String representation of the {@link #messages}
+   */
+  @Override
+  public String toString() {
+    if (messages.length() < totalCharacterCount) {
+      return String.format(TRUNCATED_MESSAGES_TEMPLATE, messages.length(),
+          totalCharacterCount, messages.toString());
+    }
+
+    return messages.toString();
+  }
+}
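A runnable variant of the javadoc example above, with a deliberately tiny limit so the truncation header shows up immediately:

import org.apache.hadoop.yarn.util.BoundedAppender;

public class BoundedAppenderSketch {
  public static void main(String[] args) {
    // A small limit makes the truncation behavior visible right away.
    BoundedAppender diagnostics = new BoundedAppender(16);
    diagnostics.append("first message, soon to be cut\n");
    diagnostics.append("second message");
    // length() stays at or below the limit; toString() prepends the
    // truncation header because more than 16 chars were appended in total.
    System.out.println(diagnostics.length());
    System.out.println(diagnostics);
  }
}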
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index dbf115b..17d0496 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -73,6 +73,14 @@
   </property>
 
   <property>
+    <description>
+      If set to true, then ALL container updates will be automatically sent to
+      the NM in the next heartbeat.</description>
+    <name>yarn.resourcemanager.auto-update.containers</name>
+    <value>false</value>
+  </property>
+
+  <property>
     <description>The number of threads used to handle applications manager requests.</description>
     <name>yarn.resourcemanager.client.thread-count</name>
     <value>50</value>
@@ -1159,6 +1167,25 @@
   </property>
 
   <property>
+    <description>Specify which log file controllers we will support. The first
+    file controller we add will be used to write the aggregated logs.
+    This comma-separated configuration works together with the configuration
+    yarn.log-aggregation.file-controller.%s.class, which defines the class
+    of each supported file controller. By default, the TFile controller is
+    used. Users can override this configuration by adding more file
+    controllers. To preserve backward compatibility, make sure the TFile
+    file controller is always included.</description>
+    <name>yarn.log-aggregation.file-formats</name>
+    <value>TFile</value>
+  </property>
+
+  <property>
+    <description>Class that supports TFile read and write operations.</description>
+    <name>yarn.log-aggregation.file-controller.TFile.class</name>
+    <value>org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationTFileController</value>
+  </property>
+
+  <property>
     <description>
     How long for ResourceManager to wait for NodeManager to report its
     log aggregation status. If waiting time of which the log aggregation
@@ -2314,6 +2341,39 @@
     <value>259200000</value>
   </property>
 
+  <property>
+    <description>
+    The default HDFS location of the flow run coprocessor jar.
+    </description>
+    <name>yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+    </name>
+    <value>/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar</value>
+  </property>
+
+  <property>
+    <description>
+    The value of this parameter sets the prefix for all tables that are part
+    of the timeline service in the HBase storage schema. It can be set to
+    "dev." or "staging." for development or staging instances.
+    This way the data in production tables stays in a separate set of tables
+    prefixed by "prod.".
+    </description>
+    <name>yarn.timeline-service.hbase-schema.prefix</name>
+    <value>prod.</value>
+  </property>
+
+  <property>
+    <description>Optional URL to an hbase-site.xml configuration file to be
+    used to connect to the timeline-service HBase cluster. If empty or not
+    specified, the HBase configuration will be loaded from the classpath.
+    When specified, the values in this configuration file will override
+    those present on the classpath.
+    </description>
+    <name>yarn.timeline-service.hbase.configuration.file
+    </name>
+    <value></value>
+  </property>
+
   <!--  Shared Cache Configuration -->
 
   <property>
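Putting the two log-aggregation properties above to use: a sketch that registers an extra controller programmatically. The name "Sample" and the class org.example.SampleLogAggregationFileController are hypothetical; the per-controller dir and suffix keys follow the pattern used by the factory and its tests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FileFormatConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // "Sample" is a hypothetical controller name; TFile stays last for
    // backward compatibility, and the first entry is used for writes.
    conf.set("yarn.log-aggregation.file-formats", "Sample,TFile");
    conf.set("yarn.log-aggregation.file-controller.Sample.class",
        "org.example.SampleLogAggregationFileController");
    // Per-controller dir and suffix; each controller's dir + suffix
    // combination must not collide with another controller's.
    conf.set("yarn.log-aggregation.Sample.remote-app-log-dir",
        "/tmp/sample-logs");
    conf.set("yarn.log-aggregation.Sample.remote-app-log-dir-suffix",
        "logs-sample");
  }
}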
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index bb688c9..a3f5491 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -101,6 +101,7 @@
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -406,6 +407,7 @@
     generateByNewInstance(CommitResponse.class);
     generateByNewInstance(ApplicationTimeout.class);
     generateByNewInstance(QueueConfigurations.class);
+    generateByNewInstance(CollectorInfo.class);
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
index c5b02fd..95595a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.client.api.impl;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
@@ -27,11 +32,16 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -151,7 +161,7 @@
         maxRetries);
     c.init(conf);
     c.start();
-    c.setTimelineServiceAddress("localhost:12345");
+    c.setTimelineCollectorInfo(CollectorInfo.newInstance("localhost:12345"));
     try {
       c.putEntities(new TimelineEntity());
     } catch (IOException e) {
@@ -311,6 +321,50 @@
   }
 
   @Test
+  public void testSetTimelineToken() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    assertEquals(0, ugi.getTokens().size());
+    assertNull("Timeline token in v2 client should not be set",
+        client.currentTimelineToken);
+
+    Token token = Token.newInstance(
+        new byte[0], "kind", new byte[0], "service");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertNull("Timeline token in v2 client should not be set as token kind " +
+        "is unexepcted.", client.currentTimelineToken);
+    assertEquals(0, ugi.getTokens().size());
+
+    token = Token.newInstance(new byte[0], TimelineDelegationTokenIdentifier.
+        KIND_NAME.toString(), new byte[0], null);
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertNull("Timeline token in v2 client should not be set as serice is " +
+        "not set.", client.currentTimelineToken);
+    assertEquals(0, ugi.getTokens().size());
+
+    TimelineDelegationTokenIdentifier ident =
+        new TimelineDelegationTokenIdentifier(new Text(ugi.getUserName()),
+        new Text("renewer"), null);
+    ident.setSequenceNumber(1);
+    token = Token.newInstance(ident.getBytes(),
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString(), new byte[0],
+        "localhost:1234");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertEquals(1, ugi.getTokens().size());
+    assertNotNull("Timeline token should be set in v2 client.",
+        client.currentTimelineToken);
+    assertEquals(token, client.currentTimelineToken);
+
+    ident.setSequenceNumber(20);
+    Token newToken = Token.newInstance(ident.getBytes(),
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString(), new byte[0],
+        "localhost:1234");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, newToken));
+    assertEquals(1, ugi.getTokens().size());
+    assertNotEquals(token, client.currentTimelineToken);
+    assertEquals(newToken, client.currentTimelineToken);
+  }
+
+  @Test
   public void testAfterStop() throws Exception {
     client.setSleepBeforeReturn(true);
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 1e71b3c..3dd7de3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -40,10 +40,14 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.log.AggregatedLogsBlockForTest;
 import org.apache.hadoop.yarn.webapp.view.BlockForTest;
@@ -249,7 +253,7 @@
   
   
   private Configuration getConfiguration() {
-    Configuration configuration = new Configuration();
+    Configuration configuration = new YarnConfiguration();
     configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
     configuration.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, "target/logs");
     configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
@@ -295,19 +299,25 @@
     List<String> rootLogDirs = Arrays.asList("target/logs/logs");
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
 
-    try (AggregatedLogFormat.LogWriter writer =
-             new AggregatedLogFormat.LogWriter()) {
-      writer.initialize(configuration, new Path(path), ugi);
-      writer.writeApplicationOwner(ugi.getUserName());
-
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(configuration);
+    LogAggregationFileController fileController = factory
+        .getFileControllerForWrite();
+    try {
       Map<ApplicationAccessType, String> appAcls = new HashMap<>();
       appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
-      writer.writeApplicationACLs(appAcls);
-
-      writer.append(
+      NodeId nodeId = NodeId.newInstance("localhost", 1234);
+      LogAggregationFileControllerContext context
+          = new LogAggregationFileControllerContext(
+              new Path(path), new Path(path), false, 3600,
+              appId, appAcls, nodeId, ugi);
+      fileController.initializeWriter(context);
+      fileController.write(
           new AggregatedLogFormat.LogKey("container_0_0001_01_000001"),
           new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
               UserGroupInformation.getCurrentUser().getShortUserName()));
+    } finally {
+      fileController.closeWriter();
     }
   }
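The tests above drive only initializeWriter/write/closeWriter; in the NodeManager the full cycle presumably also ends with postWrite, which publishes the uploaded file. A sketch of one complete cycle under that assumption (error handling elided, inputs built as in the surrounding tests):

import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;

public class AggregationCycleSketch {
  // One full aggregation cycle: open the writer, append the logs, record
  // the upload state, close, and let postWrite rename/publish the file.
  static void aggregateOnce(LogAggregationFileController controller,
      LogAggregationFileControllerContext context,
      AggregatedLogFormat.LogKey key,
      AggregatedLogFormat.LogValue value) throws Exception {
    controller.initializeWriter(context);
    try {
      controller.write(key, value);
      context.setUploadedLogsInThisCycle(true);
      context.setLogUploadTimeStamp(System.currentTimeMillis());
    } finally {
      controller.closeWriter();
    }
    controller.postWrite(context);
    context.increLogAggregationTimes();
  }
}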
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java
index 8b665e0..a12e2a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java
@@ -24,15 +24,21 @@
 import java.io.IOException;
 import java.io.Writer;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 
 /**
  * This class contains several utility functions for log aggregation tests.
@@ -110,14 +116,25 @@
       ContainerId containerId, Path appDir, FileSystem fs) throws IOException {
     Path path =
         new Path(appDir, LogAggregationUtils.getNodeString(nodeId));
-    try (AggregatedLogFormat.LogWriter writer =
-        new AggregatedLogFormat.LogWriter()) {
-      writer.initialize(configuration, path, ugi);
-      writer.writeApplicationOwner(ugi.getUserName());
-
-      writer.append(new AggregatedLogFormat.LogKey(containerId),
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(configuration);
+    LogAggregationFileController fileController = factory
+        .getFileControllerForWrite();
+    try {
+      Map<ApplicationAccessType, String> appAcls = new HashMap<>();
+      appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
+      ApplicationId appId = containerId.getApplicationAttemptId()
+          .getApplicationId();
+      LogAggregationFileControllerContext context
+          = new LogAggregationFileControllerContext(
+              path, path, true, 1000,
+              appId, appAcls, nodeId, ugi);
+      fileController.initializeWriter(context);
+      fileController.write(new AggregatedLogFormat.LogKey(containerId),
           new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
               ugi.getShortUserName()));
+    } finally {
+      fileController.closeWriter();
     }
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
new file mode 100644
index 0000000..45f18c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.Writer;
+import java.util.LinkedList;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
+import org.junit.Test;
+
+/**
+ * Test class for {@link LogAggregationFileControllerFactory}.
+ */
+public class TestLogAggregationFileControllerFactory {
+
+  @Test(timeout = 10000)
+  public void testLogAggregationFileControllerFactory() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(
+        System.currentTimeMillis(), 1);
+    String appOwner = "test";
+    String remoteLogRootDir = "target/app-logs/";
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
+    conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, "log");
+    FileSystem fs = FileSystem.get(conf);
+
+    LogAggregationFileControllerFactory factory =
+        new LogAggregationFileControllerFactory(conf);
+    LinkedList<LogAggregationFileController> list = factory
+        .getConfiguredLogAggregationFileControllerList();
+    assertEquals(1, list.size());
+    assertTrue(list.getFirst() instanceof LogAggregationTFileController);
+    assertTrue(factory.getFileControllerForWrite()
+        instanceof LogAggregationTFileController);
+    Path logPath = list.getFirst().getRemoteAppLogDir(appId, appOwner);
+    try {
+      if (fs.exists(logPath)) {
+        fs.delete(logPath, true);
+      }
+      assertTrue(fs.mkdirs(logPath));
+      Writer writer =
+          new FileWriter(new File(logPath.toString(), "testLog"));
+      writer.write("test");
+      writer.close();
+      assertTrue(factory.getFileControllerForRead(appId, appOwner)
+          instanceof LogAggregationTFileController);
+    } finally {
+      fs.delete(logPath, true);
+    }
+
+    conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS,
+        "TestLogAggregationFileController");
+    // No class is configured for TestLogAggregationFileController,
+    // so constructing the factory should throw an exception.
+    try {
+      factory =
+          new LogAggregationFileControllerFactory(conf);
+      fail("Expected the factory construction to fail");
+    } catch (Exception ex) {
+      // should get exception
+    }
+
+    conf.set(YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS,
+        "TestLogAggregationFileController,TFile");
+    conf.setClass(
+        "yarn.log-aggregation.file-controller.TestLogAggregationFileController"
+        + ".class", TestLogAggregationFileController.class,
+        LogAggregationFileController.class);
+
+    conf.set(
+        "yarn.log-aggregation.TestLogAggregationFileController"
+        + ".remote-app-log-dir", remoteLogRootDir);
+    conf.set(
+        "yarn.log-aggregation.TestLogAggregationFileController"
+        + ".remote-app-log-dir-suffix", "testLog");
+
+    factory = new LogAggregationFileControllerFactory(conf);
+    list = factory.getConfiguredLogAggregationFileControllerList();
+    assertEquals(2, list.size());
+    assertTrue(list.getFirst() instanceof TestLogAggregationFileController);
+    assertTrue(list.getLast() instanceof LogAggregationTFileController);
+    assertTrue(factory.getFileControllerForWrite()
+        instanceof TestLogAggregationFileController);
+
+    logPath = list.getFirst().getRemoteAppLogDir(appId, appOwner);
+    try {
+      if (fs.exists(logPath)) {
+        fs.delete(logPath, true);
+      }
+      assertTrue(fs.mkdirs(logPath));
+      Writer writer =
+          new FileWriter(new File(logPath.toString(), "testLog"));
+      writer.write("test");
+      writer.close();
+      assertTrue(factory.getFileControllerForRead(appId, appOwner)
+          instanceof TestLogAggregationFileController);
+    } finally {
+      fs.delete(logPath, true);
+    }
+  }
+
+  private static class TestLogAggregationFileController
+      extends LogAggregationFileController {
+
+    @Override
+    public void initInternal(Configuration conf) {
+      String remoteDirStr = String.format(
+          YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT,
+          this.fileControllerName);
+      this.remoteRootLogDir = new Path(conf.get(remoteDirStr));
+      String suffix = String.format(
+          YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT,
+          this.fileControllerName);
+      this.remoteRootLogDirSuffix = conf.get(suffix);
+    }
+
+    @Override
+    public void closeWriter() {
+      // Do Nothing
+    }
+
+    @Override
+    public void write(LogKey logKey, LogValue logValue) throws IOException {
+      // Do Nothing
+    }
+
+    @Override
+    public void postWrite(LogAggregationFileControllerContext record)
+        throws Exception {
+      // Do Nothing
+    }
+
+    @Override
+    public void initializeWriter(LogAggregationFileControllerContext context)
+        throws IOException {
+      // Do Nothing
+    }
+  }
+}
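Complementing the test above, a sketch of the conflict rule enforced in the factory constructor: two controllers that explicitly share the same remote dir and suffix are rejected, since their aggregated files would be indistinguishable on the read path. The controller name "Sample" is hypothetical, and its class is simply reused from the TFile controller for the sake of the sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;

public class DirConflictSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.log-aggregation.file-formats", "Sample,TFile");
    conf.set("yarn.log-aggregation.file-controller.Sample.class",
        "org.apache.hadoop.yarn.logaggregation.filecontroller"
            + ".LogAggregationTFileController");
    // Both controllers are explicitly pointed at the same dir + suffix
    // pair ...
    conf.set("yarn.log-aggregation.Sample.remote-app-log-dir", "/tmp/logs");
    conf.set("yarn.log-aggregation.Sample.remote-app-log-dir-suffix", "logs");
    conf.set("yarn.log-aggregation.TFile.remote-app-log-dir", "/tmp/logs");
    conf.set("yarn.log-aggregation.TFile.remote-app-log-dir-suffix", "logs");
    try {
      new LogAggregationFileControllerFactory(conf);
      System.out.println("unexpected: factory accepted the conflict");
    } catch (RuntimeException expected) {
      // ... so construction fails with the conflict diagnostic.
      System.out.println("rejected as expected: " + expected.getMessage());
    }
  }
}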
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
similarity index 95%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
index 9cb1e04..2b9cfce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
@@ -16,14 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+package org.apache.hadoop.yarn.util;
 
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
 import static org.junit.Assert.assertEquals;
-import static org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.BoundedAppender;
 
 /**
  * Test class for {@link BoundedAppender}.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 85e5f2d..4e3a1e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -20,14 +20,14 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.ArrayList;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
@@ -47,10 +47,9 @@
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineV1DelegationTokenSecretManagerService;
 import org.apache.hadoop.yarn.server.timeline.webapp.CrossOriginFilterInitializer;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -75,7 +74,7 @@
   private ApplicationACLsManager aclsManager;
   private ApplicationHistoryManager historyManager;
   private TimelineStore timelineStore;
-  private TimelineDelegationTokenSecretManagerService secretManagerService;
+  private TimelineV1DelegationTokenSecretManagerService secretManagerService;
   private TimelineDataManager timelineDataManager;
   private WebApp webApp;
   private JvmPauseMonitor pauseMonitor;
@@ -223,9 +222,9 @@
         TimelineStore.class), conf);
   }
 
-  private TimelineDelegationTokenSecretManagerService
+  private TimelineV1DelegationTokenSecretManagerService
       createTimelineDelegationTokenSecretManagerService(Configuration conf) {
-    return new TimelineDelegationTokenSecretManagerService();
+    return new TimelineV1DelegationTokenSecretManagerService();
   }
 
   private TimelineDataManager createTimelineDataManager(Configuration conf) {
@@ -237,63 +236,33 @@
   @SuppressWarnings("unchecked")
   private void startWebApp() {
     Configuration conf = getConfig();
-    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
-        secretManagerService.getTimelineDelegationTokenSecretManager());
     // Always load pseudo authentication filter to parse "user.name" in an URL
     // to identify a HTTP request's user in insecure mode.
     // When Kerberos authentication type is set (i.e., secure mode is turned on),
     // the customized filter will be loaded by the timeline server to do Kerberos
     // + DT authentication.
-    String initializers = conf.get("hadoop.http.filter.initializers");
-    boolean modifiedInitializers = false;
-
-    initializers =
-        initializers == null || initializers.length() == 0 ? "" : initializers;
-
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    // Add CORS filter
     if (!initializers.contains(CrossOriginFilterInitializer.class.getName())) {
-      if(conf.getBoolean(YarnConfiguration
-          .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED, YarnConfiguration
-              .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
-        if (initializers.contains(HttpCrossOriginFilterInitializer.class.getName())) {
-          initializers =
-            initializers.replaceAll(HttpCrossOriginFilterInitializer.class.getName(),
+      if (conf.getBoolean(YarnConfiguration.
+          TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
+          YarnConfiguration.
+          TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
+        if (initializers.contains(
+            HttpCrossOriginFilterInitializer.class.getName())) {
+          initializers = initializers.replaceAll(
+              HttpCrossOriginFilterInitializer.class.getName(),
               CrossOriginFilterInitializer.class.getName());
+        } else {
+          defaultInitializers.add(CrossOriginFilterInitializer.class.getName());
         }
-        else {
-          if (initializers.length() != 0) {
-            initializers += ",";
-          }
-          initializers += CrossOriginFilterInitializer.class.getName();
-        }
-        modifiedInitializers = true;
       }
     }
-
-    if (!initializers.contains(TimelineAuthenticationFilterInitializer.class
-      .getName())) {
-      if (initializers.length() != 0) {
-        initializers += ",";
-      }
-      initializers += TimelineAuthenticationFilterInitializer.class.getName();
-      modifiedInitializers = true;
-    }
-
-    String[] parts = initializers.split(",");
-    ArrayList<String> target = new ArrayList<String>();
-    for (String filterInitializer : parts) {
-      filterInitializer = filterInitializer.trim();
-      if (filterInitializer.equals(AuthenticationFilterInitializer.class
-        .getName())) {
-        modifiedInitializers = true;
-        continue;
-      }
-      target.add(filterInitializer);
-    }
-    String actualInitializers =
-        org.apache.commons.lang.StringUtils.join(target, ",");
-    if (modifiedInitializers) {
-      conf.set("hadoop.http.filter.initializers", actualInitializers);
-    }
+    TimelineServerUtils.addTimelineAuthFilter(
+        initializers, defaultInitializers, secretManagerService);
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
     String bindAddress = WebAppUtils.getWebAppBindURL(conf,
                           YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
                           WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
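
The rewritten startWebApp replaces ad hoc string splicing with a LinkedHashSet of default initializers handed to shared helpers. A rough, dependency-free sketch of the merge those helpers are expected to perform follows; TimelineServerUtils itself is defined outside this hunk, so this is an assumption about its behavior, not a quotation.

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class FilterMergeSketch {
      public static void main(String[] args) {
        String configured = "org.example.FilterA,org.example.FilterB";
        Set<String> merged = new LinkedHashSet<>();
        for (String name : configured.split(",")) {
          name = name.trim();
          if (!name.isEmpty()) {
            merged.add(name);     // keep the admin's order
          }
        }
        merged.add("org.example.FilterB");      // duplicate: ignored
        merged.add("org.example.TimelineAuth"); // default: appended once
        System.out.println(String.join(",", merged));
        // org.example.FilterA,org.example.FilterB,org.example.TimelineAuth
      }
    }
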
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
similarity index 79%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
index 0c6892a..85d8cca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
@@ -37,18 +36,16 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * The service wrapper of {@link TimelineDelegationTokenSecretManager}
+ * The service wrapper of {@link TimelineV1DelegationTokenSecretManager}.
  */
 @Private
 @Unstable
-public class TimelineDelegationTokenSecretManagerService extends
-    AbstractService {
-
-  private TimelineDelegationTokenSecretManager secretManager = null;
+public class TimelineV1DelegationTokenSecretManagerService extends
+    TimelineDelgationTokenSecretManagerService {
   private TimelineStateStore stateStore = null;
 
-  public TimelineDelegationTokenSecretManagerService() {
-    super(TimelineDelegationTokenSecretManagerService.class.getName());
+  public TimelineV1DelegationTokenSecretManagerService() {
+    super(TimelineV1DelegationTokenSecretManagerService.class.getName());
   }
 
   @Override
@@ -58,19 +55,7 @@
       stateStore = createStateStore(conf);
       stateStore.init(conf);
     }
-
-    long secretKeyInterval =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL);
-    long tokenMaxLifetime =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME);
-    long tokenRenewInterval =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
-    secretManager = new TimelineDelegationTokenSecretManager(secretKeyInterval,
-        tokenMaxLifetime, tokenRenewInterval, 3600000, stateStore);
-    super.init(conf);
+    super.serviceInit(conf);
   }
 
   @Override
@@ -78,10 +63,9 @@
     if (stateStore != null) {
       stateStore.start();
       TimelineServiceState state = stateStore.loadState();
-      secretManager.recover(state);
+      ((TimelineV1DelegationTokenSecretManager)
+          getTimelineDelegationTokenSecretManager()).recover(state);
     }
-
-    secretManager.startThreads();
     super.serviceStart();
   }
 
@@ -90,9 +74,18 @@
     if (stateStore != null) {
       stateStore.stop();
     }
+    super.serviceStop();
+  }
 
-    secretManager.stopThreads();
-    super.stop();
+  @Override
+  protected AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+      createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval) {
+    return new TimelineV1DelegationTokenSecretManager(secretKeyInterval,
+        tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval,
+        stateStore);
   }
 
   protected TimelineStateStore createStateStore(
@@ -104,27 +97,20 @@
   }
 
   /**
-   * Ge the instance of {link #TimelineDelegationTokenSecretManager}
-   *
-   * @return the instance of {link #TimelineDelegationTokenSecretManager}
+   * Delegation token secret manager for ATSv1 and ATSv1.5.
    */
-  public TimelineDelegationTokenSecretManager
-  getTimelineDelegationTokenSecretManager() {
-    return secretManager;
-  }
-
   @Private
   @Unstable
-  public static class TimelineDelegationTokenSecretManager extends
+  public static class TimelineV1DelegationTokenSecretManager extends
       AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
 
     public static final Logger LOG =
-        LoggerFactory.getLogger(TimelineDelegationTokenSecretManager.class);
+        LoggerFactory.getLogger(TimelineV1DelegationTokenSecretManager.class);
 
     private TimelineStateStore stateStore;
 
     /**
-     * Create a timeline secret manager
+     * Create a timeline v1 secret manager.
      * @param delegationKeyUpdateInterval the number of milliseconds for rolling
      *        new secret keys.
      * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
@@ -135,7 +121,7 @@
      *        scanned for expired tokens in milliseconds
      * @param stateStore timeline service state store
      */
-    public TimelineDelegationTokenSecretManager(
+    public TimelineV1DelegationTokenSecretManager(
         long delegationKeyUpdateInterval,
         long delegationTokenMaxLifetime,
         long delegationTokenRenewInterval,
@@ -236,5 +222,4 @@
       }
     }
   }
-
 }
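
The net effect of this rename is a template-method split: lifecycle and interval handling move to an abstract base service (TimelineDelgationTokenSecretManagerService, defined elsewhere in this patch), while the V1 subclass contributes only state-store recovery and the factory hook. A compact, self-contained sketch of that shape, using stand-in types rather than the Hadoop classes:

    abstract class TokenManagerServiceSketch {
      private Object manager;

      // Template method: the base class owns when the manager is created;
      // the removal scan interval is fixed at one hour, as in the code above.
      final void init(long keyUpdateMs, long maxLifeMs, long renewMs) {
        manager = createManager(keyUpdateMs, maxLifeMs, renewMs, 3600000L);
      }

      final Object getManager() {
        return manager;
      }

      // Factory hook: subclasses pick the concrete secret manager, e.g. a
      // state-store-backed one for ATSv1/v1.5.
      abstract Object createManager(long keyUpdateMs, long maxLifeMs,
          long renewMs, long removalScanMs);
    }
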
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
similarity index 84%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
index 063f512..d918e8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
@@ -55,27 +55,31 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+/**
+ * Test cases for authentication via TimelineAuthenticationFilter while
+ * publishing entities for ATSv1.
+ */
 @RunWith(Parameterized.class)
-public class TestTimelineAuthenticationFilter {
+public class TestTimelineAuthenticationFilterForV1 {
 
   private static final String FOO_USER = "foo";
   private static final String BAR_USER = "bar";
   private static final String HTTP_USER = "HTTP";
 
-  private static final File testRootDir = new File(
+  private static final File TEST_ROOT_DIR = new File(
       System.getProperty("test.build.dir", "target/test-dir"),
-      TestTimelineAuthenticationFilter.class.getName() + "-root");
+          TestTimelineAuthenticationFilterForV1.class.getName() + "-root");
   private static File httpSpnegoKeytabFile = new File(
       KerberosTestUtils.getKeytabFile());
   private static String httpSpnegoPrincipal =
       KerberosTestUtils.getServerPrincipal();
   private static final String BASEDIR =
       System.getProperty("test.build.dir", "target/test-dir") + "/"
-          + TestTimelineAuthenticationFilter.class.getSimpleName();
+          + TestTimelineAuthenticationFilterForV1.class.getSimpleName();
 
   @Parameterized.Parameters
   public static Collection<Object[]> withSsl() {
-    return Arrays.asList(new Object[][] { { false }, { true } });
+    return Arrays.asList(new Object[][] {{false}, {true}});
   }
 
   private static MiniKdc testMiniKDC;
@@ -85,14 +89,14 @@
   private static Configuration conf;
   private static boolean withSsl;
 
-  public TestTimelineAuthenticationFilter(boolean withSsl) {
-    TestTimelineAuthenticationFilter.withSsl = withSsl;
+  public TestTimelineAuthenticationFilterForV1(boolean withSsl) {
+    TestTimelineAuthenticationFilterForV1.withSsl = withSsl;
   }
 
   @BeforeClass
   public static void setup() {
     try {
-      testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
+      testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR);
       testMiniKDC.start();
       testMiniKDC.createPrincipal(
           httpSpnegoKeytabFile, HTTP_USER + "/localhost");
@@ -111,11 +115,11 @@
           KerberosAuthenticationHandler.KEYTAB,
           httpSpnegoKeytabFile.getAbsolutePath());
       conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
+          "kerberos");
       conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
-        httpSpnegoPrincipal);
+          httpSpnegoPrincipal);
       conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
-        httpSpnegoKeytabFile.getAbsolutePath());
+          httpSpnegoKeytabFile.getAbsolutePath());
       conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
       conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
           MemoryTimelineStore.class, TimelineStore.class);
@@ -136,8 +140,8 @@
         FileUtil.fullyDelete(base);
         base.mkdirs();
         keystoresDir = new File(BASEDIR).getAbsolutePath();
-        sslConfDir =
-            KeyStoreTestUtil.getClasspathDir(TestTimelineAuthenticationFilter.class);
+        sslConfDir = KeyStoreTestUtil.getClasspathDir(
+            TestTimelineAuthenticationFilterForV1.class);
         KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
       }
 
@@ -145,6 +149,7 @@
       testTimelineServer.init(conf);
       testTimelineServer.start();
     } catch (Exception e) {
+      e.printStackTrace();
       assertTrue("Couldn't setup TimelineServer", false);
     }
   }
@@ -181,14 +186,14 @@
         TimelineClient client = createTimelineClientForUGI();
         TimelineEntity entityToStore = new TimelineEntity();
         entityToStore.setEntityType(
-            TestTimelineAuthenticationFilter.class.getName());
+            TestTimelineAuthenticationFilterForV1.class.getName());
         entityToStore.setEntityId("entity1");
         entityToStore.setStartTime(0L);
         TimelinePutResponse putResponse = client.putEntities(entityToStore);
         Assert.assertEquals(0, putResponse.getErrors().size());
         TimelineEntity entityToRead =
-            testTimelineServer.getTimelineStore().getEntity(
-                "entity1", TestTimelineAuthenticationFilter.class.getName(), null);
+            testTimelineServer.getTimelineStore().getEntity("entity1",
+                TestTimelineAuthenticationFilterForV1.class.getName(), null);
         Assert.assertNotNull(entityToRead);
         return null;
       }
@@ -202,13 +207,14 @@
       public Void call() throws Exception {
         TimelineClient client = createTimelineClientForUGI();
         TimelineDomain domainToStore = new TimelineDomain();
-        domainToStore.setId(TestTimelineAuthenticationFilter.class.getName());
+        domainToStore.setId(
+            TestTimelineAuthenticationFilterForV1.class.getName());
         domainToStore.setReaders("*");
         domainToStore.setWriters("*");
         client.putDomain(domainToStore);
         TimelineDomain domainToRead =
             testTimelineServer.getTimelineStore().getDomain(
-                TestTimelineAuthenticationFilter.class.getName());
+                TestTimelineAuthenticationFilterForV1.class.getName());
         Assert.assertNotNull(domainToRead);
         return null;
       }
@@ -218,22 +224,24 @@
   @Test
   public void testDelegationTokenOperations() throws Exception {
     TimelineClient httpUserClient =
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<TimelineClient>() {
-        @Override
-        public TimelineClient call() throws Exception {
-          return createTimelineClientForUGI();
-        }
-      });
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost",
+            new Callable<TimelineClient>() {
+            @Override
+            public TimelineClient call() throws Exception {
+              return createTimelineClientForUGI();
+            }
+          });
     UserGroupInformation httpUser =
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<UserGroupInformation>() {
-        @Override
-        public UserGroupInformation call() throws Exception {
-          return UserGroupInformation.getCurrentUser();
-        }
-      });
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost",
+            new Callable<UserGroupInformation>() {
+            @Override
+            public UserGroupInformation call() throws Exception {
+              return UserGroupInformation.getCurrentUser();
+            }
+          });
     // Let HTTP user to get the delegation for itself
     Token<TimelineDelegationTokenIdentifier> token =
-      httpUserClient.getDelegationToken(httpUser.getShortUserName());
+        httpUserClient.getDelegationToken(httpUser.getShortUserName());
     Assert.assertNotNull(token);
     TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
     Assert.assertNotNull(tDT);
@@ -317,7 +325,8 @@
       barUserClient.getDelegationToken(httpUser.getShortUserName());
       Assert.fail();
     } catch (Exception e) {
-      Assert.assertTrue(e.getCause() instanceof AuthorizationException || e.getCause() instanceof AuthenticationException);
+      Assert.assertTrue(e.getCause() instanceof AuthorizationException ||
+          e.getCause() instanceof AuthenticationException);
     }
   }
 }
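
Outside the Kerberos test harness, the flow this test verifies boils down to: a properly authenticated TimelineClient can obtain a delegation token naming a renewer, while requesting one on behalf of another user fails with an authorization or authentication error. A hedged sketch of the client-side call, assuming an already configured secure cluster and omitting error handling:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.yarn.client.api.TimelineClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

    public class TimelineTokenSketch {
      public static void main(String[] args) throws Exception {
        TimelineClient client = TimelineClient.createTimelineClient();
        client.init(new YarnConfiguration());
        client.start();
        // Ask for a token renewable by the current (Kerberos) user.
        String renewer =
            UserGroupInformation.getCurrentUser().getShortUserName();
        Token<TimelineDelegationTokenIdentifier> token =
            client.getDelegationToken(renewer);
        System.out.println(token.decodeIdentifier().getOwner());
        client.stop();
      }
    }
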
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 441a574..e8d3880 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -130,6 +130,11 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index c795e55..f238f79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.util.Records;
@@ -47,7 +48,7 @@
   public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
       MasterKey lastKnownContainerTokenMasterKey,
       MasterKey lastKnownNMTokenMasterKey, Set<NodeLabel> nodeLabels,
-      Map<ApplicationId, String> registeredCollectors) {
+      Map<ApplicationId, AppCollectorData> registeringCollectors) {
     NodeHeartbeatRequest nodeHeartbeatRequest =
         Records.newRecord(NodeHeartbeatRequest.class);
     nodeHeartbeatRequest.setNodeStatus(nodeStatus);
@@ -56,7 +57,7 @@
     nodeHeartbeatRequest
         .setLastKnownNMTokenMasterKey(lastKnownNMTokenMasterKey);
     nodeHeartbeatRequest.setNodeLabels(nodeLabels);
-    nodeHeartbeatRequest.setRegisteredCollectors(registeredCollectors);
+    nodeHeartbeatRequest.setRegisteringCollectors(registeringCollectors);
     return nodeHeartbeatRequest;
   }
 
@@ -79,7 +80,9 @@
       List<LogAggregationReport> logAggregationReportsForApps);
 
   // This tells RM registered collectors' address info on this node
-  public abstract Map<ApplicationId, String> getRegisteredCollectors();
-  public abstract void setRegisteredCollectors(Map<ApplicationId,
-      String> appCollectorsMap);
+  public abstract Map<ApplicationId, AppCollectorData>
+      getRegisteringCollectors();
+
+  public abstract void setRegisteringCollectors(Map<ApplicationId,
+      AppCollectorData> appCollectorsMap);
 }
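
With this change the NM heartbeat carries per-app collector data (address plus optional token and RM stamp) instead of a bare address string. A small sketch against the record APIs shown in the hunks above; the address is a placeholder, and running this relies on Hadoop's record factory resolving the PB implementations added later in the patch:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
    import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
    import org.apache.hadoop.yarn.util.Records;

    public class HeartbeatCollectorSketch {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1234L, 1);
        Map<ApplicationId, AppCollectorData> registering = new HashMap<>();
        // Unstamped data: rmIdentifier/version stay at -1 until the RM
        // acknowledges this collector.
        registering.put(appId,
            AppCollectorData.newInstance(appId, "localhost:9999"));
        NodeHeartbeatRequest req =
            Records.newRecord(NodeHeartbeatRequest.class);
        req.setRegisteringCollectors(registering);
        System.out.println(req.getRegisteringCollectors().size()); // 1
      }
    }
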
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
index 7568bbb..2ebca57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
@@ -47,10 +48,9 @@
   public abstract List<ApplicationId> getApplicationsToCleanup();
 
   // This tells NM the collectors' address info of related apps
-  public abstract Map<ApplicationId, String> getAppCollectorsMap();
-
-  public abstract void setAppCollectorsMap(
-      Map<ApplicationId, String> appCollectorsMap);
+  public abstract Map<ApplicationId, AppCollectorData> getAppCollectors();
+  public abstract void setAppCollectors(
+      Map<ApplicationId, AppCollectorData> appCollectorsMap);
 
   public abstract void setResponseId(int responseId);
 
@@ -104,10 +104,10 @@
 
   public abstract void setResource(Resource resource);
 
-  public abstract List<Container> getContainersToDecrease();
+  public abstract List<Container> getContainersToUpdate();
 
-  public abstract void addAllContainersToDecrease(
-      Collection<Container> containersToDecrease);
+  public abstract void addAllContainersToUpdate(
+      Collection<Container> containersToUpdate);
 
   public abstract ContainerQueuingLimit getContainerQueuingLimit();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
index 3498de9..a4c1a38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
@@ -22,14 +22,15 @@
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.util.Records;
 
 @Private
 public abstract class ReportNewCollectorInfoRequest {
 
   public static ReportNewCollectorInfoRequest newInstance(
-      List<AppCollectorsMap> appCollectorsList) {
+      List<AppCollectorData> appCollectorsList) {
     ReportNewCollectorInfoRequest request =
         Records.newRecord(ReportNewCollectorInfoRequest.class);
     request.setAppCollectorsList(appCollectorsList);
@@ -37,17 +38,17 @@
   }
 
   public static ReportNewCollectorInfoRequest newInstance(
-      ApplicationId id, String collectorAddr) {
+      ApplicationId id, String collectorAddr, Token token) {
     ReportNewCollectorInfoRequest request =
         Records.newRecord(ReportNewCollectorInfoRequest.class);
     request.setAppCollectorsList(
-        Arrays.asList(AppCollectorsMap.newInstance(id, collectorAddr)));
+        Arrays.asList(AppCollectorData.newInstance(id, collectorAddr, token)));
     return request;
   }
 
-  public abstract List<AppCollectorsMap> getAppCollectorsList();
+  public abstract List<AppCollectorData> getAppCollectorsList();
 
   public abstract void setAppCollectorsList(
-      List<AppCollectorsMap> appCollectorsList);
+      List<AppCollectorData> appCollectorsList);
 
 }
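
Reporting a new collector now includes the collector's delegation token alongside its address, matching the three-argument newInstance added above. A brief usage sketch; the address is a placeholder:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;

    public class ReportCollectorSketch {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1234L, 1);
        // The token may be null in insecure mode; in secure mode the NM
        // passes the collector's delegation token with its address.
        ReportNewCollectorInfoRequest req =
            ReportNewCollectorInfoRequest.newInstance(
                appId, "node1.example.com:8188", null);
        System.out.println(req.getAppCollectorsList().size()); // 1
      }
    }
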
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
index d0c1198..1ffd223 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -26,16 +26,20 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorsMapProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
@@ -58,7 +62,7 @@
   private Set<NodeLabel> labels = null;
   private List<LogAggregationReport> logAggregationReportsForApps = null;
 
-  private Map<ApplicationId, String> registeredCollectors = null;
+  private Map<ApplicationId, AppCollectorData> registeringCollectors = null;
 
   public NodeHeartbeatRequestPBImpl() {
     builder = NodeHeartbeatRequestProto.newBuilder();
@@ -114,8 +118,8 @@
     if (this.logAggregationReportsForApps != null) {
       addLogAggregationStatusForAppsToProto();
     }
-    if (this.registeredCollectors != null) {
-      addRegisteredCollectorsToProto();
+    if (this.registeringCollectors != null) {
+      addRegisteringCollectorsToProto();
     }
   }
 
@@ -158,14 +162,23 @@
     return ((LogAggregationReportPBImpl) value).getProto();
   }
 
-  private void addRegisteredCollectorsToProto() {
+  private void addRegisteringCollectorsToProto() {
     maybeInitBuilder();
-    builder.clearRegisteredCollectors();
-    for (Map.Entry<ApplicationId, String> entry :
-        registeredCollectors.entrySet()) {
-      builder.addRegisteredCollectors(AppCollectorsMapProto.newBuilder()
-          .setAppId(convertToProtoFormat(entry.getKey()))
-          .setAppCollectorAddr(entry.getValue()));
+    builder.clearRegisteringCollectors();
+    for (Map.Entry<ApplicationId, AppCollectorData> entry :
+        registeringCollectors.entrySet()) {
+      AppCollectorData data = entry.getValue();
+      AppCollectorDataProto.Builder appCollectorDataBuilder =
+          AppCollectorDataProto.newBuilder()
+              .setAppId(convertToProtoFormat(entry.getKey()))
+              .setAppCollectorAddr(data.getCollectorAddr())
+              .setRmIdentifier(data.getRMIdentifier())
+              .setVersion(data.getVersion());
+      if (data.getCollectorToken() != null) {
+        appCollectorDataBuilder.setAppCollectorToken(
+            convertToProtoFormat(data.getCollectorToken()));
+      }
+      builder.addRegisteringCollectors(appCollectorDataBuilder);
     }
   }
 
@@ -251,35 +264,42 @@
   }
 
   @Override
-  public Map<ApplicationId, String> getRegisteredCollectors() {
-    if (this.registeredCollectors != null) {
-      return this.registeredCollectors;
+  public Map<ApplicationId, AppCollectorData> getRegisteringCollectors() {
+    if (this.registeringCollectors != null) {
+      return this.registeringCollectors;
     }
     initRegisteredCollectors();
-    return registeredCollectors;
+    return registeringCollectors;
   }
 
   private void initRegisteredCollectors() {
     NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
-    List<AppCollectorsMapProto> list = p.getRegisteredCollectorsList();
+    List<AppCollectorDataProto> list = p.getRegisteringCollectorsList();
     if (!list.isEmpty()) {
-      this.registeredCollectors = new HashMap<>();
-      for (AppCollectorsMapProto c : list) {
+      this.registeringCollectors = new HashMap<>();
+      for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
-        this.registeredCollectors.put(appId, c.getAppCollectorAddr());
+        Token collectorToken = null;
+        if (c.hasAppCollectorToken()) {
+          collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        }
+        AppCollectorData data = AppCollectorData.newInstance(appId,
+            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
+            collectorToken);
+        this.registeringCollectors.put(appId, data);
       }
     }
   }
 
   @Override
-  public void setRegisteredCollectors(
-      Map<ApplicationId, String> registeredCollectors) {
+  public void setRegisteringCollectors(
+      Map<ApplicationId, AppCollectorData> registeredCollectors) {
     if (registeredCollectors == null || registeredCollectors.isEmpty()) {
       return;
     }
     maybeInitBuilder();
-    this.registeredCollectors = new HashMap<ApplicationId, String>();
-    this.registeredCollectors.putAll(registeredCollectors);
+    this.registeringCollectors = new HashMap<>();
+    this.registeringCollectors.putAll(registeredCollectors);
   }
 
   private NodeStatusPBImpl convertFromProtoFormat(NodeStatusProto p) {
@@ -306,6 +326,14 @@
     return ((MasterKeyPBImpl)t).getProto();
   }
 
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
   @Override
   public Set<NodeLabel> getNodeLabels() {
     initNodeLabels();
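
Both PBImpl rewrites in this patch follow the same lazy record pattern: Java-side fields shadow the proto builder, getters materialize objects from the proto on first read, and a merge step writes everything back before the proto is built. A dependency-free sketch of that pattern, with stand-in types rather than the generated protobuf classes:

    import java.util.HashMap;
    import java.util.Map;

    final class LazyRecordSketch {
      private final Map<String, String> proto = new HashMap<>(); // stand-in proto
      private Map<String, String> local;                         // Java-side view

      void put(String key, String value) {
        get().put(key, value);          // mutate only the local view
      }

      Map<String, String> get() {
        if (local == null) {
          local = new HashMap<>(proto); // init-from-proto on first access
        }
        return local;
      }

      Map<String, String> build() {
        if (local != null) {            // mergeLocalToBuilder analogue
          proto.clear();
          proto.putAll(local);
        }
        return proto;
      }

      public static void main(String[] args) {
        LazyRecordSketch rec = new LazyRecordSketch();
        rec.put("appId", "app_1_1");
        System.out.println(rec.build()); // {appId=app_1_1}
      }
    }
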
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index 51c1a786..11f5f61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -26,26 +26,30 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ContainerQueuingLimitProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SignalContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorsMapProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SystemCredentialsForAppsProto;
@@ -70,12 +74,12 @@
   private List<ApplicationId> applicationsToCleanup = null;
   private Map<ApplicationId, ByteBuffer> systemCredentials = null;
   private Resource resource = null;
-  private Map<ApplicationId, String> appCollectorsMap = null;
+  private Map<ApplicationId, AppCollectorData> appCollectorsMap = null;
 
   private MasterKey containerTokenMasterKey = null;
   private MasterKey nmTokenMasterKey = null;
   private ContainerQueuingLimit containerQueuingLimit = null;
-  private List<Container> containersToDecrease = null;
+  private List<Container> containersToUpdate = null;
   private List<SignalContainerRequest> containersToSignal = null;
 
   public NodeHeartbeatResponsePBImpl() {
@@ -119,8 +123,8 @@
     if (this.systemCredentials != null) {
       addSystemCredentialsToProto();
     }
-    if (this.containersToDecrease != null) {
-      addContainersToDecreaseToProto();
+    if (this.containersToUpdate != null) {
+      addContainersToUpdateToProto();
     }
     if (this.containersToSignal != null) {
       addContainersToSignalToProto();
@@ -146,11 +150,21 @@
 
   private void addAppCollectorsMapToProto() {
     maybeInitBuilder();
-    builder.clearAppCollectorsMap();
-    for (Map.Entry<ApplicationId, String> entry : appCollectorsMap.entrySet()) {
-      builder.addAppCollectorsMap(AppCollectorsMapProto.newBuilder()
-          .setAppId(convertToProtoFormat(entry.getKey()))
-          .setAppCollectorAddr(entry.getValue()));
+    builder.clearAppCollectors();
+    for (Map.Entry<ApplicationId, AppCollectorData> entry
+        : appCollectorsMap.entrySet()) {
+      AppCollectorData data = entry.getValue();
+      AppCollectorDataProto.Builder appCollectorDataBuilder =
+          AppCollectorDataProto.newBuilder()
+              .setAppId(convertToProtoFormat(entry.getKey()))
+              .setAppCollectorAddr(data.getCollectorAddr())
+              .setRmIdentifier(data.getRMIdentifier())
+              .setVersion(data.getVersion());
+      if (data.getCollectorToken() != null) {
+        appCollectorDataBuilder.setAppCollectorToken(
+            convertToProtoFormat(data.getCollectorToken()));
+      }
+      builder.addAppCollectors(appCollectorDataBuilder);
     }
   }
 
@@ -499,39 +513,39 @@
     builder.addAllApplicationsToCleanup(iterable);
   }
 
-  private void initContainersToDecrease() {
-    if (this.containersToDecrease != null) {
+  private void initContainersToUpdate() {
+    if (this.containersToUpdate != null) {
       return;
     }
     NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
-    List<ContainerProto> list = p.getContainersToDecreaseList();
-    this.containersToDecrease = new ArrayList<>();
+    List<ContainerProto> list = p.getContainersToUpdateList();
+    this.containersToUpdate = new ArrayList<>();
 
     for (ContainerProto c : list) {
-      this.containersToDecrease.add(convertFromProtoFormat(c));
+      this.containersToUpdate.add(convertFromProtoFormat(c));
     }
   }
 
   @Override
-  public List<Container> getContainersToDecrease() {
-    initContainersToDecrease();
-    return this.containersToDecrease;
+  public List<Container> getContainersToUpdate() {
+    initContainersToUpdate();
+    return this.containersToUpdate;
   }
 
   @Override
-  public void addAllContainersToDecrease(
-      final Collection<Container> containersToDecrease) {
-    if (containersToDecrease == null) {
+  public void addAllContainersToUpdate(
+      final Collection<Container> containersToBeUpdated) {
+    if (containersToBeUpdated == null) {
       return;
     }
-    initContainersToDecrease();
-    this.containersToDecrease.addAll(containersToDecrease);
+    initContainersToUpdate();
+    this.containersToUpdate.addAll(containersToBeUpdated);
   }
 
-  private void addContainersToDecreaseToProto() {
+  private void addContainersToUpdateToProto() {
     maybeInitBuilder();
-    builder.clearContainersToDecrease();
-    if (this.containersToDecrease == null) {
+    builder.clearContainersToUpdate();
+    if (this.containersToUpdate == null) {
       return;
     }
     Iterable<ContainerProto> iterable = new
@@ -539,7 +553,7 @@
       @Override
       public Iterator<ContainerProto> iterator() {
         return new Iterator<ContainerProto>() {
-          private Iterator<Container> iter = containersToDecrease.iterator();
+          private Iterator<Container> iter = containersToUpdate.iterator();
           @Override
           public boolean hasNext() {
             return iter.hasNext();
@@ -555,7 +569,7 @@
         };
       }
     };
-    builder.addAllContainersToDecrease(iterable);
+    builder.addAllContainersToUpdate(iterable);
   }
 
   @Override
@@ -568,7 +582,7 @@
   }
 
   @Override
-  public Map<ApplicationId, String> getAppCollectorsMap() {
+  public Map<ApplicationId, AppCollectorData> getAppCollectors() {
     if (this.appCollectorsMap != null) {
       return this.appCollectorsMap;
     }
@@ -589,12 +603,19 @@
 
   private void initAppCollectorsMap() {
     NodeHeartbeatResponseProtoOrBuilder p = viaProto ? proto : builder;
-    List<AppCollectorsMapProto> list = p.getAppCollectorsMapList();
+    List<AppCollectorDataProto> list = p.getAppCollectorsList();
     if (!list.isEmpty()) {
       this.appCollectorsMap = new HashMap<>();
-      for (AppCollectorsMapProto c : list) {
+      for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
-        this.appCollectorsMap.put(appId, c.getAppCollectorAddr());
+        Token collectorToken = null;
+        if (c.hasAppCollectorToken()) {
+          collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        }
+        AppCollectorData data = AppCollectorData.newInstance(appId,
+            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
+            collectorToken);
+        this.appCollectorsMap.put(appId, data);
       }
     }
   }
@@ -611,14 +632,14 @@
   }
 
   @Override
-  public void setAppCollectorsMap(
-      Map<ApplicationId, String> appCollectorsMap) {
-    if (appCollectorsMap == null || appCollectorsMap.isEmpty()) {
+  public void setAppCollectors(
+      Map<ApplicationId, AppCollectorData> appCollectors) {
+    if (appCollectors == null || appCollectors.isEmpty()) {
       return;
     }
     maybeInitBuilder();
-    this.appCollectorsMap = new HashMap<ApplicationId, String>();
-    this.appCollectorsMap.putAll(appCollectorsMap);
+    this.appCollectorsMap = new HashMap<>();
+    this.appCollectorsMap.putAll(appCollectors);
   }
 
   @Override
@@ -773,5 +794,13 @@
       SignalContainerRequest t) {
     return ((SignalContainerRequestPBImpl)t).getProto();
   }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
 }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
index c6f6619..5ffc3a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
@@ -20,12 +20,12 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorsMapProto;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.AppCollectorDataPBImpl;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ReportNewCollectorInfoRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ReportNewCollectorInfoRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
-import org.apache.hadoop.yarn.server.api.records.impl.pb.AppCollectorsMapPBImpl;
 
 public class ReportNewCollectorInfoRequestPBImpl extends
     ReportNewCollectorInfoRequest {
@@ -36,7 +36,7 @@
   private ReportNewCollectorInfoRequestProto.Builder builder = null;
   private boolean viaProto = false;
 
-  private List<AppCollectorsMap> collectorsList = null;
+  private List<AppCollectorData> collectorsList = null;
 
   public ReportNewCollectorInfoRequestPBImpl() {
     builder = ReportNewCollectorInfoRequestProto.newBuilder();
@@ -96,9 +96,9 @@
   private void addLocalCollectorsToProto() {
     maybeInitBuilder();
     builder.clearAppCollectors();
-    List<AppCollectorsMapProto> protoList =
-        new ArrayList<AppCollectorsMapProto>();
-    for (AppCollectorsMap m : this.collectorsList) {
+    List<AppCollectorDataProto> protoList =
+        new ArrayList<AppCollectorDataProto>();
+    for (AppCollectorData m : this.collectorsList) {
       protoList.add(convertToProtoFormat(m));
     }
     builder.addAllAppCollectors(protoList);
@@ -106,16 +106,16 @@
 
   private void initLocalCollectorsList() {
     ReportNewCollectorInfoRequestProtoOrBuilder p = viaProto ? proto : builder;
-    List<AppCollectorsMapProto> list =
+    List<AppCollectorDataProto> list =
         p.getAppCollectorsList();
-    this.collectorsList = new ArrayList<AppCollectorsMap>();
-    for (AppCollectorsMapProto m : list) {
+    this.collectorsList = new ArrayList<AppCollectorData>();
+    for (AppCollectorDataProto m : list) {
       this.collectorsList.add(convertFromProtoFormat(m));
     }
   }
 
   @Override
-  public List<AppCollectorsMap> getAppCollectorsList() {
+  public List<AppCollectorData> getAppCollectorsList() {
     if (this.collectorsList == null) {
       initLocalCollectorsList();
     }
@@ -123,7 +123,7 @@
   }
 
   @Override
-  public void setAppCollectorsList(List<AppCollectorsMap> appCollectorsList) {
+  public void setAppCollectorsList(List<AppCollectorData> appCollectorsList) {
     maybeInitBuilder();
     if (appCollectorsList == null) {
       builder.clearAppCollectors();
@@ -131,14 +131,14 @@
     this.collectorsList = appCollectorsList;
   }
 
-  private AppCollectorsMapPBImpl convertFromProtoFormat(
-      AppCollectorsMapProto p) {
-    return new AppCollectorsMapPBImpl(p);
+  private AppCollectorDataPBImpl convertFromProtoFormat(
+      AppCollectorDataProto p) {
+    return new AppCollectorDataPBImpl(p);
   }
 
-  private AppCollectorsMapProto convertToProtoFormat(
-      AppCollectorsMap m) {
-    return ((AppCollectorsMapPBImpl) m).getProto();
+  private AppCollectorDataProto convertToProtoFormat(
+      AppCollectorData m) {
+    return ((AppCollectorDataPBImpl) m).getProto();
   }
 
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
new file mode 100644
index 0000000..5266dca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.util.Records;
+
+
+@Private
+@InterfaceStability.Unstable
+public abstract class AppCollectorData {
+
+  protected static final long DEFAULT_TIMESTAMP_VALUE = -1;
+
+  public static AppCollectorData newInstance(
+      ApplicationId id, String collectorAddr, long rmIdentifier, long version,
+      Token token) {
+    AppCollectorData appCollectorData =
+        Records.newRecord(AppCollectorData.class);
+    appCollectorData.setApplicationId(id);
+    appCollectorData.setCollectorAddr(collectorAddr);
+    appCollectorData.setRMIdentifier(rmIdentifier);
+    appCollectorData.setVersion(version);
+    appCollectorData.setCollectorToken(token);
+    return appCollectorData;
+  }
+
+  public static AppCollectorData newInstance(
+      ApplicationId id, String collectorAddr, long rmIdentifier, long version) {
+    return newInstance(id, collectorAddr, rmIdentifier, version, null);
+  }
+
+  public static AppCollectorData newInstance(ApplicationId id,
+      String collectorAddr, Token token) {
+    return newInstance(id, collectorAddr, DEFAULT_TIMESTAMP_VALUE,
+        DEFAULT_TIMESTAMP_VALUE, token);
+  }
+
+  public static AppCollectorData newInstance(ApplicationId id,
+      String collectorAddr) {
+    return newInstance(id, collectorAddr, null);
+  }
+
+  /**
+   * Returns whether one collector data item happens before another. A null
+   * data item happens before any non-null item. A non-null data item A
+   * happens before another non-null item B when A's rmIdentifier is less
+   * than B's rmIdentifier, or when they share the same rmIdentifier and
+   * A's version is less than B's.
+   *
+   * @param dataA first collector data item.
+   * @param dataB second collector data item.
+   * @return true if dataA happens before dataB.
+   */
+  public static boolean happensBefore(AppCollectorData dataA,
+      AppCollectorData dataB) {
+    if (dataA == null && dataB == null) {
+      return false;
+    } else if (dataA == null || dataB == null) {
+      return dataA == null;
+    }
+
+    return
+        (dataA.getRMIdentifier() < dataB.getRMIdentifier())
+        || ((dataA.getRMIdentifier() == dataB.getRMIdentifier())
+            && (dataA.getVersion() < dataB.getVersion()));
+  }
+
+  /**
+   * Returns whether the collector data has been stamped by the RM with an
+   * RM cluster timestamp and a version number.
+   *
+   * @return true if the RM has already assigned a timestamp to this
+   * collector; otherwise the RM has not yet recognized the existence of
+   * this collector.
+   */
+  public boolean isStamped() {
+    return (getRMIdentifier() != DEFAULT_TIMESTAMP_VALUE)
+        || (getVersion() != DEFAULT_TIMESTAMP_VALUE);
+  }
+
+  public abstract ApplicationId getApplicationId();
+
+  public abstract void setApplicationId(ApplicationId id);
+
+  public abstract String getCollectorAddr();
+
+  public abstract void setCollectorAddr(String addr);
+
+  public abstract long getRMIdentifier();
+
+  public abstract void setRMIdentifier(long rmId);
+
+  public abstract long getVersion();
+
+  public abstract void setVersion(long version);
+
+  /**
+   * Get the delegation token for the app collector, which the AM will use
+   * to publish entities.
+   * @return the delegation token for the app collector.
+   */
+  public abstract Token getCollectorToken();
+
+  public abstract void setCollectorToken(Token token);
+}
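
The happensBefore ordering above is what lets the RM and NM reconcile duplicate collector announcements: (rmIdentifier, version) pairs compare lexicographically, and null sorts before everything. A small illustration against the factory methods defined in this file; the addresses are placeholders, and running this relies on Hadoop's record factory resolving the PB implementation added later in the patch:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.api.records.AppCollectorData;

    public class HappensBeforeSketch {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1L, 1);
        AppCollectorData a =
            AppCollectorData.newInstance(appId, "hostA:1", 100L, 1L);
        AppCollectorData b =
            AppCollectorData.newInstance(appId, "hostB:1", 100L, 2L);
        System.out.println(AppCollectorData.happensBefore(null, a)); // true
        System.out.println(AppCollectorData.happensBefore(a, b));    // true
        System.out.println(AppCollectorData.happensBefore(b, a));    // false
        System.out.println(a.isStamped()); // true: rmIdentifier != -1
      }
    }
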
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorsMap.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorsMap.java
deleted file mode 100644
index 07e1d92..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorsMap.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.util.Records;
-
-
-@Private
-public abstract class AppCollectorsMap {
-
-  public static AppCollectorsMap newInstance(
-      ApplicationId id, String collectorAddr) {
-    AppCollectorsMap appCollectorsMap =
-        Records.newRecord(AppCollectorsMap.class);
-    appCollectorsMap.setApplicationId(id);
-    appCollectorsMap.setCollectorAddr(collectorAddr);
-    return appCollectorsMap;
-  }
-
-  public abstract ApplicationId getApplicationId();
-
-  public abstract void setApplicationId(ApplicationId id);
-
-  public abstract String getCollectorAddr();
-
-  public abstract void setCollectorAddr(String addr);
-
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
new file mode 100644
index 0000000..c08e9ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
@@ -0,0 +1,227 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProtoOrBuilder;
+
+import com.google.protobuf.TextFormat;
+
+@Private
+@Unstable
+public class AppCollectorDataPBImpl extends AppCollectorData {
+
+  private AppCollectorDataProto proto =
+      AppCollectorDataProto.getDefaultInstance();
+
+  private AppCollectorDataProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private ApplicationId appId = null;
+  private String collectorAddr = null;
+  private Long rmIdentifier = null;
+  private Long version = null;
+  private Token collectorToken = null;
+
+  public AppCollectorDataPBImpl() {
+    builder = AppCollectorDataProto.newBuilder();
+  }
+
+  public AppCollectorDataPBImpl(AppCollectorDataProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public AppCollectorDataProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.appId == null && p.hasAppId()) {
+      this.appId = convertFromProtoFormat(p.getAppId());
+    }
+    return this.appId;
+  }
+
+  @Override
+  public String getCollectorAddr() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorAddr == null
+        && p.hasAppCollectorAddr()) {
+      this.collectorAddr = p.getAppCollectorAddr();
+    }
+    return this.collectorAddr;
+  }
+
+  @Override
+  public void setApplicationId(ApplicationId id) {
+    maybeInitBuilder();
+    if (id == null) {
+      builder.clearAppId();
+    }
+    this.appId = id;
+  }
+
+  @Override
+  public void setCollectorAddr(String collectorAddr) {
+    maybeInitBuilder();
+    if (collectorAddr == null) {
+      builder.clearAppCollectorAddr();
+    }
+    this.collectorAddr = collectorAddr;
+  }
+
+  @Override
+  public long getRMIdentifier() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.rmIdentifier == null && p.hasRmIdentifier()) {
+      this.rmIdentifier = p.getRmIdentifier();
+    }
+    if (this.rmIdentifier != null) {
+      return this.rmIdentifier;
+    } else {
+      return AppCollectorData.DEFAULT_TIMESTAMP_VALUE;
+    }
+  }
+
+  @Override
+  public void setRMIdentifier(long rmId) {
+    maybeInitBuilder();
+    this.rmIdentifier = rmId;
+    builder.setRmIdentifier(rmId);
+  }
+
+  @Override
+  public long getVersion() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.version == null && p.hasVersion()) {
+      this.version = p.getVersion();
+    }
+    if (this.version != null) {
+      return this.version;
+    } else {
+      return AppCollectorData.DEFAULT_TIMESTAMP_VALUE;
+    }
+  }
+
+  @Override
+  public void setVersion(long version) {
+    maybeInitBuilder();
+    this.version = version;
+    builder.setVersion(version);
+  }
+
+  @Override
+  public Token getCollectorToken() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorToken != null) {
+      return this.collectorToken;
+    }
+    if (!p.hasAppCollectorToken()) {
+      return null;
+    }
+    this.collectorToken = new TokenPBImpl(p.getAppCollectorToken());
+    return this.collectorToken;
+  }
+
+  @Override
+  public void setCollectorToken(Token token) {
+    maybeInitBuilder();
+    if (token == null) {
+      builder.clearAppCollectorToken();
+    }
+    this.collectorToken = token;
+  }
+
+  private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
+    return new ApplicationIdPBImpl(p);
+  }
+
+  private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
+    return ((ApplicationIdPBImpl) t).getProto();
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = AppCollectorDataProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.appId != null) {
+      builder.setAppId(convertToProtoFormat(this.appId));
+    }
+    if (this.collectorAddr != null) {
+      builder.setAppCollectorAddr(this.collectorAddr);
+    }
+    if (this.rmIdentifier != null) {
+      builder.setRmIdentifier(this.rmIdentifier);
+    }
+    if (this.version != null) {
+      builder.setVersion(this.version);
+    }
+    if (this.collectorToken != null) {
+      builder.setAppCollectorToken(
+          ((TokenPBImpl)this.collectorToken).getProto());
+    }
+  }
+}
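The implementation above follows the usual YARN protobuf-record pattern: setters cache values in local fields, and getProto() folds them back into the builder through mergeLocalToProto() before freezing the message. A minimal round-trip sketch under that assumption, with illustrative values:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
    import org.apache.hadoop.yarn.server.api.records.impl.pb.AppCollectorDataPBImpl;

    public class AppCollectorDataRoundTrip {
      public static void main(String[] args) {
        AppCollectorDataPBImpl data = new AppCollectorDataPBImpl();
        data.setApplicationId(ApplicationId.newInstance(1234L, 1));
        data.setCollectorAddr("host1:1234");
        data.setRMIdentifier(10L);
        data.setVersion(2L);

        // getProto() merges the locally cached fields into the builder and
        // builds the immutable message that crosses the wire.
        AppCollectorDataProto proto = data.getProto();

        // Rebuilding from the proto yields an equal record, since equals()
        // compares the underlying protos.
        AppCollectorDataPBImpl copy = new AppCollectorDataPBImpl(proto);
        System.out.println(data.equals(copy)); // true
      }
    }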
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorsMapPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorsMapPBImpl.java
deleted file mode 100644
index 3740035..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorsMapPBImpl.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.yarn.server.api.records.impl.pb;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
-
-import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorsMapProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorsMapProtoOrBuilder;
-
-import com.google.protobuf.TextFormat;
-
-@Private
-@Unstable
-public class AppCollectorsMapPBImpl extends AppCollectorsMap {
-
-  private AppCollectorsMapProto proto =
-      AppCollectorsMapProto.getDefaultInstance();
-
-  private AppCollectorsMapProto.Builder builder = null;
-  private boolean viaProto = false;
-
-  private ApplicationId appId = null;
-  private String collectorAddr = null;
-
-  public AppCollectorsMapPBImpl() {
-    builder = AppCollectorsMapProto.newBuilder();
-  }
-
-  public AppCollectorsMapPBImpl(AppCollectorsMapProto proto) {
-    this.proto = proto;
-    viaProto = true;
-  }
-
-  public AppCollectorsMapProto getProto() {
-    mergeLocalToProto();
-    proto = viaProto ? proto : builder.build();
-    viaProto = true;
-    return proto;
-  }
-
-  @Override
-  public int hashCode() {
-    return getProto().hashCode();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (other == null) {
-      return false;
-    }
-    if (other.getClass().isAssignableFrom(this.getClass())) {
-      return this.getProto().equals(this.getClass().cast(other).getProto());
-    }
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return TextFormat.shortDebugString(getProto());
-  }
-
-  @Override
-  public ApplicationId getApplicationId() {
-    AppCollectorsMapProtoOrBuilder p = viaProto ? proto : builder;
-    if (this.appId == null && p.hasAppId()) {
-      this.appId = convertFromProtoFormat(p.getAppId());
-    }
-    return this.appId;
-  }
-
-  @Override
-  public String getCollectorAddr() {
-    AppCollectorsMapProtoOrBuilder p = viaProto ? proto : builder;
-    if (this.collectorAddr == null
-        && p.hasAppCollectorAddr()) {
-      this.collectorAddr = p.getAppCollectorAddr();
-    }
-    return this.collectorAddr;
-  }
-
-  @Override
-  public void setApplicationId(ApplicationId id) {
-    maybeInitBuilder();
-    if (id == null) {
-      builder.clearAppId();
-    }
-    this.appId = id;
-  }
-
-  @Override
-  public void setCollectorAddr(String collectorAddr) {
-    maybeInitBuilder();
-    if (collectorAddr == null) {
-      builder.clearAppCollectorAddr();
-    }
-    this.collectorAddr = collectorAddr;
-  }
-
-  private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
-    return new ApplicationIdPBImpl(p);
-  }
-
-  private ApplicationIdProto convertToProtoFormat(ApplicationId t) {
-    return ((ApplicationIdPBImpl) t).getProto();
-  }
-
-  private void maybeInitBuilder() {
-    if (viaProto || builder == null) {
-      builder = AppCollectorsMapProto.newBuilder(proto);
-    }
-    viaProto = false;
-  }
-
-  private void mergeLocalToProto() {
-    if (viaProto) {
-      maybeInitBuilder();
-    }
-    mergeLocalToBuilder();
-    proto = builder.build();
-    viaProto = true;
-  }
-
-  private void mergeLocalToBuilder() {
-    if (this.appId != null) {
-      builder.setAppId(convertToProtoFormat(this.appId));
-    }
-    if (this.collectorAddr != null) {
-      builder.setAppCollectorAddr(this.collectorAddr);
-    }
-  }
-
-}
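The two deletions above retire AppCollectorsMap and its PB implementation in favor of AppCollectorData, which carries the same (applicationId, collectorAddr) pair plus the RM stamp and an optional collector token. A short migration sketch for callers of the removed two-argument factory (values are illustrative):

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.api.records.AppCollectorData;

    public class CollectorRecordMigration {
      public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1234L, 1);

        // Before this change:
        //   AppCollectorsMap map = AppCollectorsMap.newInstance(appId, addr);
        // After: the same pair, with rmIdentifier/version left unstamped and
        // no collector token set until the RM assigns them.
        AppCollectorData data =
            AppCollectorData.newInstance(appId, "host1:1234");
        System.out.println(data.getCollectorAddr());
      }
    }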
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/package-info.java
similarity index 84%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/package-info.java
index 5d14c6f..4ce3896f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,11 +15,5 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+/** Server records PB implementations. */
+package org.apache.hadoop.yarn.server.api.records.impl.pb;
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index 63d8e42..533f9c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException;
+import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics;
 import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
@@ -72,6 +73,8 @@
 import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils;
 import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -137,6 +140,7 @@
   private String url;
   private int maximumPoolSize;
   private HikariDataSource dataSource = null;
+  private final Clock clock = new MonotonicClock();
 
   @Override
   public void init(Configuration conf) throws YarnException {
@@ -203,7 +207,9 @@
       cstmt.registerOutParameter(9, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not add a new subcluster into FederationStateStore
@@ -222,8 +228,11 @@
 
       LOG.info(
           "Registered the SubCluster " + subClusterId + " into the StateStore");
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to register the SubCluster " + subClusterId
               + " into the StateStore",
@@ -260,7 +269,9 @@
       cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not deregister the subcluster from FederationStateStore
@@ -278,8 +289,11 @@
 
       LOG.info("Deregistered the SubCluster " + subClusterId + " state to "
           + state.toString());
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to deregister the sub-cluster " + subClusterId + " state to "
               + state.toString(),
@@ -317,7 +331,9 @@
       cstmt.registerOutParameter(4, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not update the subcluster in FederationStateStore
@@ -336,8 +352,11 @@
 
       LOG.info("Heartbeated the StateStore for the specified SubCluster "
           + subClusterId);
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to heartbeat the StateStore for the specified SubCluster "
               + subClusterId,
@@ -378,7 +397,9 @@
       cstmt.registerOutParameter(9, java.sql.Types.VARCHAR);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.execute();
+      long stopTime = clock.getTime();
 
       String amRMAddress = cstmt.getString(2);
       String clientRMAddress = cstmt.getString(3);
@@ -403,6 +424,9 @@
           clientRMAddress, rmAdminAddress, webAppAddress, lastHeartBeat, state,
           lastStartTime, capability);
 
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
      // Check if the output is a valid subcluster
       try {
         FederationMembershipStateStoreInputValidator
@@ -417,6 +441,7 @@
             + subClusterInfo.toString());
       }
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to obtain the SubCluster information for " + subClusterId, e);
     } finally {
@@ -439,7 +464,9 @@
       cstmt = conn.prepareCall(CALL_SP_GET_SUBCLUSTERS);
 
       // Execute the query
+      long startTime = clock.getTime();
       rs = cstmt.executeQuery();
+      long stopTime = clock.getTime();
 
       while (rs.next()) {
 
@@ -459,6 +486,10 @@
             amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress,
             lastHeartBeat, state, lastStartTime, capability);
 
+        FederationStateStoreClientMetrics
+            .succeededStateStoreCall(stopTime - startTime);
+
        // Check if the output is a valid subcluster
         try {
           FederationMembershipStateStoreInputValidator
@@ -477,6 +508,7 @@
       }
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to obtain the information for all the SubClusters ", e);
     } finally {
@@ -513,11 +545,16 @@
       cstmt.registerOutParameter(4, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       subClusterHome = cstmt.getString(3);
       SubClusterId subClusterIdHome = SubClusterId.newInstance(subClusterHome);
 
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
       // For failover reason, we check the returned SubClusterId.
       // If it is equal to the subclusterId we sent, the call added the new
       // application into FederationStateStore. If the call returns a different
@@ -554,6 +591,7 @@
       }
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils
           .logAndThrowRetriableException(LOG,
               "Unable to insert the newly generated application "
@@ -592,7 +630,9 @@
       cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not update the application in FederationStateStore
@@ -611,8 +651,11 @@
       LOG.info(
           "Update the SubCluster to {} for application {} in the StateStore",
           subClusterId, appId);
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils
           .logAndThrowRetriableException(LOG,
               "Unable to update the application "
@@ -645,7 +688,9 @@
       cstmt.registerOutParameter(2, java.sql.Types.VARCHAR);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.execute();
+      long stopTime = clock.getTime();
 
       if (cstmt.getString(2) != null) {
         homeRM = SubClusterId.newInstance(cstmt.getString(2));
@@ -659,7 +704,12 @@
         LOG.debug("Got the information about the specified application  "
             + request.getApplicationId() + ". The AM is running in " + homeRM);
       }
+
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to obtain the application information "
               + "for the specified application " + request.getApplicationId(),
@@ -688,7 +738,9 @@
       cstmt = conn.prepareCall(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER);
 
       // Execute the query
+      long startTime = clock.getTime();
       rs = cstmt.executeQuery();
+      long stopTime = clock.getTime();
 
       while (rs.next()) {
 
@@ -701,7 +753,11 @@
             SubClusterId.newInstance(homeSubCluster)));
       }
 
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to obtain the information for all the applications ", e);
     } finally {
@@ -731,7 +787,9 @@
       cstmt.registerOutParameter(2, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not delete the application from FederationStateStore
@@ -750,8 +808,11 @@
 
       LOG.info("Delete from the StateStore the application: {}",
           request.getApplicationId());
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to delete the application " + request.getApplicationId(), e);
     } finally {
@@ -782,7 +843,9 @@
       cstmt.registerOutParameter(3, java.sql.Types.VARBINARY);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
      // Check if the output is a valid policy
       if (cstmt.getString(2) != null && cstmt.getBytes(3) != null) {
@@ -798,7 +861,11 @@
         return null;
       }
 
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to select the policy for the queue :" + request.getQueue(),
           e);
@@ -833,7 +900,9 @@
       cstmt.registerOutParameter(4, java.sql.Types.INTEGER);
 
       // Execute the query
+      long startTime = clock.getTime();
       cstmt.executeUpdate();
+      long stopTime = clock.getTime();
 
       // Check the ROWCOUNT value, if it is equal to 0 it means the call
       // did not add a new policy into FederationStateStore
@@ -852,8 +921,11 @@
 
       LOG.info("Insert into the state store the policy for the queue: "
           + policyConf.getQueue());
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
 
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to insert the newly generated policy for the queue :"
               + policyConf.getQueue(),
@@ -880,7 +952,9 @@
       cstmt = conn.prepareCall(CALL_SP_GET_POLICIES_CONFIGURATIONS);
 
       // Execute the query
+      long startTime = clock.getTime();
       rs = cstmt.executeQuery();
+      long stopTime = clock.getTime();
 
       while (rs.next()) {
 
@@ -894,7 +968,12 @@
                 ByteBuffer.wrap(policyInfo));
         policyConfigurations.add(subClusterPolicyConfiguration);
       }
+
+      FederationStateStoreClientMetrics
+          .succeededStateStoreCall(stopTime - startTime);
+
     } catch (SQLException e) {
+      FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
           "Unable to obtain the policy information for all the queues.", e);
     } finally {
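Every stored-procedure call in this file now follows the same instrumentation pattern: read a MonotonicClock before and after the JDBC call, report the elapsed time through succeededStateStoreCall on the success path, and bump failedStateStoreCall in the SQLException handler. A condensed sketch of the pattern; the wrapper class and method are hypothetical, not part of this patch:

    import java.sql.CallableStatement;
    import java.sql.SQLException;

    import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics;
    import org.apache.hadoop.yarn.util.Clock;
    import org.apache.hadoop.yarn.util.MonotonicClock;

    public class TimedStateStoreCall {
      private final Clock clock = new MonotonicClock();

      /** Hypothetical helper: times a single stored-procedure invocation. */
      public void execute(CallableStatement cstmt) throws SQLException {
        long startTime = clock.getTime();
        try {
          cstmt.executeUpdate();
          // Only successful calls contribute to the latency rate.
          FederationStateStoreClientMetrics
              .succeededStateStoreCall(clock.getTime() - startTime);
        } catch (SQLException e) {
          // Failures are counted but not timed.
          FederationStateStoreClientMetrics.failedStateStoreCall();
          throw e;
        }
      }
    }

Note that the methods above record success only after the result has been validated, so a call that returns unexpected data can throw without being counted as successful.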
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java
new file mode 100644
index 0000000..6ae7d3c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java
@@ -0,0 +1,634 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.impl;
+
+import static org.apache.hadoop.util.curator.ZKCuratorManager.getNodePath;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.List;
+import java.util.TimeZone;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.curator.ZKCuratorManager;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto;
+import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto;
+import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterIdPBImpl;
+import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterInfoPBImpl;
+import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterPolicyConfigurationPBImpl;
+import org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator;
+import org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator;
+import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
+import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * ZooKeeper implementation of {@link FederationStateStore}.
+ *
+ * The znode structure is as follows:
+ * ROOT_DIR_PATH
+ * |--- MEMBERSHIP
+ * |     |----- SC1
+ * |     |----- SC2
+ * |--- APPLICATION
+ * |     |----- APP1
+ * |     |----- APP2
+ * |--- POLICY
+ *       |----- QUEUE1
+ *       |----- QUEUE2
+ */
+public class ZookeeperFederationStateStore implements FederationStateStore {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZookeeperFederationStateStore.class);
+
+  private final static String ROOT_ZNODE_NAME_MEMBERSHIP = "memberships";
+  private final static String ROOT_ZNODE_NAME_APPLICATION = "applications";
+  private final static String ROOT_ZNODE_NAME_POLICY = "policies";
+
+  /** Interface to Zookeeper. */
+  private ZKCuratorManager zkManager;
+
+  /** Directory to store the state store data. */
+  private String baseZNode;
+
+  private String appsZNode;
+  private String membershipZNode;
+  private String policiesZNode;
+
+  @Override
+  public void init(Configuration conf) throws YarnException {
+    LOG.info("Initializing ZooKeeper connection");
+
+    baseZNode = conf.get(
+        YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH,
+        YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH);
+    try {
+      this.zkManager = new ZKCuratorManager(conf);
+      this.zkManager.start();
+    } catch (IOException e) {
+      LOG.error("Cannot initialize the ZK connection", e);
+    }
+
+    // Base znodes
+    membershipZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_MEMBERSHIP);
+    appsZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_APPLICATION);
+    policiesZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_POLICY);
+
+    // Create base znode for each entity
+    try {
+      zkManager.createRootDirRecursively(membershipZNode);
+      zkManager.createRootDirRecursively(appsZNode);
+      zkManager.createRootDirRecursively(policiesZNode);
+    } catch (Exception e) {
+      String errMsg = "Cannot create base directories: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+  }
+
+  @Override
+  public void close() throws Exception {
+    if (zkManager != null) {
+      zkManager.close();
+    }
+  }
+
+  @Override
+  public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster(
+      AddApplicationHomeSubClusterRequest request) throws YarnException {
+
+    FederationApplicationHomeSubClusterStoreInputValidator.validate(request);
+    ApplicationHomeSubCluster app = request.getApplicationHomeSubCluster();
+    ApplicationId appId = app.getApplicationId();
+
+    // Try to write the subcluster
+    SubClusterId homeSubCluster = app.getHomeSubCluster();
+    try {
+      putApp(appId, homeSubCluster, false);
+    } catch (Exception e) {
+      String errMsg = "Cannot add application home subcluster for " + appId;
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    // Check for the actual subcluster
+    try {
+      homeSubCluster = getApp(appId);
+    } catch (Exception e) {
+      String errMsg = "Cannot check app home subcluster for " + appId;
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    return AddApplicationHomeSubClusterResponse
+        .newInstance(homeSubCluster);
+  }
+
+  @Override
+  public UpdateApplicationHomeSubClusterResponse
+      updateApplicationHomeSubCluster(
+          UpdateApplicationHomeSubClusterRequest request)
+              throws YarnException {
+
+    FederationApplicationHomeSubClusterStoreInputValidator.validate(request);
+    ApplicationHomeSubCluster app = request.getApplicationHomeSubCluster();
+    ApplicationId appId = app.getApplicationId();
+    SubClusterId homeSubCluster = getApp(appId);
+    if (homeSubCluster == null) {
+      String errMsg = "Application " + appId + " does not exist";
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    SubClusterId newSubClusterId =
+        request.getApplicationHomeSubCluster().getHomeSubCluster();
+    putApp(appId, newSubClusterId, true);
+    return UpdateApplicationHomeSubClusterResponse.newInstance();
+  }
+
+  @Override
+  public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster(
+      GetApplicationHomeSubClusterRequest request) throws YarnException {
+
+    FederationApplicationHomeSubClusterStoreInputValidator.validate(request);
+    ApplicationId appId = request.getApplicationId();
+    SubClusterId homeSubCluster = getApp(appId);
+    if (homeSubCluster == null) {
+      String errMsg = "Application " + appId + " does not exist";
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return GetApplicationHomeSubClusterResponse.newInstance(
+        ApplicationHomeSubCluster.newInstance(appId, homeSubCluster));
+  }
+
+  @Override
+  public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster(
+      GetApplicationsHomeSubClusterRequest request) throws YarnException {
+    List<ApplicationHomeSubCluster> result = new ArrayList<>();
+
+    try {
+      for (String child : zkManager.getChildren(appsZNode)) {
+        ApplicationId appId = ApplicationId.fromString(child);
+        SubClusterId homeSubCluster = getApp(appId);
+        ApplicationHomeSubCluster app =
+            ApplicationHomeSubCluster.newInstance(appId, homeSubCluster);
+        result.add(app);
+      }
+    } catch (Exception e) {
+      String errMsg = "Cannot get apps: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    return GetApplicationsHomeSubClusterResponse.newInstance(result);
+  }
+
+  @Override
+  public DeleteApplicationHomeSubClusterResponse
+      deleteApplicationHomeSubCluster(
+          DeleteApplicationHomeSubClusterRequest request)
+              throws YarnException {
+
+    FederationApplicationHomeSubClusterStoreInputValidator.validate(request);
+    ApplicationId appId = request.getApplicationId();
+    String appZNode = getNodePath(appsZNode, appId.toString());
+
+    boolean exists = false;
+    try {
+      exists = zkManager.exists(appZNode);
+    } catch (Exception e) {
+      String errMsg = "Cannot check app: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    if (!exists) {
+      String errMsg = "Application " + appId + " does not exist";
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    try {
+      zkManager.delete(appZNode);
+    } catch (Exception e) {
+      String errMsg = "Cannot delete app: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    return DeleteApplicationHomeSubClusterResponse.newInstance();
+  }
+
+  @Override
+  public SubClusterRegisterResponse registerSubCluster(
+      SubClusterRegisterRequest request) throws YarnException {
+    FederationMembershipStateStoreInputValidator.validate(request);
+    SubClusterInfo subClusterInfo = request.getSubClusterInfo();
+    SubClusterId subclusterId = subClusterInfo.getSubClusterId();
+
+    // Update the heartbeat time
+    long currentTime = getCurrentTime();
+    subClusterInfo.setLastHeartBeat(currentTime);
+
+    try {
+      putSubclusterInfo(subclusterId, subClusterInfo, true);
+    } catch (Exception e) {
+      String errMsg = "Cannot register subcluster: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return SubClusterRegisterResponse.newInstance();
+  }
+
+  @Override
+  public SubClusterDeregisterResponse deregisterSubCluster(
+      SubClusterDeregisterRequest request) throws YarnException {
+    FederationMembershipStateStoreInputValidator.validate(request);
+    SubClusterId subClusterId = request.getSubClusterId();
+    SubClusterState state = request.getState();
+
+    // Get the current information and update it
+    SubClusterInfo subClusterInfo = getSubclusterInfo(subClusterId);
+    if (subClusterInfo == null) {
+      String errMsg = "SubCluster " + subClusterId + " not found";
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    } else {
+      subClusterInfo.setState(state);
+      putSubclusterInfo(subClusterId, subClusterInfo, true);
+    }
+
+    return SubClusterDeregisterResponse.newInstance();
+  }
+
+  @Override
+  public SubClusterHeartbeatResponse subClusterHeartbeat(
+      SubClusterHeartbeatRequest request) throws YarnException {
+
+    FederationMembershipStateStoreInputValidator.validate(request);
+    SubClusterId subClusterId = request.getSubClusterId();
+
+    SubClusterInfo subClusterInfo = getSubclusterInfo(subClusterId);
+    if (subClusterInfo == null) {
+      String errMsg = "SubCluster " + subClusterId
+          + " does not exist; cannot heartbeat";
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    long currentTime = getCurrentTime();
+    subClusterInfo.setLastHeartBeat(currentTime);
+    subClusterInfo.setState(request.getState());
+    subClusterInfo.setCapability(request.getCapability());
+
+    putSubclusterInfo(subClusterId, subClusterInfo, true);
+
+    return SubClusterHeartbeatResponse.newInstance();
+  }
+
+  @Override
+  public GetSubClusterInfoResponse getSubCluster(
+      GetSubClusterInfoRequest request) throws YarnException {
+
+    FederationMembershipStateStoreInputValidator.validate(request);
+    SubClusterId subClusterId = request.getSubClusterId();
+    SubClusterInfo subClusterInfo = null;
+    try {
+      subClusterInfo = getSubclusterInfo(subClusterId);
+      if (subClusterInfo == null) {
+        LOG.warn("The queried SubCluster: {} does not exist.", subClusterId);
+        return null;
+      }
+    } catch (Exception e) {
+      String errMsg = "Cannot get subcluster: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return GetSubClusterInfoResponse.newInstance(subClusterInfo);
+  }
+
+  @Override
+  public GetSubClustersInfoResponse getSubClusters(
+      GetSubClustersInfoRequest request) throws YarnException {
+    List<SubClusterInfo> result = new ArrayList<>();
+
+    try {
+      for (String child : zkManager.getChildren(membershipZNode)) {
+        SubClusterId subClusterId = SubClusterId.newInstance(child);
+        SubClusterInfo info = getSubclusterInfo(subClusterId);
+        if (!request.getFilterInactiveSubClusters() ||
+            info.getState().isActive()) {
+          result.add(info);
+        }
+      }
+    } catch (Exception e) {
+      String errMsg = "Cannot get subclusters: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return GetSubClustersInfoResponse.newInstance(result);
+  }
+
+  @Override
+  public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration(
+      GetSubClusterPolicyConfigurationRequest request) throws YarnException {
+
+    FederationPolicyStoreInputValidator.validate(request);
+    String queue = request.getQueue();
+    SubClusterPolicyConfiguration policy = null;
+    try {
+      policy = getPolicy(queue);
+    } catch (Exception e) {
+      String errMsg = "Cannot get policy: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+
+    if (policy == null) {
+      LOG.warn("Policy for queue: {} does not exist.", queue);
+      return null;
+    }
+    return GetSubClusterPolicyConfigurationResponse
+        .newInstance(policy);
+  }
+
+  @Override
+  public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration(
+      SetSubClusterPolicyConfigurationRequest request) throws YarnException {
+
+    FederationPolicyStoreInputValidator.validate(request);
+    SubClusterPolicyConfiguration policy =
+        request.getPolicyConfiguration();
+    try {
+      String queue = policy.getQueue();
+      putPolicy(queue, policy, true);
+    } catch (Exception e) {
+      String errMsg = "Cannot set policy: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return SetSubClusterPolicyConfigurationResponse.newInstance();
+  }
+
+  @Override
+  public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations(
+      GetSubClusterPoliciesConfigurationsRequest request) throws YarnException {
+    List<SubClusterPolicyConfiguration> result = new ArrayList<>();
+
+    try {
+      for (String child : zkManager.getChildren(policiesZNode)) {
+        SubClusterPolicyConfiguration policy = getPolicy(child);
+        result.add(policy);
+      }
+    } catch (Exception e) {
+      String errMsg = "Cannot get policies: " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return GetSubClusterPoliciesConfigurationsResponse.newInstance(result);
+  }
+
+  @Override
+  public Version getCurrentVersion() {
+    return null;
+  }
+
+  @Override
+  public Version loadVersion() {
+    return null;
+  }
+
+  /**
+   * Get the subcluster for an application.
+   * @param appId Application identifier.
+   * @return Subcluster identifier.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private SubClusterId getApp(final ApplicationId appId) throws YarnException {
+    String appZNode = getNodePath(appsZNode, appId.toString());
+
+    SubClusterId subClusterId = null;
+    byte[] data = get(appZNode);
+    if (data != null) {
+      try {
+        subClusterId = new SubClusterIdPBImpl(
+            SubClusterIdProto.parseFrom(data));
+      } catch (InvalidProtocolBufferException e) {
+        String errMsg = "Cannot parse application at " + appZNode;
+        FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+      }
+    }
+    return subClusterId;
+  }
+
+  /**
+   * Put an application.
+   * @param appId Application identifier.
+   * @param subClusterId Subcluster identifier.
+   * @param update Whether to overwrite the mapping if it already exists.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private void putApp(final ApplicationId appId,
+      final SubClusterId subClusterId, boolean update)
+          throws YarnException {
+    String appZNode = getNodePath(appsZNode, appId.toString());
+    SubClusterIdProto proto =
+        ((SubClusterIdPBImpl)subClusterId).getProto();
+    byte[] data = proto.toByteArray();
+    put(appZNode, data, update);
+  }
+
+  /**
+   * Get the current information for a subcluster from Zookeeper.
+   * @param subclusterId Subcluster identifier.
+   * @return Subcluster information or null if it doesn't exist.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private SubClusterInfo getSubclusterInfo(final SubClusterId subclusterId)
+      throws YarnException {
+    String memberZNode = getNodePath(membershipZNode, subclusterId.toString());
+
+    SubClusterInfo info = null;
+    byte[] data = get(memberZNode);
+    if (data != null) {
+      try {
+        info = new SubClusterInfoPBImpl(
+            SubClusterInfoProto.parseFrom(data));
+      } catch (InvalidProtocolBufferException e) {
+        String errMsg = "Cannot parse subcluster info at " + memberZNode;
+        FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+      }
+    }
+    return info;
+  }
+
+  /**
+   * Put the subcluster information in Zookeeper.
+   * @param subclusterId Subcluster identifier.
+   * @param subClusterInfo Subcluster information.
+   * @param update Whether to overwrite the znode if it already exists.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private void putSubclusterInfo(final SubClusterId subclusterId,
+      final SubClusterInfo subClusterInfo, final boolean update)
+          throws YarnException {
+    String memberZNode = getNodePath(membershipZNode, subclusterId.toString());
+    SubClusterInfoProto proto =
+        ((SubClusterInfoPBImpl)subClusterInfo).getProto();
+    byte[] data = proto.toByteArray();
+    put(memberZNode, data, update);
+  }
+
+  /**
+   * Get the queue policy from Zookeeper.
+   * @param queue Name of the queue.
+   * @return Subcluster policy configuration.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private SubClusterPolicyConfiguration getPolicy(final String queue)
+      throws YarnException {
+    String policyZNode = getNodePath(policiesZNode, queue);
+
+    SubClusterPolicyConfiguration policy = null;
+    byte[] data = get(policyZNode);
+    if (data != null) {
+      try {
+        policy = new SubClusterPolicyConfigurationPBImpl(
+            SubClusterPolicyConfigurationProto.parseFrom(data));
+      } catch (InvalidProtocolBufferException e) {
+        String errMsg = "Cannot parse policy at " + policyZNode;
+        FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+      }
+    }
+    return policy;
+  }
+
+  /**
+   * Put the queue policy in Zookeeper.
+   * @param queue Name of the queue.
+   * @param policy Subcluster policy configuration.
+   * @param update Whether to overwrite the policy if it already exists.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private void putPolicy(final String queue,
+      final SubClusterPolicyConfiguration policy, boolean update)
+          throws YarnException {
+    String policyZNode = getNodePath(policiesZNode, queue);
+
+    SubClusterPolicyConfigurationProto proto =
+        ((SubClusterPolicyConfigurationPBImpl)policy).getProto();
+    byte[] data = proto.toByteArray();
+    put(policyZNode, data, update);
+  }
+
+  /**
+   * Get data from a znode in Zookeeper.
+   * @param znode Path of the znode.
+   * @return Data in the znode.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private byte[] get(String znode) throws YarnException {
+    boolean exists = false;
+    try {
+      exists = zkManager.exists(znode);
+    } catch (Exception e) {
+      String errMsg = "Cannot find znode " + znode;
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    if (!exists) {
+      LOG.error("{} does not exist", znode);
+      return null;
+    }
+
+    byte[] data = null;
+    try {
+      data = zkManager.getData(znode);
+    } catch (Exception e) {
+      String errMsg = "Cannot get data from znode " + znode
+          + ": " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    return data;
+  }
+
+  /**
+   * Put data into a znode in Zookeeper.
+   * @param znode Path of the znode.
+   * @param data Data to write.
+   * @param update Whether to overwrite the znode if it already exists.
+   * @throws YarnException If it cannot contact ZooKeeper.
+   */
+  private void put(String znode, byte[] data, boolean update)
+      throws YarnException {
+    // Create the znode
+    boolean created = false;
+    try {
+      created = zkManager.create(znode);
+    } catch (Exception e) {
+      String errMsg = "Cannot create znode " + znode + ": " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+    if (!created) {
+      LOG.debug("{} not created", znode);
+      if (!update) {
+        LOG.info("{} already existed and we are not updating", znode);
+        return;
+      }
+    }
+
+    // Write the data into the znode
+    try {
+      zkManager.setData(znode, data, -1);
+    } catch (Exception e) {
+      String errMsg = "Cannot write data into znode " + znode
+          + ": " + e.getMessage();
+      FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
+    }
+  }
+
+  /**
+   * Get the current time.
+   * @return Current time in milliseconds.
+   */
+  private static long getCurrentTime() {
+    Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+    return cal.getTimeInMillis();
+  }
+}
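A minimal smoke test of the new store, assuming a reachable ZooKeeper quorum. The quorum key ("hadoop.zk.address", read by ZKCuratorManager) and the parent-path value are assumptions for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.federation.store.impl.ZookeeperFederationStateStore;
    import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
    import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;

    public class ZKStateStoreSmokeTest {
      public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        // Root under which the memberships/applications/policies znodes live.
        conf.set(YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH,
            "/federationstore");
        // Assumed common key for the ZooKeeper quorum used by ZKCuratorManager.
        conf.set("hadoop.zk.address", "localhost:2181");

        ZookeeperFederationStateStore store = new ZookeeperFederationStateStore();
        store.init(conf);
        try {
          // List every registered subcluster, including inactive ones.
          GetSubClustersInfoResponse response =
              store.getSubClusters(GetSubClustersInfoRequest.newInstance(false));
          for (SubClusterInfo info : response.getSubClusters()) {
            System.out.println(info.getSubClusterId() + " -> " + info.getState());
          }
        } finally {
          store.close();
        }
      }
    }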
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java
new file mode 100644
index 0000000..27b46cd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/FederationStateStoreClientMetrics.java
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.metrics;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Performance metrics for FederationStateStore implementations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+@Metrics(about = "Performance and usage metrics for Federation StateStore",
+         context = "fedr")
+public final class FederationStateStoreClientMetrics implements MetricsSource {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FederationStateStoreClientMetrics.class);
+
+  private static final MetricsRegistry REGISTRY =
+      new MetricsRegistry("FederationStateStoreClientMetrics");
+  private static final Method[] STATESTORE_API_METHODS =
+      FederationStateStore.class.getMethods();
+
+  // Map method names to counter objects
+  private static final Map<String, MutableCounterLong> API_TO_FAILED_CALLS =
+      new HashMap<String, MutableCounterLong>();
+  private static final Map<String, MutableRate> API_TO_SUCCESSFUL_CALLS =
+      new HashMap<String, MutableRate>();
+
+  // Provide quantile latency for each api call.
+  private static final Map<String, MutableQuantiles> API_TO_QUANTILE_METRICS =
+      new HashMap<String, MutableQuantiles>();
+
+  // Error string templates for logging calls from methods not in
+  // FederationStateStore API
+  private static final String UNKNOWN_FAIL_ERROR_MSG =
+      "Not recording failed call for unknown FederationStateStore method {}";
+  private static final String UNKNOWN_SUCCESS_ERROR_MSG =
+      "Not recording successful call for unknown "
+          + "FederationStateStore method {}";
+
+  // Aggregate metrics are shared, and don't have to be looked up per call
+  @Metric("Total number of successful calls and latency(ms)")
+  private static MutableRate totalSucceededCalls;
+
+  @Metric("Total number of failed StateStore calls")
+  private static MutableCounterLong totalFailedCalls;
+
+  // This must be initialized after the static members above, or the
+  // constructor will throw a NullPointerException
+  private static final FederationStateStoreClientMetrics S_INSTANCE =
+      DefaultMetricsSystem.instance()
+          .register(new FederationStateStoreClientMetrics());
+
+  public static synchronized FederationStateStoreClientMetrics getInstance() {
+    return S_INSTANCE;
+  }
+
+  private FederationStateStoreClientMetrics() {
+    // Create the metrics for each method and put them into the map
+    for (Method m : STATESTORE_API_METHODS) {
+      String methodName = m.getName();
+      LOG.debug("Registering Federation StateStore Client metrics for {}",
+          methodName);
+
+      // This metric only records the number of failed calls; it does not
+      // capture latency information
+      API_TO_FAILED_CALLS.put(methodName,
+          REGISTRY.newCounter(methodName + "_numFailedCalls",
+              "# failed calls to " + methodName, 0L));
+
+      // This metric records both the number and average latency of successful
+      // calls.
+      API_TO_SUCCESSFUL_CALLS.put(methodName,
+          REGISTRY.newRate(methodName + "_successfulCalls",
+              "# successful calls and latency(ms) for" + methodName));
+
+      // This metric records the quantile-based latency of each successful call,
+      // re-sampled every 10 seconds.
+      API_TO_QUANTILE_METRICS.put(methodName,
+          REGISTRY.newQuantiles(methodName + "Latency",
+              "Quantile latency (ms) for " + methodName, "ops", "latency", 10));
+    }
+  }
+
+  public static void failedStateStoreCall() {
+    String methodName =
+        Thread.currentThread().getStackTrace()[2].getMethodName();
+    MutableCounterLong methodMetric = API_TO_FAILED_CALLS.get(methodName);
+    if (methodMetric == null) {
+      LOG.error(UNKNOWN_FAIL_ERROR_MSG, methodName);
+      return;
+    }
+
+    totalFailedCalls.incr();
+    methodMetric.incr();
+  }
+
+  public static void succeededStateStoreCall(long duration) {
+    String methodName =
+        Thread.currentThread().getStackTrace()[2].getMethodName();
+    MutableRate methodMetric = API_TO_SUCCESSFUL_CALLS.get(methodName);
+    MutableQuantiles methodQuantileMetric =
+        API_TO_QUANTILE_METRICS.get(methodName);
+    if (methodMetric == null || methodQuantileMetric == null) {
+      LOG.error(UNKNOWN_SUCCESS_ERROR_MSG, methodName);
+      return;
+    }
+
+    totalSucceededCalls.add(duration);
+    methodMetric.add(duration);
+    methodQuantileMetric.add(duration);
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    REGISTRY.snapshot(collector.addRecord(REGISTRY.info()), all);
+  }
+
+  // Getters for unit testing
+  @VisibleForTesting
+  static long getNumFailedCallsForMethod(String methodName) {
+    return API_TO_FAILED_CALLS.get(methodName).value();
+  }
+
+  @VisibleForTesting
+  static long getNumSuccessfulCallsForMethod(String methodName) {
+    return API_TO_SUCCESSFUL_CALLS.get(methodName).lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  static double getLatencySuccessfulCallsForMethod(String methodName) {
+    return API_TO_SUCCESSFUL_CALLS.get(methodName).lastStat().mean();
+  }
+
+  @VisibleForTesting
+  static long getNumFailedCalls() {
+    return totalFailedCalls.value();
+  }
+
+  @VisibleForTesting
+  static long getNumSucceededCalls() {
+    return totalSucceededCalls.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  static double getLatencySucceededCalls() {
+    return totalSucceededCalls.lastStat().mean();
+  }
+}
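
Note the calling convention the getStackTrace()[2] lookups above imply: the recording helpers must be invoked directly from the FederationStateStore API method being measured, or the resolved method name will not match a registered metric and the call is dropped with an error log. A hedged sketch of a conforming implementation (the class name and method body are placeholders):

import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics;

public class ReportingStoreSketch {
  // Invoked as the API method itself, so that getStackTrace()[2] inside the
  // recorder resolves to "registerSubCluster", the key the metric maps use.
  public void registerSubCluster() {
    long startTime = Time.monotonicNow();
    try {
      // ... perform the actual state store operation here ...
      FederationStateStoreClientMetrics.succeededStateStoreCall(
          Time.monotonicNow() - startTime);
    } catch (RuntimeException e) {
      FederationStateStoreClientMetrics.failedStateStoreCall();
      throw e;
    }
  }
}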
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/package-info.java
new file mode 100644
index 0000000..eb548f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/metrics/package-info.java
@@ -0,0 +1,17 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.federation.store.metrics;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
similarity index 69%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
index ad8dc2c..f6d1863 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
@@ -23,27 +23,33 @@
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService.TimelineDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
+/**
+ * Timeline authentication filter provides delegation token support for ATSv1
+ * and ATSv2.
+ */
 @Private
 @Unstable
 public class TimelineAuthenticationFilter
     extends DelegationTokenAuthenticationFilter {
 
-  private static TimelineDelegationTokenSecretManager secretManager;
+  private static AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier> secretManager;
 
   @Override
   public void init(FilterConfig filterConfig) throws ServletException {
     filterConfig.getServletContext().setAttribute(
-        DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
-        secretManager);
+        DelegationTokenAuthenticationFilter.
+            DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretManager);
     super.init(filterConfig);
   }
 
   public static void setTimelineDelegationTokenSecretManager(
-      TimelineDelegationTokenSecretManager secretManager) {
-    TimelineAuthenticationFilter.secretManager = secretManager;
+      AbstractDelegationTokenSecretManager
+          <TimelineDelegationTokenIdentifier> secretMgr) {
+    TimelineAuthenticationFilter.secretManager = secretMgr;
   }
-
 }
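
The secret manager reaches the filter through static state, so ordering matters: it must be set before the servlet container initializes the filter, or init() publishes a null attribute. A two-line sketch, where secretManagerService stands in for a concrete TimelineDelgationTokenSecretManagerService subclass and is an assumption, not code from this patch:

// Must run before the web app (and therefore the filter) is started.
TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
    secretManagerService.getTimelineDelegationTokenSecretManager());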
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
similarity index 78%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 4e7c29a..3d8ce05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -51,30 +51,19 @@
 public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
 
   /**
-   * The configuration prefix of timeline HTTP authentication
+   * The configuration prefix of timeline HTTP authentication.
    */
-  public static final String PREFIX = "yarn.timeline-service.http-authentication.";
+  public static final String PREFIX =
+      "yarn.timeline-service.http-authentication.";
 
   @VisibleForTesting
   Map<String, String> filterConfig;
 
-  /**
-   * Initializes {@link TimelineAuthenticationFilter}
-   * <p>
-   * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
-   * configuration properties prefixed with {@value #PREFIX}
-   *
-   * @param container
-   *          The filter container
-   * @param conf
-   *          Configuration for run-time parameters
-   */
-  @Override
-  public void initFilter(FilterContainer container, Configuration conf) {
+  protected void setAuthFilterConfig(Configuration conf) {
     filterConfig = new HashMap<String, String>();
 
     // setting the cookie path to root '/' so it is used for all resources.
-    filterConfig.put(TimelineAuthenticationFilter.COOKIE_PATH, "/");
+    filterConfig.put(AuthenticationFilter.COOKIE_PATH, "/");
 
     for (Map.Entry<String, String> entry : conf) {
       String name = entry.getKey();
@@ -95,6 +84,41 @@
       }
     }
 
+    // Resolve _HOST into bind address
+    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
+    String principal =
+        filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
+    if (principal != null) {
+      try {
+        principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
+      } catch (IOException ex) {
+        throw new RuntimeException("Could not resolve Kerberos principal " +
+            "name: " + ex.toString(), ex);
+      }
+      filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
+          principal);
+    }
+  }
+
+  protected Map<String, String> getFilterConfig() {
+    return filterConfig;
+  }
+
+  /**
+   * Initializes {@link TimelineAuthenticationFilter}.
+   * <p>
+   * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
+   * configuration properties prefixed with {@value #PREFIX}.
+   *
+   * @param container
+   *          The filter container.
+   * @param conf
+   *          Configuration for run-time parameters.
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    setAuthFilterConfig(conf);
+
     String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
     if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
       filterConfig.put(AuthenticationFilter.AUTH_TYPE,
@@ -102,23 +126,7 @@
     } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
       filterConfig.put(AuthenticationFilter.AUTH_TYPE,
           KerberosDelegationTokenAuthenticationHandler.class.getName());
-
-      // Resolve _HOST into bind address
-      String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
-      String principal =
-          filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
-      if (principal != null) {
-        try {
-          principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-        } catch (IOException ex) {
-          throw new RuntimeException(
-              "Could not resolve Kerberos principal name: " + ex.toString(), ex);
-        }
-        filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
-            principal);
-      }
     }
-
     filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
         TimelineDelegationTokenIdentifier.KIND_NAME.toString());
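
To trace the refactored flow end to end, here is a hedged sketch of how a PREFIX-scoped property reaches the filter; the class and property values are examples for illustration, not defaults:

import org.apache.hadoop.conf.Configuration;

public final class TimelinePrefixSketch {
  static Configuration example() {
    Configuration conf = new Configuration();
    conf.set("yarn.timeline-service.http-authentication.type", "kerberos");
    conf.set("yarn.timeline-service.http-authentication.kerberos.principal",
        "HTTP/_HOST@EXAMPLE.COM");
    // setAuthFilterConfig(conf) copies these into filterConfig with the
    // PREFIX stripped and _HOST resolved against the HTTP bind address:
    //   type               -> kerberos
    //   kerberos.principal -> HTTP/host.example.com@EXAMPLE.COM
    // initFilter() then swaps "kerberos" for the
    // KerberosDelegationTokenAuthenticationHandler class name.
    return conf;
  }
}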
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java
new file mode 100644
index 0000000..2e95af2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+
+/**
+ * Abstract implementation of delegation token manager service for different
+ * versions of timeline service.
+ */
+public abstract class TimelineDelgationTokenSecretManagerService extends
+    AbstractService {
+
+  public TimelineDelgationTokenSecretManagerService(String name) {
+    super(name);
+  }
+
+  private static final long delegationTokenRemovalScanInterval = 3600000L;
+
+  private AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier> secretManager = null;
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    long secretKeyInterval =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL);
+    long tokenMaxLifetime =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME);
+    long tokenRenewInterval =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
+    secretManager = createTimelineDelegationTokenSecretManager(
+        secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
+        delegationTokenRemovalScanInterval);
+    super.serviceInit(conf);
+  }
+
+  protected abstract
+      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier>
+          createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval);
+
+  @Override
+  protected void serviceStart() throws Exception {
+    secretManager.startThreads();
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    secretManager.stopThreads();
+    super.serviceStop();
+  }
+
+  public AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+          getTimelineDelegationTokenSecretManager() {
+    return secretManager;
+  }
+}
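
A minimal concrete subclass, sketched to show what the abstract factory method is expected to return; the anonymous secret manager supplies only createIdentifier() and is an assumption for illustration, not code from this patch:

import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

public class SketchTimelineSecretManagerService
    extends TimelineDelgationTokenSecretManagerService {

  public SketchTimelineSecretManagerService() {
    super(SketchTimelineSecretManagerService.class.getName());
  }

  @Override
  protected AbstractDelegationTokenSecretManager
      <TimelineDelegationTokenIdentifier>
          createTimelineDelegationTokenSecretManager(long secretKeyInterval,
          long tokenMaxLifetime, long tokenRenewInterval,
          long tokenRemovalScanInterval) {
    // Real implementations return their version-specific manager here.
    return new AbstractDelegationTokenSecretManager
        <TimelineDelegationTokenIdentifier>(secretKeyInterval,
            tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval) {
      @Override
      public TimelineDelegationTokenIdentifier createIdentifier() {
        return new TimelineDelegationTokenIdentifier();
      }
    };
  }
}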
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
similarity index 70%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
index 5d14c6f..14a52e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
@@ -16,10 +16,11 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+/**
+ * Package org.apache.hadoop.yarn.server.timeline.security contains classes
+ * to timeline authentication filters and abstract delegation token service for
+ * ATSv1 and ATSv2.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timeline.security;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index 60a9a27..6531a75 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
@@ -83,7 +83,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(UnmanagedApplicationManager.class);
   private static final long AM_STATE_WAIT_TIMEOUT_MS = 10000;
-  private static final String APP_NAME = "UnmanagedAM";
+  public static final String APP_NAME = "UnmanagedAM";
   private static final String DEFAULT_QUEUE_CONFIG = "uam.default.queue.name";
 
   private BlockingQueue<AsyncAllocateRequestInfo> requestQueue;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
new file mode 100644
index 0000000..78bf20f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.util.timeline;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
+
+/**
+ * Set of utility methods to be used across timeline reader and collector.
+ */
+public final class TimelineServerUtils {
+  private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
+
+  private TimelineServerUtils() {
+  }
+
+  /**
+   * Sets filter initializers configuration based on existing configuration and
+   * default filters added by timeline service (such as timeline auth filter
+   * CORS filter).
+   * @param conf Configuration object.
+   * @param configuredInitializers Comma separated list of filter initializers.
+   * @param defaultInitializers Set of initializers added by default by timeline
+   *     service.
+   */
+  public static void setTimelineFilters(Configuration conf,
+      String configuredInitializers, Set<String> defaultInitializers) {
+    String[] parts = configuredInitializers.split(",");
+    Set<String> target = new LinkedHashSet<String>();
+    for (String filterInitializer : parts) {
+      filterInitializer = filterInitializer.trim();
+      if (filterInitializer.equals(
+          AuthenticationFilterInitializer.class.getName()) ||
+          filterInitializer.isEmpty()) {
+        continue;
+      }
+      target.add(filterInitializer);
+    }
+    target.addAll(defaultInitializers);
+    String actualInitializers =
+        org.apache.commons.lang.StringUtils.join(target, ",");
+    LOG.info("Filter initializers set for timeline service: " +
+        actualInitializers);
+    conf.set("hadoop.http.filter.initializers", actualInitializers);
+  }
+
+  /**
+   * Adds timeline authentication filter to the set of default filter
+   * initializers and assigns the delegation token manager service to it.
+   * @param initializers Comma separated list of filter initializers.
+   * @param defaultInitializers Set of initializers added by default by timeline
+   *     service.
+   * @param delegationTokenMgrService Delegation token manager service.
+   *     This will be used by timeline authentication filter to assign
+   *     delegation tokens.
+   */
+  public static void addTimelineAuthFilter(String initializers,
+      Set<String> defaultInitializers,
+      TimelineDelgationTokenSecretManagerService delegationTokenMgrService) {
+    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
+        delegationTokenMgrService.getTimelineDelegationTokenSecretManager());
+    if (!initializers.contains(
+        TimelineAuthenticationFilterInitializer.class.getName())) {
+      defaultInitializers.add(
+          TimelineAuthenticationFilterInitializer.class.getName());
+    }
+  }
+}
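
A usage sketch combining the two helpers the way a timeline daemon would at startup; conf and the concrete secret manager service are assumed to exist in the caller, and the wrapper class is illustrative only:

import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;

public final class TimelineFilterSetupSketch {
  // Wires in the auth filter before publishing the merged initializer list.
  static void setup(Configuration conf,
      TimelineDelgationTokenSecretManagerService secretManagerService) {
    Set<String> defaults = new LinkedHashSet<String>();
    String initializers = conf.get("hadoop.http.filter.initializers", "");
    TimelineServerUtils.addTimelineAuthFilter(
        initializers, defaults, secretManagerService);
    TimelineServerUtils.setTimelineFilters(conf, initializers, defaults);
  }
}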
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
similarity index 74%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
index 5d14c6f..75c6973 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
@@ -16,10 +16,10 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+/**
+ * Package org.apache.hadoop.yarn.server.util.timeline contains utility
+ * classes used
+ * by ATSv1 and ATSv2 on the server side.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.util.timeline;
+import org.apache.hadoop.classification.InterfaceAudience;
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index edb2d9c..c2ba677 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -22,6 +22,7 @@
 option java_generate_equals_and_hash = true;
 package hadoop.yarn;
 
+import "Security.proto";
 import "yarn_protos.proto";
 import "yarn_server_common_protos.proto";
 import "yarn_service_protos.proto";
@@ -90,7 +91,7 @@
   optional MasterKeyProto last_known_nm_token_master_key = 3;
   optional NodeLabelsProto nodeLabels = 4;
   repeated LogAggregationReportProto log_aggregation_reports_for_apps = 5;
-  repeated AppCollectorsMapProto registered_collectors = 6;
+  repeated AppCollectorDataProto registering_collectors = 6;
 }
 
 message LogAggregationReportProto {
@@ -111,11 +112,14 @@
   repeated ContainerIdProto containers_to_be_removed_from_nm = 9;
   repeated SystemCredentialsForAppsProto system_credentials_for_apps = 10;
   optional bool areNodeLabelsAcceptedByRM = 11 [default = false];
+  // to be deprecated in favour of containers_to_update
   repeated ContainerProto containers_to_decrease = 12;
   repeated SignalContainerRequestProto containers_to_signal = 13;
   optional ResourceProto resource = 14;
   optional ContainerQueuingLimitProto container_queuing_limit = 15;
-  repeated AppCollectorsMapProto app_collectors_map = 16;
+  repeated AppCollectorDataProto app_collectors = 16;
+  // to be used in place of containers_to_decrease
+  repeated ContainerProto containers_to_update = 17;
 }
 
 message ContainerQueuingLimitProto {
@@ -131,16 +135,19 @@
 ////////////////////////////////////////////////////////////////////////
 ////// From collector_nodemanager_protocol ////////////////////////////
 ////////////////////////////////////////////////////////////////////////
-message AppCollectorsMapProto {
-  optional ApplicationIdProto appId = 1;
-  optional string appCollectorAddr = 2;
+message AppCollectorDataProto {
+  optional ApplicationIdProto app_id = 1;
+  optional string app_collector_addr = 2;
+  optional int64 rm_identifier = 3 [default = -1];
+  optional int64 version = 4 [default = -1];
+  optional hadoop.common.TokenProto app_collector_token = 5;
 }
 
 //////////////////////////////////////////////////////
 /////// collector_nodemanager_protocol //////////////
 //////////////////////////////////////////////////////
 message ReportNewCollectorInfoRequestProto {
-  repeated AppCollectorsMapProto app_collectors = 1;
+  repeated AppCollectorDataProto app_collectors = 1;
 }
 
 message ReportNewCollectorInfoResponseProto {
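
On the Java side the renamed message surfaces as AppCollectorData; an illustrative construction with placeholder values, matching what the PB round-trip tests below exercise. The collector token (proto tag 5) is optional on the wire and may be left unset:

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.server.api.records.AppCollectorData;

public final class AppCollectorDataSketch {
  static AppCollectorData example() {
    // Placeholder application id, address, and token contents.
    AppCollectorData data = AppCollectorData.newInstance(
        ApplicationId.newInstance(1L, 1), "localhost:0");
    data.setCollectorToken(
        Token.newInstance(new byte[0], "kind", new byte[0], "service"));
    return data;
  }
}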
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 9775f5c..82dfaea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
@@ -72,12 +73,13 @@
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoResponse;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
@@ -93,6 +95,21 @@
       "collectors' number in ReportNewCollectorInfoRequest is not ONE.";
 
   public static final String DEFAULT_COLLECTOR_ADDR = "localhost:0";
+  private static final Token DEFAULT_COLLECTOR_TOKEN;
+  static {
+    TimelineDelegationTokenIdentifier identifier =
+        new TimelineDelegationTokenIdentifier();
+    identifier.setOwner(new Text("user"));
+    identifier.setRenewer(new Text("user"));
+    identifier.setRealUser(new Text("user"));
+    long now = Time.now();
+    identifier.setIssueDate(now);
+    identifier.setMaxDate(now + 1000L);
+    identifier.setMasterKeyId(500);
+    identifier.setSequenceNumber(5);
+    DEFAULT_COLLECTOR_TOKEN = Token.newInstance(identifier.getBytes(),
+        identifier.getKind().toString(), identifier.getBytes(), "localhost:0");
+  }
 
   public static final ApplicationId DEFAULT_APP_ID =
       ApplicationId.newInstance(0, 0);
@@ -173,7 +190,16 @@
     try {
       ReportNewCollectorInfoRequest request =
           ReportNewCollectorInfoRequest.newInstance(
-              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR);
+              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR, null);
+      proxy.reportNewCollectorInfo(request);
+    } catch (YarnException e) {
+      Assert.fail("RPC call failured is not expected here.");
+    }
+
+    try {
+      ReportNewCollectorInfoRequest request =
+          ReportNewCollectorInfoRequest.newInstance(
+              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR, DEFAULT_COLLECTOR_TOKEN);
       proxy.reportNewCollectorInfo(request);
     } catch (YarnException e) {
       Assert.fail("RPC call failured is not expected here.");
@@ -429,14 +455,16 @@
     public ReportNewCollectorInfoResponse reportNewCollectorInfo(
         ReportNewCollectorInfoRequest request)
         throws YarnException, IOException {
-      List<AppCollectorsMap> appCollectors = request.getAppCollectorsList();
+      List<AppCollectorData> appCollectors = request.getAppCollectorsList();
       if (appCollectors.size() == 1) {
         // check default appID and collectorAddr
-        AppCollectorsMap appCollector = appCollectors.get(0);
+        AppCollectorData appCollector = appCollectors.get(0);
         Assert.assertEquals(appCollector.getApplicationId(),
             DEFAULT_APP_ID);
         Assert.assertEquals(appCollector.getCollectorAddr(),
             DEFAULT_COLLECTOR_ADDR);
+        Assert.assertTrue(appCollector.getCollectorToken() == null ||
+            appCollector.getCollectorToken().equals(DEFAULT_COLLECTOR_TOKEN));
       } else {
         throw new YarnException(ILLEGAL_NUMBER_MESSAGE);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index b670c36..8b1d0bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
@@ -48,6 +49,7 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UnRegisterNodeManagerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
@@ -109,14 +111,14 @@
     original.setLastKnownNMTokenMasterKey(getMasterKey());
     original.setNodeStatus(getNodeStatus());
     original.setNodeLabels(getValidNodeLabels());
-    Map<ApplicationId, String> collectors = getCollectors();
-    original.setRegisteredCollectors(collectors);
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
+    original.setRegisteringCollectors(collectors);
     NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
         original.getProto());
     assertEquals(1, copy.getLastKnownContainerTokenMasterKey().getKeyId());
     assertEquals(1, copy.getLastKnownNMTokenMasterKey().getKeyId());
     assertEquals("localhost", copy.getNodeStatus().getNodeId().getHost());
-    assertEquals(collectors, copy.getRegisteredCollectors());
+    assertEquals(collectors, copy.getRegisteringCollectors());
     // check labels are coming with valid values
     Assert.assertTrue(original.getNodeLabels()
         .containsAll(copy.getNodeLabels()));
@@ -128,6 +130,16 @@
     Assert.assertEquals(0, copy.getNodeLabels().size());
   }
 
+  @Test
+  public void testNodeHBRequestPBImplWithNullCollectorToken() {
+    NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
+    original.setRegisteringCollectors(collectors);
+    NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
+        original.getProto());
+    assertEquals(collectors, copy.getRegisteringCollectors());
+  }
+
   /**
    * Test NodeHeartbeatRequestPBImpl.
    */
@@ -153,8 +165,8 @@
     original.setNextHeartBeatInterval(1000);
     original.setNodeAction(NodeAction.NORMAL);
     original.setResponseId(100);
-    Map<ApplicationId, String> collectors = getCollectors();
-    original.setAppCollectorsMap(collectors);
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
+    original.setAppCollectors(collectors);
 
     NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
         original.getProto());
@@ -164,7 +176,7 @@
     assertEquals(1, copy.getContainerTokenMasterKey().getKeyId());
     assertEquals(1, copy.getNMTokenMasterKey().getKeyId());
     assertEquals("testDiagnosticMessage", copy.getDiagnosticsMessage());
-    assertEquals(collectors, copy.getAppCollectorsMap());
+    assertEquals(collectors, copy.getAppCollectors());
     assertEquals(false, copy.getAreNodeLabelsAcceptedByRM());
    }
 
@@ -178,16 +190,26 @@
   }
 
   @Test
+  public void testNodeHBResponsePBImplWithNullCollectorToken() {
+    NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
+    original.setAppCollectors(collectors);
+    NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
+        original.getProto());
+    assertEquals(collectors, copy.getAppCollectors());
+  }
+
+  @Test
   public void testNodeHeartbeatResponsePBImplWithDecreasedContainers() {
     NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
-    original.addAllContainersToDecrease(
+    original.addAllContainersToUpdate(
         Arrays.asList(getDecreasedContainer(1, 2, 2048, 2),
             getDecreasedContainer(2, 3, 1024, 1)));
     NodeHeartbeatResponsePBImpl copy =
         new NodeHeartbeatResponsePBImpl(original.getProto());
-    assertEquals(1, copy.getContainersToDecrease().get(0)
+    assertEquals(1, copy.getContainersToUpdate().get(0)
         .getId().getContainerId());
-    assertEquals(1024, copy.getContainersToDecrease().get(1)
+    assertEquals(1024, copy.getContainersToUpdate().get(1)
         .getResource().getMemorySize());
   }
 
@@ -347,12 +369,18 @@
     return nodeLabels;
   }
 
-  private Map<ApplicationId, String> getCollectors() {
+  private Map<ApplicationId, AppCollectorData> getCollectors(
+      boolean hasNullCollectorToken) {
     ApplicationId appID = ApplicationId.newInstance(1L, 1);
     String collectorAddr = "localhost:0";
-    Map<ApplicationId, String> collectorMap =
-        new HashMap<ApplicationId, String>();
-    collectorMap.put(appID, collectorAddr);
+    AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr);
+    if (!hasNullCollectorToken) {
+      data.setCollectorToken(
+          Token.newInstance(new byte[0], "kind", new byte[0], "s"));
+    }
+    Map<ApplicationId, AppCollectorData> collectorMap =
+        new HashMap<>();
+    collectorMap.put(appID, data);
     return collectorMap;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java
new file mode 100644
index 0000000..390b803
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.impl;
+
+import java.io.IOException;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for ZookeeperFederationStateStore.
+ */
+public class TestZookeeperFederationStateStore
+    extends FederationStateStoreBaseTest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestZookeeperFederationStateStore.class);
+
+  /** Zookeeper test server. */
+  private static TestingServer curatorTestingServer;
+  private static CuratorFramework curatorFramework;
+
+  @Before
+  public void before() throws IOException, YarnException {
+    try {
+      curatorTestingServer = new TestingServer();
+      curatorTestingServer.start();
+      String connectString = curatorTestingServer.getConnectString();
+      curatorFramework = CuratorFrameworkFactory.builder()
+          .connectString(connectString)
+          .retryPolicy(new RetryNTimes(100, 100))
+          .build();
+      curatorFramework.start();
+
+      Configuration conf = new YarnConfiguration();
+      conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString);
+      setConf(conf);
+    } catch (Exception e) {
+      LOG.error("Cannot initialize ZooKeeper store", e);
+      throw new IOException(e);
+    }
+
+    super.before();
+  }
+
+  @After
+  public void after() throws Exception {
+    super.after();
+
+    curatorFramework.close();
+    try {
+      curatorTestingServer.stop();
+    } catch (IOException e) { // best-effort teardown; ignore stop failures
+    }
+  }
+
+  @Override
+  protected FederationStateStore createStateStore() {
+    Configuration conf = new Configuration();
+    super.setConf(conf);
+    return new ZookeeperFederationStateStore();
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/metrics/TestFederationStateStoreClientMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/metrics/TestFederationStateStoreClientMetrics.java
new file mode 100644
index 0000000..241d5e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/metrics/TestFederationStateStoreClientMetrics.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.metrics;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for {@link FederationStateStoreClientMetrics}.
+ *
+ */
+public class TestFederationStateStoreClientMetrics {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestFederationStateStoreClientMetrics.class);
+
+  private MockBadFederationStateStore badStateStore =
+      new MockBadFederationStateStore();
+  private MockGoodFederationStateStore goodStateStore =
+      new MockGoodFederationStateStore();
+
+  @Test
+  public void testAggregateMetricInit() {
+    LOG.info("Test: aggregate metrics are initialized correctly");
+
+    Assert.assertEquals(0,
+        FederationStateStoreClientMetrics.getNumSucceededCalls());
+    Assert.assertEquals(0,
+        FederationStateStoreClientMetrics.getNumFailedCalls());
+
+    LOG.info("Test: aggregate metrics are updated correctly");
+  }
+
+  @Test
+  public void testSuccessfulCalls() {
+    LOG.info("Test: Aggregate and method successful calls updated correctly");
+
+    long totalGoodBefore =
+        FederationStateStoreClientMetrics.getNumSucceededCalls();
+    long apiGoodBefore = FederationStateStoreClientMetrics
+        .getNumSuccessfulCallsForMethod("registerSubCluster");
+
+    goodStateStore.registerSubCluster(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        FederationStateStoreClientMetrics.getNumSucceededCalls());
+    Assert.assertEquals(100,
+        FederationStateStoreClientMetrics.getLatencySucceededCalls(), 0);
+    Assert.assertEquals(apiGoodBefore + 1, FederationStateStoreClientMetrics
+        .getNumSuccessfulCallsForMethod("registerSubCluster"));
+    Assert.assertEquals(100, FederationStateStoreClientMetrics
+        .getLatencySuccessfulCallsForMethod("registerSubCluster"), 0);
+
+    LOG.info("Test: Running stats correctly calculated for 2 metrics");
+
+    goodStateStore.registerSubCluster(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        FederationStateStoreClientMetrics.getNumSucceededCalls());
+    Assert.assertEquals(150,
+        FederationStateStoreClientMetrics.getLatencySucceededCalls(), 0);
+    Assert.assertEquals(apiGoodBefore + 2, FederationStateStoreClientMetrics
+        .getNumSuccessfulCallsForMethod("registerSubCluster"));
+    Assert.assertEquals(150, FederationStateStoreClientMetrics
+        .getLatencySuccessfulCallsForMethod("registerSubCluster"), 0);
+
+  }
+
+  @Test
+  public void testFailedCalls() {
+
+    long totalBadBefore = FederationStateStoreClientMetrics.getNumFailedCalls();
+    long apiBadBefore = FederationStateStoreClientMetrics
+        .getNumFailedCallsForMethod("registerSubCluster");
+
+    badStateStore.registerSubCluster();
+
+    LOG.info("Test: Aggregate and method failed calls updated correctly");
+    Assert.assertEquals(totalBadBefore + 1,
+        FederationStateStoreClientMetrics.getNumFailedCalls());
+    Assert.assertEquals(apiBadBefore + 1, FederationStateStoreClientMetrics
+        .getNumFailedCallsForMethod("registerSubCluster"));
+
+  }
+
+  @Test
+  public void testCallsUnknownMethod() {
+
+    long totalBadBefore = FederationStateStoreClientMetrics.getNumFailedCalls();
+    long apiBadBefore = FederationStateStoreClientMetrics
+        .getNumFailedCallsForMethod("registerSubCluster");
+    long totalGoodBefore =
+        FederationStateStoreClientMetrics.getNumSucceededCalls();
+    long apiGoodBefore = FederationStateStoreClientMetrics
+        .getNumSuccessfulCallsForMethod("registerSubCluster");
+
+    LOG.info("Calling Metrics class directly");
+    FederationStateStoreClientMetrics.failedStateStoreCall();
+    FederationStateStoreClientMetrics.succeededStateStoreCall(100);
+
+    LOG.info("Test: Aggregate and method calls did not update");
+    Assert.assertEquals(totalBadBefore,
+        FederationStateStoreClientMetrics.getNumFailedCalls());
+    Assert.assertEquals(apiBadBefore, FederationStateStoreClientMetrics
+        .getNumFailedCallsForMethod("registerSubCluster"));
+
+    Assert.assertEquals(totalGoodBefore,
+        FederationStateStoreClientMetrics.getNumSucceededCalls());
+    Assert.assertEquals(apiGoodBefore, FederationStateStoreClientMetrics
+        .getNumSuccessfulCallsForMethod("registerSubCluster"));
+
+  }
+
+  // Records failures for all calls
+  private class MockBadFederationStateStore {
+    public void registerSubCluster() {
+      LOG.info("Mocked: failed registerSubCluster call");
+      FederationStateStoreClientMetrics.failedStateStoreCall();
+    }
+  }
+
+  // Records successes for all calls
+  private class MockGoodFederationStateStore {
+    public void registerSubCluster(long duration) {
+      LOG.info("Mocked: successful registerSubCluster call with duration {}",
+          duration);
+      FederationStateStoreClientMetrics.succeededStateStoreCall(duration);
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
index ea43268..868e771 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException;
 import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
@@ -40,14 +41,18 @@
   private int maxRetries = 4;
   private Configuration conf;
 
+  @Before
+  public void setup() {
+    conf = new Configuration();
+    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
+  }
+
   /*
    * Test to validate that FederationStateStoreRetriableException is a retriable
    * exception.
    */
   @Test
   public void testFacadeRetriableException() throws Exception {
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action = policy.shouldRetry(
         new FederationStateStoreRetriableException(""), 0, 0, false);
@@ -66,9 +71,6 @@
    */
   @Test
   public void testFacadeYarnException() throws Exception {
-
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action = policy.shouldRetry(new YarnException(), 0, 0, false);
     Assert.assertEquals(RetryAction.FAIL.action, action.action);
@@ -80,8 +82,6 @@
    */
   @Test
   public void testFacadeStateStoreException() throws Exception {
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action = policy
         .shouldRetry(new FederationStateStoreException("Error"), 0, 0, false);
@@ -94,8 +94,6 @@
    */
   @Test
   public void testFacadeInvalidInputException() throws Exception {
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action = policy.shouldRetry(
         new FederationStateStoreInvalidInputException(""), 0, 0, false);
@@ -107,8 +105,6 @@
    */
   @Test
   public void testFacadeCacheRetriableException() throws Exception {
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action =
         policy.shouldRetry(new CacheLoaderException(""), 0, 0, false);
@@ -128,8 +124,6 @@
   @Test
   public void testFacadePoolInitRetriableException() throws Exception {
     // PoolInitializationException is a retriable exception
-    conf = new Configuration();
-    conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
     RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
     RetryAction action = policy.shouldRetry(
         new PoolInitializationException(new YarnException()), 0, 0, false);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
similarity index 97%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
index 430911e..44f63ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
@@ -27,7 +27,9 @@
 import org.junit.Test;
 import org.mockito.Mockito;
 
-
+/**
+ * Tests {@link TimelineAuthenticationFilterInitializer}.
+ */
 public class TestTimelineAuthenticationFilterInitializer {
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 100d7ca..7f2b00d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
@@ -29,25 +29,7 @@
 string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
 string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
 
-if (CMAKE_VERSION VERSION_LESS "3.1")
-  # subset of CMAKE_<LANG>_COMPILER_ID
-  # https://cmake.org/cmake/help/v3.0/variable/CMAKE_LANG_COMPILER_ID.html
-  if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR
-      CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
-      CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
-    set (CMAKE_C_FLAGS "-std=c99 -Wall -pedantic-errors ${CMAKE_C_FLAGS}")
-  elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel")
-    set (CMAKE_C_FLAGS "-std=c99 -Wall ${CMAKE_C_FLAGS}")
-  elseif (CMAKE_C_COMPILER_ID STREQUAL "SunPro")
-    set (CMAKE_C_FLAGS "-xc99 ${CMAKE_C_FLAGS}")
-  endif ()
-else ()
-  set (CMAKE_C_STANDARD 99)
-endif ()
-
-# Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
-string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
-string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+set (CMAKE_C_STANDARD 99)
 
 include(CheckIncludeFiles)
 check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
@@ -101,6 +83,10 @@
     main/native/container-executor/impl/container-executor.c
     main/native/container-executor/impl/get_executable.c
     main/native/container-executor/impl/utils/string-utils.c
+    main/native/container-executor/impl/utils/path-utils.c
+    main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
+    main/native/container-executor/impl/modules/common/module-configs.c
+    main/native/container-executor/impl/modules/gpu/gpu-module.c
 )
 
 add_executable(container-executor
@@ -113,12 +99,14 @@
 
 output_directory(container-executor target/usr/local/bin)
 
+# Test cases
 add_executable(test-container-executor
     main/native/container-executor/test/test-container-executor.c
 )
 target_link_libraries(test-container-executor
     container ${EXTRA_LIBS}
 )
+
 output_directory(test-container-executor target/usr/local/bin)
 
 # unit tests for container executor
@@ -126,6 +114,10 @@
         main/native/container-executor/impl/util.c
         main/native/container-executor/test/test_configuration.cc
         main/native/container-executor/test/test_main.cc
+        main/native/container-executor/test/utils/test-string-utils.cc
+        main/native/container-executor/test/utils/test-path-utils.cc
+        main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
+        main/native/container-executor/test/modules/gpu/test-gpu-module.cc
         main/native/container-executor/test/test_util.cc)
-target_link_libraries(cetest gtest)
+target_link_libraries(cetest gtest container)
 output_directory(cetest test)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
deleted file mode 100644
index 9479d0b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.nodemanager;
-
-import org.apache.hadoop.yarn.api.records.Container;
-import java.util.List;
-
-public class CMgrDecreaseContainersResourceEvent extends ContainerManagerEvent {
-
-  private final List<Container> containersToDecrease;
-
-  public CMgrDecreaseContainersResourceEvent(List<Container>
-      containersToDecrease) {
-    super(ContainerManagerEventType.DECREASE_CONTAINERS_RESOURCE);
-    this.containersToDecrease = containersToDecrease;
-  }
-
-  public List<Container> getContainersToDecrease() {
-    return this.containersToDecrease;
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
new file mode 100644
index 0000000..5e41701
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import java.util.List;
+
+/**
+ * Event used by the NodeStatusUpdater to notify the ContainerManager of
+ * container update commands it received from the RM.
+ */
+public class CMgrUpdateContainersEvent extends ContainerManagerEvent {
+
+  private final List<Container> containersToUpdate;
+
+  /**
+   * Create event.
+   * @param containersToUpdate Containers to update.
+   */
+  public CMgrUpdateContainersEvent(List<Container> containersToUpdate) {
+    super(ContainerManagerEventType.UPDATE_CONTAINERS);
+    this.containersToUpdate = containersToUpdate;
+  }
+
+  /**
+   * Get containers to update.
+   * @return List of containers to update.
+   */
+  public List<Container> getContainersToUpdate() {
+    return this.containersToUpdate;
+  }
+}
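
For context, a hypothetical consumer of this event on the ContainerManager side might look like the following (illustrative only; applyUpdate and the handler wiring are not part of this patch):

    // Illustrative handler; ContainerManagerEvent and Container come from the
    // classes in this patch, applyUpdate is a hypothetical helper.
    void handle(ContainerManagerEvent event) {
      switch (event.getType()) {
      case UPDATE_CONTAINERS:
        CMgrUpdateContainersEvent update = (CMgrUpdateContainersEvent) event;
        for (Container container : update.getContainersToUpdate()) {
          applyUpdate(container); // apply the RM's new resource/token to the container
        }
        break;
      default:
        break;
      }
    }
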
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 0581878..b6fb4ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -35,11 +35,11 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -68,7 +68,8 @@
  * underlying OS.  All executor implementations must extend ContainerExecutor.
  */
 public abstract class ContainerExecutor implements Configurable {
-  private static final Log LOG = LogFactory.getLog(ContainerExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainerExecutor.class);
   protected static final String WILDCARD = "*";
 
   /**
@@ -336,6 +337,9 @@
       whitelist.add(param);
     }
 
+    // Add "set -o pipefail -e" to validate launch_container script.
+    sb.setExitOnFailure();
+
     if (environment != null) {
       for (Map.Entry<String, String> env : environment.entrySet()) {
         if (!whitelist.contains(env.getKey())) {
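
The setExitOnFailure() call is the new piece here. A minimal sketch of what such a builder method could emit, per the comment above (an illustration, not the real ShellScriptBuilder implementation):

    private final StringBuilder script = new StringBuilder();

    // Emit a prologue so the generated launch script aborts on the first
    // failing command; pipefail ensures a failure inside a pipeline is not
    // masked by the exit status of its last stage.
    public void setExitOnFailure() {
      script.append("set -o pipefail -e\n");
    }
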
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
index 8861bc7..8c5f7e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java
@@ -21,6 +21,6 @@
 public enum ContainerManagerEventType {
   FINISH_APPS,
   FINISH_CONTAINERS,
-  DECREASE_CONTAINERS_RESOURCE,
+  UPDATE_CONTAINERS,
   SIGNAL_CONTAINERS
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
index 16a8497..00bd0ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
@@ -65,11 +66,18 @@
   Map<ApplicationId, Credentials> getSystemCredentialsForApps();
 
   /**
-   * Get the registered collectors that located on this NM.
-   * @return registered collectors, or null if the timeline service v.2 is not
+   * Get the map of collectors that are registering with the RM from this node.
+   * @return registering collectors, or null if the timeline service v.2 is not
    * enabled
    */
-  Map<ApplicationId, String> getRegisteredCollectors();
+  ConcurrentMap<ApplicationId, AppCollectorData> getRegisteringCollectors();
+
+  /**
+   * Get the map of collectors registered with the RM and known by this node.
+   * @return known collectors, or null if the timeline service v.2 is not
+   * enabled.
+   */
+  ConcurrentMap<ApplicationId, AppCollectorData> getKnownCollectors();
 
   ConcurrentMap<ContainerId, Container> getContainers();
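
The interface now splits collector bookkeeping into two maps. Conceptually, a collector first lands in the registering map when it reports in locally, and is promoted to the known map once the RM acknowledges it on a heartbeat. A standalone sketch of that handoff, with Strings standing in for ApplicationId and AppCollectorData:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class CollectorMapsSketch {
      private final ConcurrentMap<String, String> registering = new ConcurrentHashMap<>();
      private final ConcurrentMap<String, String> known = new ConcurrentHashMap<>();

      void onLocalCollectorReport(String appId, String collectorAddr) {
        registering.put(appId, collectorAddr); // sent to the RM on heartbeats
      }

      void onRmHeartbeatAck(String appId, String collectorAddr) {
        known.put(appId, collectorAddr);       // RM has confirmed this collector
        registering.remove(appId);             // stop re-sending it
      }
    }
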
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 18604df..8c58056 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -20,6 +20,8 @@
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -34,8 +36,6 @@
 import java.util.Map;
 
 import org.apache.commons.lang.math.RandomUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileUtil;
@@ -73,8 +73,8 @@
  */
 public class DefaultContainerExecutor extends ContainerExecutor {
 
-  private static final Log LOG = LogFactory
-      .getLog(DefaultContainerExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DefaultContainerExecutor.class);
 
   private static final int WIN_MAX_PATH = 260;
 
@@ -423,7 +423,7 @@
         pout = new PrintStream(out, false, "UTF-8");
         writeLocalWrapperScript(launchDst, pidFile, pout);
       } finally {
-        IOUtils.cleanup(LOG, pout, out);
+        IOUtils.cleanupWithLogger(LOG, pout, out);
       }
     }
 
@@ -505,7 +505,7 @@
         String exec = Shell.isSetsidAvailable? "exec setsid" : "exec";
         pout.printf("%s /bin/bash \"%s\"", exec, launchDst.toUri().getPath());
       } finally {
-        IOUtils.cleanup(LOG, pout, out);
+        IOUtils.cleanupWithLogger(LOG, pout, out);
       }
       lfs.setPermission(sessionScriptPath,
           ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
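
IOUtils.cleanup takes a commons-logging Log, so the slf4j migration switches call sites to cleanupWithLogger. The pattern it implements, as a standalone sketch mirroring org.apache.hadoop.io.IOUtils.cleanupWithLogger:

    import java.io.Closeable;
    import java.io.IOException;
    import org.slf4j.Logger;

    final class CleanupSketch {
      // Close each stream, logging rather than propagating close failures.
      static void cleanupWithLogger(Logger log, Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c == null) {
            continue;
          }
          try {
            c.close();
          } catch (IOException e) {
            log.debug("Exception in closing {}", c, e);
          }
        }
      }
    }
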
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index 38d69a3..ae81dc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -31,8 +33,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
@@ -49,7 +49,8 @@
 
 public class DeletionService extends AbstractService {
 
-  private static final Log LOG = LogFactory.getLog(DeletionService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DeletionService.class);
 
   private int debugDelay;
   private final ContainerExecutor containerExecutor;
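
This is the same log-migration pattern applied throughout the patch. Note that slf4j has no FATAL level, which is why the LOG.fatal(...) calls elsewhere in the patch become LOG.error(...). The shape of the migration, as a sketch (SomeNmService is hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeNmService {
      private static final Logger LOG =
          LoggerFactory.getLogger(SomeNmService.class);

      void fail(String msg, Throwable t) {
        LOG.error(msg, t); // was LOG.fatal(msg, t) under commons-logging
      }
    }
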
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index ae2a4ef..6aec0ff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -33,11 +33,12 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -54,7 +55,8 @@
  * Manages a list of local storage directories.
  */
 public class DirectoryCollection {
-  private static final Log LOG = LogFactory.getLog(DirectoryCollection.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DirectoryCollection.class);
 
   private final Configuration conf;
   private final DiskValidator diskValidator;
@@ -99,6 +101,7 @@
   private List<String> localDirs;
   private List<String> errorDirs;
   private List<String> fullDirs;
+  private Map<String, DiskErrorInformation> directoryErrorInfo;
 
   // read/write lock for accessing above directories.
   private final ReadLock readLock;
@@ -192,6 +195,7 @@
     localDirs = new CopyOnWriteArrayList<>(dirs);
     errorDirs = new CopyOnWriteArrayList<>();
     fullDirs = new CopyOnWriteArrayList<>();
+    directoryErrorInfo = new ConcurrentHashMap<>();
 
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     this.readLock = lock.readLock();
@@ -248,11 +252,25 @@
   /**
    * @return the directories that have used all disk space
    */
-
   List<String> getFullDirs() {
     this.readLock.lock();
     try {
-      return fullDirs;
+      return Collections.unmodifiableList(fullDirs);
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  /**
+   * @return the directories that have errors - may not have appropriate permissions
+   * or other disk validation checks might have failed in {@link DiskValidator}
+   *
+   */
+  @InterfaceStability.Evolving
+  List<String> getErroredDirs() {
+    this.readLock.lock();
+    try {
+      return Collections.unmodifiableList(errorDirs);
     } finally {
       this.readLock.unlock();
     }
@@ -271,6 +289,39 @@
   }
 
   /**
+   *
+   * @param dirName Absolute path of the directory for which error diagnostics are needed
+   * @return DiskErrorInformation - disk error diagnostics for the specified directory,
+   *         or null if the disk backing the directory has passed the disk utilization
+   *         checks/error validations in {@link DiskValidator}
+   *
+   */
+  @InterfaceStability.Evolving
+  DiskErrorInformation getDirectoryErrorInfo(String dirName) {
+    this.readLock.lock();
+    try {
+      return directoryErrorInfo.get(dirName);
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  /**
+   *
+   * @param dirName Absolute path of the directory to check
+   * @return true if the disk backing the directory has been marked as unhealthy
+   */
+  @InterfaceStability.Evolving
+  boolean isDiskUnHealthy(String dirName) {
+    this.readLock.lock();
+    try {
+      return directoryErrorInfo.containsKey(dirName);
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  /**
    * Create any non-existent directories and parent directories, updating the
    * list of valid directories if necessary.
    * @param localFs local file system to use
@@ -297,6 +348,9 @@
         try {
           localDirs.remove(dir);
           errorDirs.add(dir);
+          directoryErrorInfo.put(dir,
+              new DiskErrorInformation(DiskErrorCause.OTHER,
+                  "Cannot create directory : " + dir + ", error " + e.getMessage()));
           numFailures++;
         } finally {
           this.writeLock.unlock();
@@ -343,11 +397,13 @@
       localDirs.clear();
       errorDirs.clear();
       fullDirs.clear();
+      directoryErrorInfo.clear();
 
       for (Map.Entry<String, DiskErrorInformation> entry : dirsFailedCheck
           .entrySet()) {
         String dir = entry.getKey();
         DiskErrorInformation errorInformation = entry.getValue();
+
         switch (entry.getValue().cause) {
         case DISK_FULL:
           fullDirs.add(entry.getKey());
@@ -359,6 +415,8 @@
           LOG.warn(entry.getValue().cause + " is unknown for disk error.");
           break;
         }
+        directoryErrorInfo.put(entry.getKey(), errorInformation);
+
         if (preCheckGoodDirs.contains(dir)) {
           LOG.warn("Directory " + dir + " error, " + errorInformation.message
               + ", removing from list of valid directories");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index b3e13b4..dc68680 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -20,8 +20,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -98,8 +98,8 @@
  */
 public class LinuxContainerExecutor extends ContainerExecutor {
 
-  private static final Log LOG = LogFactory
-      .getLog(LinuxContainerExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LinuxContainerExecutor.class);
 
   private String nonsecureLocalUser;
   private Pattern nonsecureLocalUserPattern;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index f8cb4ee..c0630b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -27,9 +27,9 @@
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
@@ -51,7 +51,10 @@
  */
 public class LocalDirsHandlerService extends AbstractService {
 
-  private static Log LOG = LogFactory.getLog(LocalDirsHandlerService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LocalDirsHandlerService.class);
+
+  private static final String diskCapacityExceededErrorMsg = "usable space is below configured utilization percentage/no more usable space";
 
   /**
    * Good local directories, use internally,
@@ -344,21 +347,36 @@
     }
 
     StringBuilder report = new StringBuilder();
-    List<String> failedLocalDirsList = localDirs.getFailedDirs();
-    List<String> failedLogDirsList = logDirs.getFailedDirs();
+    List<String> erroredLocalDirsList = localDirs.getErroredDirs();
+    List<String> erroredLogDirsList = logDirs.getErroredDirs();
+    List<String> diskFullLocalDirsList = localDirs.getFullDirs();
+    List<String> diskFullLogDirsList = logDirs.getFullDirs();
     List<String> goodLocalDirsList = localDirs.getGoodDirs();
     List<String> goodLogDirsList = logDirs.getGoodDirs();
-    int numLocalDirs = goodLocalDirsList.size() + failedLocalDirsList.size();
-    int numLogDirs = goodLogDirsList.size() + failedLogDirsList.size();
+
+    int numLocalDirs = goodLocalDirsList.size() + erroredLocalDirsList.size() + diskFullLocalDirsList.size();
+    int numLogDirs = goodLogDirsList.size() + erroredLogDirsList.size() + diskFullLogDirsList.size();
     if (!listGoodDirs) {
-      if (!failedLocalDirsList.isEmpty()) {
-        report.append(failedLocalDirsList.size() + "/" + numLocalDirs
-            + " local-dirs are bad: "
-            + StringUtils.join(",", failedLocalDirsList) + "; ");
+      if (!erroredLocalDirsList.isEmpty()) {
+        report.append(erroredLocalDirsList.size() + "/" + numLocalDirs
+            + " local-dirs have errors: "
+            + buildDiskErrorReport(erroredLocalDirsList, localDirs));
       }
-      if (!failedLogDirsList.isEmpty()) {
-        report.append(failedLogDirsList.size() + "/" + numLogDirs
-            + " log-dirs are bad: " + StringUtils.join(",", failedLogDirsList));
+      if (!diskFullLocalDirsList.isEmpty()) {
+        report.append(diskFullLocalDirsList.size() + "/" + numLocalDirs
+            + " local-dirs " + diskCapacityExceededErrorMsg
+            + buildDiskErrorReport(diskFullLocalDirsList, localDirs) + "; ");
+      }
+
+      if (!erroredLogDirsList.isEmpty()) {
+        report.append(erroredLogDirsList.size() + "/" + numLogDirs
+            + " log-dirs have errors: "
+            + buildDiskErrorReport(erroredLogDirsList, logDirs));
+      }
+      if (!diskFullLogDirsList.isEmpty()) {
+        report.append(diskFullLogDirsList.size() + "/" + numLogDirs
+            + " log-dirs " + diskCapacityExceededErrorMsg
+            + buildDiskErrorReport(diskFullLogDirsList, logDirs));
       }
     } else {
       report.append(goodLocalDirsList.size() + "/" + numLocalDirs
@@ -620,4 +638,24 @@
           logDirs.getGoodDirsDiskUtilizationPercentage());
     }
   }
+
+  private String buildDiskErrorReport(List<String> dirs, DirectoryCollection directoryCollection) {
+    StringBuilder sb = new StringBuilder();
+
+    sb.append(" [ ");
+    for (int i = 0; i < dirs.size(); i++) {
+      final String dirName = dirs.get(i);
+      if (directoryCollection.isDiskUnHealthy(dirName)) {
+        sb.append(dirName + " : " + directoryCollection.getDirectoryErrorInfo(dirName).message);
+      } else {
+        sb.append(dirName + " : " + "Unknown cause for disk error");
+      }
+
+      if (i != (dirs.size() - 1)) {
+        sb.append(" , ");
+      }
+    }
+    sb.append(" ] ");
+    return sb.toString();
+  }
 }
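
For illustration, a compact stream-based equivalent of buildDiskErrorReport (the directory path and message are hypothetical), showing the " [ dir : msg , dir : msg ] " shape the health report embeds:

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class ReportSketch {
      static String build(List<String> dirs, Map<String, String> errorInfo) {
        return dirs.stream()
            .map(d -> d + " : "
                + errorInfo.getOrDefault(d, "Unknown cause for disk error"))
            .collect(Collectors.joining(" , ", " [ ", " ] "));
      }

      public static void main(String[] args) {
        System.out.println(build(
            List.of("/grid/0/yarn/local"),
            Map.of("/grid/0/yarn/local", "Permission denied")));
        // prints:  [ /grid/0/yarn/local : Permission denied ]
      }
    }
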
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
index 279e299..3083064 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import java.net.InetAddress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -31,7 +31,8 @@
  * Audit log format is written as key=value pairs. Tab separated.
  */
 public class NMAuditLogger {
-  private static final Log LOG = LogFactory.getLog(NMAuditLogger.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NMAuditLogger.class);
 
   enum Keys {USER, OPERATION, TARGET, RESULT, IP,
                     DESCRIPTION, APPID, CONTAINERID}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 3c0e498..3e919c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -26,9 +26,9 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,11 +57,13 @@
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.collectormanager.NMCollectorService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.nodelabels.ConfigurationNodeLabelsProvider;
@@ -105,7 +107,8 @@
    */
   public static final int SHUTDOWN_HOOK_PRIORITY = 30;
 
-  private static final Log LOG = LogFactory.getLog(NodeManager.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NodeManager.class);
   private static long nmStartupTime = System.currentTimeMillis();
   protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
   private JvmPauseMonitor pauseMonitor;
@@ -463,19 +466,57 @@
           if (!rmWorkPreservingRestartEnabled) {
             LOG.info("Cleaning up running containers on resync");
             containerManager.cleanupContainersOnNMResync();
+            // Clear all known collectors for resync.
+            if (context.getKnownCollectors() != null) {
+              context.getKnownCollectors().clear();
+            }
           } else {
             LOG.info("Preserving containers on resync");
+            // Re-register known timeline collectors.
+            reregisterCollectors();
           }
           ((NodeStatusUpdaterImpl) nodeStatusUpdater)
             .rebootNodeStatusUpdaterAndRegisterWithRM();
         } catch (YarnRuntimeException e) {
-          LOG.fatal("Error while rebooting NodeStatusUpdater.", e);
+          LOG.error("Error while rebooting NodeStatusUpdater.", e);
           shutDown(NodeManagerStatus.EXCEPTION.getExitCode());
         }
       }
     }.start();
   }
 
+  /**
+   * Reregisters all collectors known by this node to the RM. This method is
+   * called when the RM needs to resync with the node.
+   */
+  protected void reregisterCollectors() {
+    Map<ApplicationId, AppCollectorData> knownCollectors
+        = context.getKnownCollectors();
+    if (knownCollectors == null) {
+      return;
+    }
+    ConcurrentMap<ApplicationId, AppCollectorData> registeringCollectors
+        = context.getRegisteringCollectors();
+    for (Map.Entry<ApplicationId, AppCollectorData> entry
+        : knownCollectors.entrySet()) {
+      Application app = context.getApplications().get(entry.getKey());
+      if ((app != null)
+          && !ApplicationState.FINISHED.equals(app.getApplicationState())) {
+        registeringCollectors.putIfAbsent(entry.getKey(), entry.getValue());
+        AppCollectorData data = entry.getValue();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(entry.getKey() + " : " + data.getCollectorAddr() + "@<"
+              + data.getRMIdentifier() + ", " + data.getVersion() + ">");
+        }
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remove collector data for done app " + entry.getKey());
+        }
+      }
+    }
+    knownCollectors.clear();
+  }
+
   public static class NMContext implements Context {
 
     private NodeId nodeId = null;
@@ -491,7 +532,10 @@
     protected final ConcurrentMap<ContainerId, Container> containers =
         new ConcurrentSkipListMap<ContainerId, Container>();
 
-    private Map<ApplicationId, String> registeredCollectors;
+    private ConcurrentMap<ApplicationId, AppCollectorData>
+        registeringCollectors;
+
+    private ConcurrentMap<ApplicationId, AppCollectorData> knownCollectors;
 
     protected final ConcurrentMap<ContainerId,
         org.apache.hadoop.yarn.api.records.Container> increasedContainers =
@@ -525,7 +569,8 @@
         NMStateStoreService stateStore, boolean isDistSchedulingEnabled,
         Configuration conf) {
       if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
-        this.registeredCollectors = new ConcurrentHashMap<>();
+        this.registeringCollectors = new ConcurrentHashMap<>();
+        this.knownCollectors = new ConcurrentHashMap<>();
       }
       this.containerTokenSecretManager = containerTokenSecretManager;
       this.nmTokenSecretManager = nmTokenSecretManager;
@@ -680,18 +725,14 @@
     }
 
     @Override
-    public Map<ApplicationId, String> getRegisteredCollectors() {
-      return this.registeredCollectors;
+    public ConcurrentMap<ApplicationId, AppCollectorData>
+        getRegisteringCollectors() {
+      return this.registeringCollectors;
     }
 
-    public void addRegisteredCollectors(
-        Map<ApplicationId, String> newRegisteredCollectors) {
-      if (registeredCollectors != null) {
-        this.registeredCollectors.putAll(newRegisteredCollectors);
-      } else {
-        LOG.warn("collectors are added when the registered collectors are " +
-            "initialized");
-      }
+    @Override
+    public ConcurrentMap<ApplicationId, AppCollectorData> getKnownCollectors() {
+      return this.knownCollectors;
     }
 
     @Override
@@ -729,7 +770,7 @@
           String message =
               "Failing NodeManager start since we're on a "
                   + "Unix-based system but bash doesn't seem to be available.";
-          LOG.fatal(message);
+          LOG.error(message);
           throw new YarnRuntimeException(message);
         }
       }
@@ -748,7 +789,7 @@
       this.init(conf);
       this.start();
     } catch (Throwable t) {
-      LOG.fatal("Error starting NodeManager", t);
+      LOG.error("Error starting NodeManager", t);
       System.exit(-1);
     }
   }
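
A simplified restatement of the resync rule reregisterCollectors implements (same String stand-ins as the earlier collector-maps sketch; appStillRunning abstracts the FINISHED-state check):

    import java.util.Map;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Predicate;

    final class ResyncSketch {
      static void reregister(ConcurrentMap<String, String> known,
          ConcurrentMap<String, String> registering,
          Predicate<String> appStillRunning) {
        for (Map.Entry<String, String> e : known.entrySet()) {
          if (appStillRunning.test(e.getKey())) {
            registering.putIfAbsent(e.getKey(), e.getValue()); // re-announce to RM
          }
        }
        known.clear(); // everything must be re-confirmed after the resync
      }
    }
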
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
index e1116da..8b96ba5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.yarn.server.nodemanager;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implementation of the node resource monitor. It periodically tracks the
@@ -34,8 +34,8 @@
     NodeResourceMonitor {
 
   /** Logging infrastructure. */
-  final static Log LOG = LogFactory
-      .getLog(NodeResourceMonitorImpl.class);
+  final static Logger LOG =
+       LoggerFactory.getLogger(NodeResourceMonitorImpl.class);
 
   /** Interval to monitor the node resource utilization. */
   private long monitoringInterval;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index b5ec383..35b7cb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -33,9 +33,9 @@
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataInputByteBuffer;
@@ -71,7 +71,7 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
-
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
@@ -99,7 +99,8 @@
   public static final String YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS =
       YarnConfiguration.NM_PREFIX + "duration-to-track-stopped-containers";
 
-  private static final Log LOG = LogFactory.getLog(NodeStatusUpdaterImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NodeStatusUpdaterImpl.class);
 
   private final Object heartbeatMonitor = new Object();
   private final Object shutdownMonitor = new Object();
@@ -427,7 +428,7 @@
     successfullRegistrationMsg.append(nodeLabelsHandler
         .verifyRMRegistrationResponseForNodeLabels(regNMResponse));
 
-    LOG.info(successfullRegistrationMsg);
+    LOG.info(successfullRegistrationMsg.toString());
   }
 
   private List<ApplicationId> createKeepAliveApplicationList() {
@@ -759,7 +760,6 @@
   }
 
   protected void startStatusUpdater() {
-
     statusUpdaterRunnable = new StatusUpdaterRunnable();
     statusUpdater =
         new Thread(statusUpdaterRunnable, "Node Status Updater");
@@ -1042,7 +1042,7 @@
                       .getNMTokenSecretManager().getCurrentKey(),
                   nodeLabelsForHeartbeat,
                   NodeStatusUpdaterImpl.this.context
-                      .getRegisteredCollectors());
+                      .getRegisteringCollectors());
 
           if (logAggregationEnabled) {
             // pull log aggregation status for application running in this NM
@@ -1099,12 +1099,10 @@
                   parseCredentials(systemCredentials));
             }
             List<org.apache.hadoop.yarn.api.records.Container>
-                containersToDecrease = response.getContainersToDecrease();
-            if (!containersToDecrease.isEmpty()) {
+                containersToUpdate = response.getContainersToUpdate();
+            if (!containersToUpdate.isEmpty()) {
               dispatcher.getEventHandler().handle(
-                  new CMgrDecreaseContainersResourceEvent(
-                      containersToDecrease)
-              );
+                  new CMgrUpdateContainersEvent(containersToUpdate));
             }
 
             // SignalContainer request originally comes from end users via
@@ -1135,7 +1133,7 @@
             }
           }
           if (YarnConfiguration.timelineServiceV2Enabled(context.getConf())) {
-            updateTimelineClientsAddress(response);
+            updateTimelineCollectorData(response);
           }
 
         } catch (ConnectException e) {
@@ -1165,40 +1163,48 @@
       }
     }
 
-    private void updateTimelineClientsAddress(
+    private void updateTimelineCollectorData(
         NodeHeartbeatResponse response) {
-      Map<ApplicationId, String> knownCollectorsMap =
-          response.getAppCollectorsMap();
-      if (knownCollectorsMap == null) {
+      Map<ApplicationId, AppCollectorData> incomingCollectorsMap =
+          response.getAppCollectors();
+      if (incomingCollectorsMap == null) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("No collectors to update RM");
         }
-      } else {
-        Set<Map.Entry<ApplicationId, String>> rmKnownCollectors =
-            knownCollectorsMap.entrySet();
-        for (Map.Entry<ApplicationId, String> entry : rmKnownCollectors) {
-          ApplicationId appId = entry.getKey();
-          String collectorAddr = entry.getValue();
+        return;
+      }
+      Map<ApplicationId, AppCollectorData> knownCollectors =
+          context.getKnownCollectors();
+      for (Map.Entry<ApplicationId, AppCollectorData> entry
+          : incomingCollectorsMap.entrySet()) {
+        ApplicationId appId = entry.getKey();
+        AppCollectorData collectorData = entry.getValue();
 
-          // Only handle applications running on local node.
-          // Not include apps with timeline collectors running in local
-          Application application = context.getApplications().get(appId);
-          // TODO this logic could be problematic if the collector address
-          // gets updated due to NM restart or collector service failure
-          if (application != null &&
-              !context.getRegisteredCollectors().containsKey(appId)) {
+        // Only handle applications running on local node.
+        Application application = context.getApplications().get(appId);
+        if (application != null) {
+          // Update collector data if the newly received data happens after
+          // the known data (updates the known data).
+          AppCollectorData existingData = knownCollectors.get(appId);
+          if (AppCollectorData.happensBefore(existingData, collectorData)) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug("Sync a new collector address: " + collectorAddr +
-                      " for application: " + appId + " from RM.");
+              LOG.debug("Sync a new collector address: "
+                  + collectorData.getCollectorAddr()
+                  + " for application: " + appId + " from RM.");
             }
+            // Update information for clients.
             NMTimelinePublisher nmTimelinePublisher =
                 context.getNMTimelinePublisher();
             if (nmTimelinePublisher != null) {
               nmTimelinePublisher.setTimelineServiceAddress(
-                  application.getAppId(), collectorAddr);
+                  application.getAppId(), collectorData.getCollectorAddr());
             }
+            // Update information for the node manager itself.
+            knownCollectors.put(appId, collectorData);
           }
         }
+        // Remove the registering collector data
+        context.getRegisteringCollectors().remove(entry.getKey());
       }
     }
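
The update rule above hinges on AppCollectorData.happensBefore: a newly reported collector record only replaces the cached one if it is strictly newer. A plausible reading of that check, reduced to an (rmIdentifier, version) pair (a sketch; the real comparison may differ in detail):

    record CollectorStamp(long rmIdentifier, long version) {
      // True when "existing" is strictly older than "incoming" (or absent),
      // i.e. the incoming record should replace the cached one.
      static boolean happensBefore(CollectorStamp existing, CollectorStamp incoming) {
        if (existing == null) {
          return true;
        }
        return existing.rmIdentifier() < incoming.rmIdentifier()
            || (existing.rmIdentifier() == incoming.rmIdentifier()
                && existing.version() < incoming.version());
      }
    }
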
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
index aa3c70f..f36d4da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
@@ -27,9 +27,9 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
@@ -52,8 +52,8 @@
 public class AMRMProxyTokenSecretManager extends
     SecretManager<AMRMTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(AMRMProxyTokenSecretManager.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(AMRMProxyTokenSecretManager.class);
 
   private int serialNo = new SecureRandom().nextInt();
   private MasterKeyData nextMasterKey;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
index e47b3ee..f9b762a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
@@ -16,8 +16,6 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
@@ -25,6 +23,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -34,7 +34,8 @@
  */
 public final class NMProtoUtils {
 
-  private static final Log LOG = LogFactory.getLog(NMProtoUtils.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NMProtoUtils.class);
 
   private NMProtoUtils() { }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index d667c0e..4648a65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -22,10 +22,11 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -37,10 +38,10 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoResponse;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorsMap;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
-import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
 import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
 
 /**
@@ -50,7 +51,8 @@
 public class NMCollectorService extends CompositeService implements
     CollectorNodemanagerProtocol {
 
-  private static final Log LOG = LogFactory.getLog(NMCollectorService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NMCollectorService.class);
 
   private final Context context;
 
@@ -73,16 +75,21 @@
 
     Configuration serverConf = new Configuration(conf);
 
-    // TODO Security settings.
     YarnRPC rpc = YarnRPC.create(conf);
 
+    // Kerberos based authentication to be used for CollectorNodemanager
+    // protocol if security is enabled.
     server =
         rpc.getServer(CollectorNodemanagerProtocol.class, this,
-            collectorServerAddress, serverConf,
-            this.context.getNMTokenSecretManager(),
+            collectorServerAddress, serverConf, null,
             conf.getInt(YarnConfiguration.NM_COLLECTOR_SERVICE_THREAD_COUNT,
                 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
+    if (conf.getBoolean(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      server.refreshServiceAcl(conf, new NMPolicyProvider());
+    }
+
     server.start();
     collectorServerAddress = conf.updateConnectAddr(
         YarnConfiguration.NM_BIND_HOST,
@@ -94,7 +101,6 @@
     LOG.info("NMCollectorService started at " + collectorServerAddress);
   }
 
-
   @Override
   public void serviceStop() throws Exception {
     if (server != null) {
@@ -107,23 +113,31 @@
   @Override
   public ReportNewCollectorInfoResponse reportNewCollectorInfo(
       ReportNewCollectorInfoRequest request) throws YarnException, IOException {
-    List<AppCollectorsMap> newCollectorsList = request.getAppCollectorsList();
+    List<AppCollectorData> newCollectorsList = request.getAppCollectorsList();
     if (newCollectorsList != null && !newCollectorsList.isEmpty()) {
-      Map<ApplicationId, String> newCollectorsMap =
-          new HashMap<ApplicationId, String>();
-      for (AppCollectorsMap collector : newCollectorsList) {
+      Map<ApplicationId, AppCollectorData> newCollectorsMap =
+          new HashMap<>();
+      for (AppCollectorData collector : newCollectorsList) {
         ApplicationId appId = collector.getApplicationId();
-        String collectorAddr = collector.getCollectorAddr();
-        newCollectorsMap.put(appId, collectorAddr);
+        newCollectorsMap.put(appId, collector);
         // set registered collector address to TimelineClient.
+        // TODO: Do we need to do this only after we have received
+        // confirmation from the RM?
         NMTimelinePublisher nmTimelinePublisher =
             context.getNMTimelinePublisher();
         if (nmTimelinePublisher != null) {
-          nmTimelinePublisher.setTimelineServiceAddress(appId, collectorAddr);
+          nmTimelinePublisher.setTimelineServiceAddress(appId,
+              collector.getCollectorAddr());
         }
       }
-      ((NodeManager.NMContext)context).addRegisteredCollectors(
-          newCollectorsMap);
+      Map<ApplicationId, AppCollectorData> registeringCollectors
+          = context.getRegisteringCollectors();
+      if (registeringCollectors != null) {
+        registeringCollectors.putAll(newCollectorsMap);
+      } else {
+        LOG.warn("collectors are added when the registered collectors are " +
+            "initialized");
+      }
     }
 
     return ReportNewCollectorInfoResponse.newInstance();
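
The ACL refresh added above is gated on Hadoop's global service-authorization switch. For reference, a minimal sketch of flipping that switch in configuration (the rest of the server wiring is as in the patch):

    Configuration conf = new Configuration();
    // hadoop.security.authorization is off by default; when set, the
    // NMCollectorService start-up path above calls refreshServiceAcl(...)
    // with NMPolicyProvider on its RPC server.
    conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
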
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c0e1f5a..5e0f293 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -25,9 +25,9 @@
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.regex.Pattern;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -51,7 +51,8 @@
 
   static final String STATE_STORE_ROOT_NAME = "nm-aux-services";
 
-  private static final Log LOG = LogFactory.getLog(AuxServices.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(AuxServices.class);
 
   protected final Map<String,AuxiliaryService> serviceMap;
   protected final Map<String,ByteBuffer> serviceMetaData;
@@ -161,7 +162,7 @@
         }
         s.init(conf);
       } catch (RuntimeException e) {
-        LOG.fatal("Failed to initialize " + sName, e);
+        LOG.error("Failed to initialize " + sName, e);
         throw e;
       }
     }
@@ -205,7 +206,7 @@
 
   @Override
   public void stateChanged(Service service) {
-    LOG.fatal("Service " + service.getName() + " changed state: " +
+    LOG.error("Service " + service.getName() + " changed state: " +
         service.getServiceState());
     stop();
   }
@@ -243,7 +244,8 @@
         for (AuxiliaryService serv : serviceMap.values()) {
           try {
             serv.initializeContainer(new ContainerInitializationContext(
-                event.getUser(), event.getContainer().getContainerId(),
+                event.getContainer().getUser(),
+                event.getContainer().getContainerId(),
                 event.getContainer().getResource(), event.getContainer()
                 .getContainerTokenIdentifier().getContainerType()));
           } catch (Throwable th) {
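
Note: the Log/LogFactory to SLF4J moves throughout this patch are mechanical,
with one behavioral wrinkle: SLF4J's Logger defines no FATAL level, so former
LOG.fatal(...) call sites become LOG.error(...), and its logging methods take
String rather than Object, which is why call sites such as
LOG.warn(diagnosticInfo) later in the patch gain an explicit toString(). An
illustrative before/after with a placeholder class name:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class MigrationExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(MigrationExample.class);

      void init(String serviceName, RuntimeException e) {
        // Was LOG.fatal("Failed to initialize " + serviceName, e) under
        // commons-logging; SLF4J has no fatal(), so error() is the
        // closest level.
        LOG.error("Failed to initialize " + serviceName, e);
        // SLF4J also supports parameterized messages; a trailing
        // Throwable is still recorded as the exception.
        LOG.error("Failed to initialize {}", serviceName, e);
      }
    }
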
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index a1e8ca0..e497f62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -20,8 +20,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,7 +96,7 @@
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedContainersEvent;
-import org.apache.hadoop.yarn.server.nodemanager.CMgrDecreaseContainersResourceEvent;
+import org.apache.hadoop.yarn.server.nodemanager.CMgrUpdateContainersEvent;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrSignalContainersEvent;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent;
@@ -155,6 +155,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
@@ -166,6 +167,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -188,7 +190,8 @@
    */
   private static final int SHUTDOWN_CLEANUP_SLOP_MS = 1000;
 
-  private static final Log LOG = LogFactory.getLog(ContainerManagerImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainerManagerImpl.class);
 
   public static final String INVALID_NMTOKEN_MSG = "Invalid NMToken";
   static final String INVALID_CONTAINERTOKEN_MSG =
@@ -399,6 +402,16 @@
         LOG.debug(
             "Recovering Flow context: " + fc + " for an application " + appId);
       }
+    } else {
+      // In upgrade situations, where no prior flow context exists, fall
+      // back to a default flow context.
+      fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
+          YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "No prior existing flow context found. Using default Flow context: "
+                + fc + " for an application " + appId);
+      }
     }
 
     LOG.info("Recovering application " + appId);
@@ -1050,10 +1063,11 @@
     Credentials credentials =
         YarnServerSecurityUtils.parseCredentials(launchContext);
 
+    long containerStartTime = SystemClock.getInstance().getTime();
     Container container =
         new ContainerImpl(getConfig(), this.dispatcher,
             launchContext, credentials, metrics, containerTokenIdentifier,
-            context);
+            context, containerStartTime);
     ApplicationId applicationID =
         containerId.getApplicationAttemptId().getApplicationId();
     if (context.getContainers().putIfAbsent(containerId, container) != null) {
@@ -1110,7 +1124,7 @@
         }
 
         this.context.getNMStateStore().storeContainer(containerId,
-            containerTokenIdentifier.getVersion(), request);
+            containerTokenIdentifier.getVersion(), containerStartTime, request);
         dispatcher.getEventHandler().handle(
           new ApplicationContainerInitEvent(container));
 
@@ -1241,10 +1255,19 @@
     org.apache.hadoop.yarn.server.nodemanager.
         containermanager.container.ContainerState currentState =
         container.getContainerState();
-    if (currentState != org.apache.hadoop.yarn.server.
-            nodemanager.containermanager.container.ContainerState.RUNNING &&
-        currentState != org.apache.hadoop.yarn.server.
-            nodemanager.containermanager.container.ContainerState.SCHEDULED) {
+    EnumSet<org.apache.hadoop.yarn.server.nodemanager.containermanager
+        .container.ContainerState> allowedStates = EnumSet.of(
+        org.apache.hadoop.yarn.server.nodemanager.containermanager.container
+            .ContainerState.RUNNING,
+        org.apache.hadoop.yarn.server.nodemanager.containermanager.container
+            .ContainerState.SCHEDULED,
+        org.apache.hadoop.yarn.server.nodemanager.containermanager.container
+            .ContainerState.LOCALIZING,
+        org.apache.hadoop.yarn.server.nodemanager.containermanager.container
+            .ContainerState.REINITIALIZING,
+        org.apache.hadoop.yarn.server.nodemanager.containermanager.container
+            .ContainerState.RELAUNCHING);
+    if (!allowedStates.contains(currentState)) {
       throw RPCUtil.getRemoteException("Container " + containerId.toString()
           + " is in " + currentState.name() + " state."
           + " Resource can only be changed when a container is in"
@@ -1279,17 +1302,12 @@
             org.apache.hadoop.yarn.api.records.Container.newInstance(
                 containerId, null, null, targetResource, null, null,
                 currentExecType);
-      } else {
-        increasedContainer =
-            org.apache.hadoop.yarn.api.records.Container.newInstance(
-                containerId, null, null, currentResource, null, null,
-                targetExecType);
-      }
-      if (context.getIncreasedContainers().putIfAbsent(containerId,
-          increasedContainer) != null){
-        throw RPCUtil.getRemoteException("Container " + containerId.toString()
-            + " resource is being increased -or- " +
-            "is undergoing ExecutionType promoted.");
+        if (context.getIncreasedContainers().putIfAbsent(containerId,
+            increasedContainer) != null){
+          throw RPCUtil.getRemoteException("Container " + containerId.toString()
+              + " resource is being increased -or- " +
+              "is undergoing ExecutionType promoted.");
+        }
       }
     }
     this.readLock.lock();
@@ -1594,11 +1612,11 @@
                   "Container Killed by ResourceManager"));
       }
       break;
-    case DECREASE_CONTAINERS_RESOURCE:
-      CMgrDecreaseContainersResourceEvent containersDecreasedEvent =
-          (CMgrDecreaseContainersResourceEvent) event;
+    case UPDATE_CONTAINERS:
+      CMgrUpdateContainersEvent containersUpdateEvent =
+          (CMgrUpdateContainersEvent) event;
       for (org.apache.hadoop.yarn.api.records.Container container
-          : containersDecreasedEvent.getContainersToDecrease()) {
+          : containersUpdateEvent.getContainersToUpdate()) {
         try {
           ContainerTokenIdentifier containerTokenIdentifier =
               BuilderUtils.newContainerTokenIdentifier(
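
Note: the resource-update guard above swaps a chain of != comparisons for
EnumSet.of(...).contains(...), which reads as a single list of permitted
states and is a constant-time bitmask test. A sketch of the idiom with a
stand-in enum (the real code uses the NM's fully qualified ContainerState);
as a design choice, the set could also be hoisted into a static constant
rather than rebuilt on every call:

    import java.util.EnumSet;

    class StateGuard {
      enum State {
        NEW, LOCALIZING, SCHEDULED, RUNNING, REINITIALIZING, RELAUNCHING, DONE
      }

      private static final EnumSet<State> UPDATABLE = EnumSet.of(
          State.RUNNING, State.SCHEDULED, State.LOCALIZING,
          State.REINITIALIZING, State.RELAUNCHING);

      static void checkUpdatable(State current) {
        // One membership test replaces a chain of != checks.
        if (!UPDATABLE.contains(current)) {
          throw new IllegalStateException("Container is in " + current
              + " state; resources can only be changed in states "
              + UPDATABLE);
        }
      }
    }
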
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
index 0a8ffdf..09c946b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java
@@ -23,12 +23,16 @@
 
 public class ApplicationContainerFinishedEvent extends ApplicationEvent {
   private ContainerStatus containerStatus;
+  // Required by NMTimelinePublisher.
+  private long containerStartTime;
 
-  public ApplicationContainerFinishedEvent(ContainerStatus containerStatus) {
+  public ApplicationContainerFinishedEvent(ContainerStatus containerStatus,
+      long containerStartTs) {
     super(containerStatus.getContainerId().getApplicationAttemptId().
         getApplicationId(),
         ApplicationEventType.APPLICATION_CONTAINER_FINISHED);
     this.containerStatus = containerStatus;
+    this.containerStartTime = containerStartTs;
   }
 
   public ContainerId getContainerID() {
@@ -39,4 +43,7 @@
     return containerStatus;
   }
 
+  public long getContainerStartTime() {
+    return containerStartTime;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 80863a1..39be7a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -25,10 +25,10 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.protobuf.ByteString;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.Credentials;
@@ -45,6 +45,7 @@
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
@@ -85,7 +86,8 @@
   private final WriteLock writeLock;
   private final Context context;
 
-  private static final Log LOG = LogFactory.getLog(ApplicationImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ApplicationImpl.class);
 
   private LogAggregationContext logAggregationContext;
 
@@ -557,6 +559,29 @@
   @SuppressWarnings("unchecked")
   static class AppCompletelyDoneTransition implements
       SingleArcTransition<ApplicationImpl, ApplicationEvent> {
+
+    private void updateCollectorStatus(ApplicationImpl app) {
+      // Remove collector info for finished apps.
+      // TODO: check that related collector info is removed in failure cases
+      // (YARN-3038)
+      Map<ApplicationId, AppCollectorData> registeringCollectors
+          = app.context.getRegisteringCollectors();
+      if (registeringCollectors != null) {
+        registeringCollectors.remove(app.getAppId());
+      }
+      Map<ApplicationId, AppCollectorData> knownCollectors =
+          app.context.getKnownCollectors();
+      if (knownCollectors != null) {
+        knownCollectors.remove(app.getAppId());
+      }
+      // Stop the timeline client when the application finishes.
+      NMTimelinePublisher nmTimelinePublisher =
+          app.context.getNMTimelinePublisher();
+      if (nmTimelinePublisher != null) {
+        nmTimelinePublisher.stopTimelineClient(app.getAppId());
+      }
+    }
+
     @Override
     public void transition(ApplicationImpl app, ApplicationEvent event) {
 
@@ -565,20 +590,7 @@
           new LogHandlerAppFinishedEvent(app.appId));
 
       app.context.getNMTokenSecretManager().appFinished(app.getAppId());
-      // Remove collectors info for finished apps.
-      // TODO check we remove related collectors info in failure cases
-      // (YARN-3038)
-      Map<ApplicationId, String> registeredCollectors =
-          app.context.getRegisteredCollectors();
-      if (registeredCollectors != null) {
-        registeredCollectors.remove(app.getAppId());
-      }
-      // stop timelineClient when application get finished.
-      NMTimelinePublisher nmTimelinePublisher =
-          app.context.getNMTimelinePublisher();
-      if (nmTimelinePublisher != null) {
-        nmTimelinePublisher.stopTimelineClient(app.getAppId());
-      }
+      updateCollectorStatus(app);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index f6e567c..ac9fbb7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -37,6 +37,8 @@
 
   ContainerId getContainerId();
 
+  long getContainerStartTime();
+
   Resource getResource();
 
   ContainerTokenIdentifier getContainerTokenIdentifier();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 734a27b..772b6e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -33,10 +33,10 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.Credentials;
@@ -173,9 +173,10 @@
 
   /** The NM-wide configuration - not specific to this container */
   private final Configuration daemonConf;
+  private final long startTime;
 
-  private static final Log LOG = LogFactory.getLog(ContainerImpl.class);
-
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainerImpl.class);
 
   // whether container has been recovered after a restart
   private RecoveredContainerStatus recoveredStatus =
@@ -189,6 +190,16 @@
       ContainerLaunchContext launchContext, Credentials creds,
       NodeManagerMetrics metrics,
       ContainerTokenIdentifier containerTokenIdentifier, Context context) {
+    this(conf, dispatcher, launchContext, creds, metrics,
+        containerTokenIdentifier, context, SystemClock.getInstance().getTime());
+  }
+
+  public ContainerImpl(Configuration conf, Dispatcher dispatcher,
+      ContainerLaunchContext launchContext, Credentials creds,
+      NodeManagerMetrics metrics,
+      ContainerTokenIdentifier containerTokenIdentifier, Context context,
+      long startTs) {
+    this.startTime = startTs;
     this.daemonConf = conf;
     this.dispatcher = dispatcher;
     this.stateStore = context.getNMStateStore();
@@ -262,7 +273,7 @@
       ContainerTokenIdentifier containerTokenIdentifier, Context context,
       RecoveredContainerState rcs) {
     this(conf, dispatcher, launchContext, creds, metrics,
-        containerTokenIdentifier, context);
+        containerTokenIdentifier, context, rcs.getStartTime());
     this.recoveredStatus = rcs.getStatus();
     this.exitCode = rcs.getExitCode();
     this.recoveredAsKilled = rcs.getKilled();
@@ -630,6 +641,11 @@
   }
 
   @Override
+  public long getContainerStartTime() {
+    return this.startTime;
+  }
+
+  @Override
   public Resource getResource() {
     return Resources.clone(
         this.containerTokenIdentifier.getResource());
@@ -693,7 +709,8 @@
     EventHandler eventHandler = dispatcher.getEventHandler();
 
     ContainerStatus containerStatus = cloneAndGetContainerStatus();
-    eventHandler.handle(new ApplicationContainerFinishedEvent(containerStatus));
+    eventHandler.handle(
+        new ApplicationContainerFinishedEvent(containerStatus, startTime));
 
     // Tell the scheduler the container is Done
     eventHandler.handle(new ContainerSchedulerEvent(this,
@@ -702,7 +719,7 @@
     eventHandler.handle(new ContainerStopMonitoringEvent(containerId));
     // Tell the logService too
     eventHandler.handle(new LogHandlerContainerFinishedEvent(
-      containerId, exitCode));
+        containerId, containerTokenIdentifier.getContainerType(), exitCode));
   }
 
   @SuppressWarnings("unchecked") // dispatcher not typed
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
index 635d7a9..f4353ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
@@ -16,11 +16,11 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashSet;
@@ -34,7 +34,8 @@
  */
 public abstract class DeletionTask implements Runnable {
 
-  static final Log LOG = LogFactory.getLog(DeletionTask.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(DeletionTask.class);
 
   public static final int INVALID_TASK_ID = -1;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 0b599a8..50443f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -20,6 +20,8 @@
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -36,8 +38,6 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileContext;
@@ -89,7 +89,8 @@
 
 public class ContainerLaunch implements Callable<Integer> {
 
-  private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainerLaunch.class);
 
   public static final String CONTAINER_SCRIPT =
     Shell.appendScriptExtension("launch_container");
@@ -269,7 +270,8 @@
         creds.writeTokenStorageToStream(tokensOutStream);
         // /////////// End of writing out container-tokens
       } finally {
-        IOUtils.cleanup(LOG, containerScriptOutStream, tokensOutStream);
+        IOUtils.cleanupWithLogger(LOG, containerScriptOutStream,
+            tokensOutStream);
       }
 
       ret = launchContainer(new ContainerStartContext.Builder()
@@ -518,7 +520,7 @@
   @SuppressWarnings("unchecked")
   protected void handleContainerExitWithFailure(ContainerId containerID,
       int ret, Path containerLogDir, StringBuilder diagnosticInfo) {
-    LOG.warn(diagnosticInfo);
+    LOG.warn(diagnosticInfo.toString());
 
     String errorFileNamePattern =
         conf.get(YarnConfiguration.NM_CONTAINER_STDERR_PATTERN,
@@ -562,14 +564,16 @@
         errorFileIS = fileSystem.open(errorFile);
         errorFileIS.readFully(startPosition, tailBuffer);
 
+        String tailBufferMsg = new String(tailBuffer, StandardCharsets.UTF_8);
         diagnosticInfo.append("Last ").append(tailSizeInBytes)
             .append(" bytes of ").append(errorFile.getName()).append(" :\n")
-            .append(new String(tailBuffer, StandardCharsets.UTF_8));
+            .append(tailBufferMsg).append("\n")
+            .append(analysesErrorMsgOfContainerExitWithFailure(tailBufferMsg));
       }
     } catch (IOException e) {
       LOG.error("Failed to get tail of the container's error log file", e);
     } finally {
-      IOUtils.cleanup(LOG, errorFileIS);
+      IOUtils.cleanupWithLogger(LOG, errorFileIS);
     }
 
     this.dispatcher.getEventHandler()
@@ -578,6 +582,29 @@
             diagnosticInfo.toString()));
   }
 
+  private String analysesErrorMsgOfContainerExitWithFailure(String errorMsg) {
+    StringBuilder analysis = new StringBuilder();
+    if (errorMsg.indexOf("Error: Could not find or load main class"
+        + " org.apache.hadoop.mapreduce") != -1) {
+      analysis.append("Please check whether your etc/hadoop/mapred-site.xml "
+          + "contains the below configuration:\n");
+      analysis.append("<property>\n")
+          .append("  <name>yarn.app.mapreduce.am.env</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n<property>\n")
+          .append("  <name>mapreduce.map.env</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n<property>\n")
+          .append("  <name>mapreduce.reduce.e nv</name>\n")
+          .append("  <value>HADOOP_MAPRED_HOME=${full path of your hadoop "
+              + "distribution directory}</value>\n")
+          .append("</property>\n");
+    }
+    return analysis.toString();
+  }
+
   protected String getPidFileSubpath(String appIdStr, String containerIdStr) {
     return getContainerPrivateDir(appIdStr, containerIdStr) + Path.SEPARATOR
         + String.format(ContainerLaunch.PID_FILE_NAME_FMT, containerIdStr);
@@ -904,6 +931,10 @@
       sb.append(LINE_SEPARATOR);
     }
 
+    public void setExitOnFailure() {
+      // No-op by default; overridden where the shell supports fail-fast.
+    }
+
     protected abstract void link(Path src, Path dst) throws IOException;
 
     protected abstract void mkdir(Path path) throws IOException;
@@ -981,6 +1012,11 @@
           output.toString(), "\"");
       line("find -L . -maxdepth 5 -type l -ls 1>>\"", output.toString(), "\"");
     }
+
+    @Override
+    public void setExitOnFailure() {
+      line("set -o pipefail -e");
+    }
   }
 
   private static final class WindowsShellScriptBuilder
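
Note: the launch-failure path above tails the container's stderr file and,
when it recognizes a known symptom, appends a suggested fix to the
diagnostics (the new setExitOnFailure() similarly makes Unix launch scripts
fail fast via "set -o pipefail -e"). A self-contained sketch of the
tail-and-analyze pattern; the path handling and sizes are placeholders, not
the NM's actual file layout:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.charset.StandardCharsets;

    class ErrorLogTail {
      static String tail(String path, int tailSizeInBytes) throws IOException {
        try (RandomAccessFile f = new RandomAccessFile(path, "r")) {
          int len = (int) Math.min(tailSizeInBytes, f.length());
          byte[] buf = new byte[len];
          f.seek(f.length() - len);
          f.readFully(buf);
          return new String(buf, StandardCharsets.UTF_8);
        }
      }

      static String analyze(String errorMsg) {
        // Same heuristic as the patch: a missing MapReduce main class
        // usually means HADOOP_MAPRED_HOME is not set for the AM, map
        // and reduce environments in mapred-site.xml.
        if (errorMsg.contains("Error: Could not find or load main class"
            + " org.apache.hadoop.mapreduce")) {
          return "Set HADOOP_MAPRED_HOME in yarn.app.mapreduce.am.env, "
              + "mapreduce.map.env and mapreduce.reduce.env.";
        }
        return "";
      }
    }
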
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
index ac20fa8..2b032e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
@@ -38,6 +36,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
@@ -48,7 +48,8 @@
  */
 public class ContainerRelaunch extends ContainerLaunch {
 
-  private static final Log LOG = LogFactory.getLog(ContainerRelaunch.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainerRelaunch.class);
 
   public ContainerRelaunch(Context context, Configuration configuration,
       Dispatcher dispatcher, ContainerExecutor exec, Application app,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
index d4a7bfd..25909b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java
@@ -23,9 +23,9 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -55,7 +55,8 @@
 public class ContainersLauncher extends AbstractService
     implements EventHandler<ContainersLauncherEvent> {
 
-  private static final Log LOG = LogFactory.getLog(ContainersLauncher.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ContainersLauncher.class);
 
   private final Context context;
   private final ContainerExecutor exec;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
index a04a23f..2eba0dff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
@@ -21,9 +21,9 @@
 import java.io.File;
 import java.io.InterruptedIOException;
 import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -47,8 +47,8 @@
  */
 public class RecoveredContainerLaunch extends ContainerLaunch {
 
-  private static final Log LOG = LogFactory.getLog(
-    RecoveredContainerLaunch.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(RecoveredContainerLaunch.class);
 
   public RecoveredContainerLaunch(Context context, Configuration configuration,
       Dispatcher dispatcher, ContainerExecutor exec, Application app,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 1d874a7..5a3ce74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -22,8 +22,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -46,7 +46,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class PrivilegedOperationExecutor {
-  private static final Log LOG = LogFactory.getLog(PrivilegedOperationExecutor
+  private static final Logger LOG =
+       LoggerFactory.getLogger(PrivilegedOperationExecutor
       .class);
   private volatile static PrivilegedOperationExecutor instance;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
index e0b43d3..42fc634 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsBlkioResourceHandlerImpl.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -45,8 +45,8 @@
 @InterfaceStability.Unstable
 public class CGroupsBlkioResourceHandlerImpl implements DiskResourceHandler {
 
-  static final Log LOG = LogFactory
-      .getLog(CGroupsBlkioResourceHandlerImpl.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(CGroupsBlkioResourceHandlerImpl.class);
 
   private CGroupsHandler cGroupsHandler;
   // Arbitrarily choose a weight - all that matters is that all containers
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
index 830782d..7ea7be2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java
@@ -20,8 +20,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -59,7 +59,8 @@
 @InterfaceAudience.Private
 public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler {
 
-  static final Log LOG = LogFactory.getLog(CGroupsCpuResourceHandlerImpl.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(CGroupsCpuResourceHandlerImpl.class);
 
   private CGroupsHandler cGroupsHandler;
   private boolean strictResourceUsageMode = false;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 9fd20eb..44ae12b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -22,8 +22,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -59,7 +59,8 @@
 @InterfaceStability.Unstable
 class CGroupsHandlerImpl implements CGroupsHandler {
 
-  private static final Log LOG = LogFactory.getLog(CGroupsHandlerImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(CGroupsHandlerImpl.class);
   private static final String MTAB_FILE = "/proc/mounts";
   private static final String CGROUPS_FSTYPE = "cgroup";
 
@@ -243,7 +244,7 @@
         LOG.warn("Error while reading " + mtab, e);
       }
     } finally {
-      IOUtils.cleanup(LOG, in);
+      IOUtils.cleanupWithLogger(LOG, in);
     }
 
     return ret;
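
Note: IOUtils.cleanup(Log, Closeable...) takes a commons-logging Log, so
after the SLF4J migration these call sites move to the replacement
IOUtils.cleanupWithLogger(Logger, Closeable...), which closes quietly and
logs close() failures instead of propagating them. A small usage sketch with
a placeholder path:

    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CleanupExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(CleanupExample.class);

      static void readConfig(String path) throws IOException {
        FileInputStream in = null;
        try {
          in = new FileInputStream(path);
          // ... read from the stream ...
        } finally {
          // Closes the stream, logging rather than throwing on failure.
          IOUtils.cleanupWithLogger(LOG, in);
        }
      }
    }
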
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index d159aad..d3e787e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -44,8 +44,8 @@
 @InterfaceStability.Unstable
 public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
 
-  static final Log LOG = LogFactory.getLog(
-      CGroupsMemoryResourceHandlerImpl.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(CGroupsMemoryResourceHandlerImpl.class);
   private static final CGroupsHandler.CGroupController MEMORY =
       CGroupsHandler.CGroupController.MEMORY;
   private static final int OPPORTUNISTIC_SWAPPINESS = 100;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
index 4d137f0..3c61cd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
@@ -21,8 +21,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -49,7 +49,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class ResourceHandlerModule {
-  static final Log LOG = LogFactory.getLog(ResourceHandlerModule.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(ResourceHandlerModule.class);
   private static volatile ResourceHandlerChain resourceHandlerChain;
 
   /**
@@ -267,11 +268,11 @@
       // Collect the valid subsystem names
       cgroupList.retainAll(validCGroups);
       if (!cgroupList.isEmpty()) {
-        if (candidate.isDirectory() && candidate.canWrite()) {
+        if (candidate.isDirectory()) {
           pathSubsystemMappings.put(candidate.getAbsolutePath(), cgroupList);
         } else {
-          LOG.warn("The following cgroup is not a directory or it is not"
-              + " writable" + candidate.getAbsolutePath());
+          LOG.warn("The following cgroup is not a directory " +
+              candidate.getAbsolutePath());
         }
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
index 3bb8035..126685f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -43,8 +43,8 @@
 public class TrafficControlBandwidthHandlerImpl
     implements OutboundBandwidthResourceHandler {
 
-  private static final Log LOG = LogFactory
-      .getLog(TrafficControlBandwidthHandlerImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TrafficControlBandwidthHandlerImpl.class);
   //In the absence of 'scheduling' support, we'll 'infer' the guaranteed
   //outbound bandwidth for each container based on this number. This will
   //likely go away once we add support on the RM for this resource type.
@@ -117,7 +117,7 @@
         .append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ")
         .append(containerBandwidthMbit);
 
-    LOG.info(logLine);
+    LOG.info(logLine.toString());
     trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit);
 
     return null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
index 4698eb3..83db5fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -29,6 +27,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.*;
 import java.util.ArrayList;
@@ -46,7 +46,8 @@
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable class TrafficController {
-  private static final Log LOG = LogFactory.getLog(TrafficController.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TrafficController.class);
   private static final int ROOT_QDISC_HANDLE = 42;
   private static final int ZERO_CLASS_ID = 0;
   private static final int ROOT_CLASS_ID = 1;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index ee94aaa..d09e4a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -34,6 +32,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.List;
 
@@ -48,8 +48,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
-  private static final Log LOG =
-      LogFactory.getLog(DefaultLinuxContainerRuntime.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultLinuxContainerRuntime.class);
   private final PrivilegedOperationExecutor privilegedOperationExecutor;
   private Configuration conf;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
index 90b13a2..5273334 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,6 +28,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 
@@ -45,8 +45,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
-  private static final Log LOG = LogFactory
-      .getLog(DelegatingLinuxContainerRuntime.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DelegatingLinuxContainerRuntime.class);
   private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime;
   private DockerLinuxContainerRuntime dockerLinuxContainerRuntime;
   private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index e058d6e..8217564 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -21,8 +21,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -132,8 +132,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
-  private static final Log LOG = LogFactory.getLog(
-      DockerLinuxContainerRuntime.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DockerLinuxContainerRuntime.class);
 
   // This validates that the image is a proper docker image
   public static final String DOCKER_IMAGE_PATTERN =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
index 0b858bc..cfafcde 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager.
     containermanager.linux.runtime;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,7 +28,8 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.FilePermission;
 import java.io.IOException;
@@ -117,8 +116,8 @@
 @InterfaceStability.Unstable
 public class JavaSandboxLinuxContainerRuntime
     extends DefaultLinuxContainerRuntime {
-  private static final Log LOG =
-      LogFactory.getLog(DefaultLinuxContainerRuntime.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JavaSandboxLinuxContainerRuntime.class);
   private Configuration configuration;
   private SandboxMode sandboxMode;
 
@@ -254,7 +253,7 @@
       } catch (IOException e) {
         throw new ContainerExecutionException(e);
       } finally {
-        IOUtils.cleanup(LOG, policyOutputStream);
+        IOUtils.cleanupWithLogger(LOG, policyOutputStream);
       }
     }
   }
@@ -417,7 +416,7 @@
         + SEPARATOR + "-\" {%n" +
         "  permission " + AllPermission.class.getCanonicalName() + ";%n};%n";
     static final Logger LOG =
-        Logger.getLogger(NMContainerPolicyUtils.class);
+            LoggerFactory.getLogger(NMContainerPolicyUtils.class);
 
     /**
      * Write new policy file to policyOutStream which will include read access
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
index faf955f..536a22d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -20,13 +20,13 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -38,7 +38,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public final class DockerClient {
-  private static final Log LOG = LogFactory.getLog(DockerClient.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DockerClient.class);
   private static final String TMP_FILE_PREFIX = "docker.";
   private static final String TMP_FILE_SUFFIX = ".cmd";
   private final String tmpDirPath;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
index 9026d22..5739912 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
@@ -16,13 +16,13 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 
@@ -30,7 +30,8 @@
  * Utility class for executing common docker operations.
  */
 public final class DockerCommandExecutor {
-  private static final Log LOG = LogFactory.getLog(DockerCommandExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DockerCommandExecutor.class);
 
   /**
    * Potential states that the docker status can return.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index bb4b7f3..2378c45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 
 import static org.apache.hadoop.util.Shell.getAllShells;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataInputStream;
 import java.io.File;
@@ -44,8 +46,6 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
@@ -86,7 +86,8 @@
 
 public class ContainerLocalizer {
 
-  static final Log LOG = LogFactory.getLog(ContainerLocalizer.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(ContainerLocalizer.class);
 
   public static final String FILECACHE = "filecache";
   public static final String APPCACHE = "appcache";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index 47e6a55..dd31543 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -26,9 +26,9 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -58,7 +58,8 @@
 
 class LocalResourcesTrackerImpl implements LocalResourcesTracker {
 
-  static final Log LOG = LogFactory.getLog(LocalResourcesTrackerImpl.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(LocalResourcesTrackerImpl.class);
   private static final String RANDOM_DIR_REGEX = "-?\\d+";
   private static final Pattern RANDOM_DIR_PATTERN = Pattern
       .compile(RANDOM_DIR_REGEX);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index 98f4e21..bd9602e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -24,9 +24,9 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -53,7 +53,8 @@
  */
 public class LocalizedResource implements EventHandler<ResourceEvent> {
 
-  private static final Log LOG = LogFactory.getLog(LocalizedResource.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LocalizedResource.class);
 
   volatile Path localPath;
   volatile long size = -1;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 5bc0da7..c37f2e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -19,6 +19,8 @@
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -50,8 +52,6 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -148,7 +148,8 @@
 public class ResourceLocalizationService extends CompositeService
     implements EventHandler<LocalizationEvent>, LocalizationProtocol {
 
-  private static final Log LOG = LogFactory.getLog(ResourceLocalizationService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ResourceLocalizationService.class);
   public static final String NM_PRIVATE_DIR = "nmPrivate";
   public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 0700);
 
@@ -956,7 +957,7 @@
           }
         }
       } catch(Throwable t) {
-        LOG.fatal("Error: Shutting down", t);
+        LOG.error("Error: Shutting down", t);
       } finally {
         LOG.info("Public cache exiting");
         threadPool.shutdownNow();
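
`LOG.fatal` becomes `LOG.error` above because the SLF4J API defines no FATAL level; its `Logger` tops out at ERROR. A minimal sketch of the substitution (class name hypothetical):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class FatalToErrorSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(FatalToErrorSketch.class);

  void shutdownOn(Throwable t) {
    // commons-logging offered LOG.fatal(msg, t); under SLF4J the
    // former FATAL call sites are rewritten as error().
    LOG.error("Error: Shutting down", t);
  }
}
```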
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceSet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceSet.java
index 5da3abc..6df10737 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceSet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceSet.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.net.URISyntaxException;
 import java.util.ArrayList;
@@ -40,7 +40,8 @@
  */
 public class ResourceSet {
 
-  private static final Log LOG = LogFactory.getLog(ResourceSet.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(ResourceSet.class);
 
   // resources by localization state (localized, pending, failed)
   private Map<String, Path> localizedResources =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
index bd204c4..423b528 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
 
 import java.lang.annotation.Annotation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityInfo;
@@ -32,7 +32,8 @@
 
 public class LocalizerSecurityInfo extends SecurityInfo {
 
-  private static final Log LOG = LogFactory.getLog(LocalizerSecurityInfo.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LocalizerSecurityInfo.class);
 
   @Override
   public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
index 7c4f982..89787af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
 
 import java.util.Collection;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -30,8 +30,8 @@
 public class LocalizerTokenSelector implements
     TokenSelector<LocalizerTokenIdentifier> {
 
-  private static final Log LOG = LogFactory
-      .getLog(LocalizerTokenSelector.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LocalizerTokenSelector.class);
 
   @SuppressWarnings("unchecked")
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java
index 16c36eb2..9afbf3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploadService.java
@@ -22,9 +22,9 @@
 import java.net.InetSocketAddress;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -50,8 +50,8 @@
  */
 public class SharedCacheUploadService extends AbstractService implements
     EventHandler<SharedCacheUploadEvent> {
-  private static final Log LOG =
-      LogFactory.getLog(SharedCacheUploadService.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SharedCacheUploadService.class);
 
   private boolean enabled;
   private FileSystem fs;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
index e077f89..23aa5b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/sharedcache/SharedCacheUploader.java
@@ -24,9 +24,9 @@
 import java.net.URISyntaxException;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ThreadLocalRandom;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -60,7 +60,8 @@
   static final FsPermission FILE_PERMISSION =
       new FsPermission((short)00555);
 
-  private static final Log LOG = LogFactory.getLog(SharedCacheUploader.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(SharedCacheUploader.class);
 
   private final LocalResource resource;
   private final Path localPath;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 0d9e686..51c63c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -19,11 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -32,14 +28,12 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.Credentials;
@@ -57,7 +51,9 @@
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
-import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationTFileController;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.server.api.ContainerLogAggregationPolicy;
 import org.apache.hadoop.yarn.server.api.ContainerLogContext;
@@ -71,7 +67,6 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
 
@@ -83,21 +78,9 @@
 
 public class AppLogAggregatorImpl implements AppLogAggregator {
 
-  private static final Log LOG = LogFactory
-      .getLog(AppLogAggregatorImpl.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(AppLogAggregatorImpl.class);
   private static final int THREAD_SLEEP_TIME = 1000;
-  // This is temporary solution. The configuration will be deleted once
-  // we find a more scalable method to only write a single log file per LRS.
-  private static final String NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP
-      = YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app";
-  private static final int
-      DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP = 30;
-  
-  // This configuration is for debug and test purpose. By setting
-  // this configuration as true. We can break the lower bound of
-  // NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS.
-  private static final String NM_LOG_AGGREGATION_DEBUG_ENABLED
-      = YarnConfiguration.NM_PREFIX + "log-aggregation.debug-enabled";
 
   private final LocalDirsHandlerService dirsHandler;
   private final Dispatcher dispatcher;
@@ -118,10 +101,8 @@
   private final FileContext lfs;
   private final LogAggregationContext logAggregationContext;
   private final Context context;
-  private final int retentionSize;
-  private final long rollingMonitorInterval;
-  private final boolean logAggregationInRolling;
   private final NodeId nodeId;
+  private final LogAggregationFileControllerContext logControllerContext;
 
   // These variables are only for testing
   private final AtomicBoolean waiting = new AtomicBoolean(false);
@@ -134,6 +115,8 @@
       new HashMap<ContainerId, ContainerLogAggregator>();
   private final ContainerLogAggregationPolicy logAggPolicy;
 
+  private final LogAggregationFileController logAggregationFileController;
+
 
   /**
    * The value recovered from state store to determine the age of application
@@ -151,7 +134,7 @@
       FileContext lfs, long rollingMonitorInterval) {
     this(dispatcher, deletionService, conf, appId, userUgi, nodeId,
         dirsHandler, remoteNodeLogFileForApp, appAcls,
-        logAggregationContext, context, lfs, rollingMonitorInterval, -1);
+        logAggregationContext, context, lfs, rollingMonitorInterval, -1, null);
   }
 
   public AppLogAggregatorImpl(Dispatcher dispatcher,
@@ -162,6 +145,21 @@
       LogAggregationContext logAggregationContext, Context context,
       FileContext lfs, long rollingMonitorInterval,
       long recoveredLogInitedTime) {
+    this(dispatcher, deletionService, conf, appId, userUgi, nodeId,
+        dirsHandler, remoteNodeLogFileForApp, appAcls,
+        logAggregationContext, context, lfs, rollingMonitorInterval,
+        recoveredLogInitedTime, null);
+  }
+
+  public AppLogAggregatorImpl(Dispatcher dispatcher,
+      DeletionService deletionService, Configuration conf,
+      ApplicationId appId, UserGroupInformation userUgi, NodeId nodeId,
+      LocalDirsHandlerService dirsHandler, Path remoteNodeLogFileForApp,
+      Map<ApplicationAccessType, String> appAcls,
+      LogAggregationContext logAggregationContext, Context context,
+      FileContext lfs, long rollingMonitorInterval,
+      long recoveredLogInitedTime,
+      LogAggregationFileController logAggregationFileController) {
     this.dispatcher = dispatcher;
     this.conf = conf;
     this.delService = deletionService;
@@ -169,31 +167,41 @@
     this.applicationId = appId.toString();
     this.userUgi = userUgi;
     this.dirsHandler = dirsHandler;
-    this.remoteNodeLogFileForApp = remoteNodeLogFileForApp;
-    this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
     this.pendingContainers = new LinkedBlockingQueue<ContainerId>();
     this.appAcls = appAcls;
     this.lfs = lfs;
     this.logAggregationContext = logAggregationContext;
     this.context = context;
     this.nodeId = nodeId;
-    int configuredRentionSize =
-        conf.getInt(NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
-            DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
-    if (configuredRentionSize <= 0) {
-      this.retentionSize =
-          DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
-    } else {
-      this.retentionSize = configuredRentionSize;
-    }
-    this.rollingMonitorInterval = rollingMonitorInterval;
-    this.logAggregationInRolling =
-        this.rollingMonitorInterval <= 0 || this.logAggregationContext == null
-            || this.logAggregationContext.getRolledLogsIncludePattern() == null
-            || this.logAggregationContext.getRolledLogsIncludePattern()
-              .isEmpty() ? false : true;
     this.logAggPolicy = getLogAggPolicy(conf);
     this.recoveredLogInitedTime = recoveredLogInitedTime;
+    if (logAggregationFileController == null) {
+      // by default, use T-File Controller
+      this.logAggregationFileController = new LogAggregationTFileController();
+      this.logAggregationFileController.initialize(conf, "TFile");
+      this.logAggregationFileController.verifyAndCreateRemoteLogDir();
+      this.logAggregationFileController.createAppDir(
+          this.userUgi.getShortUserName(), appId, userUgi);
+      this.remoteNodeLogFileForApp = this.logAggregationFileController
+          .getRemoteNodeLogFileForApp(appId,
+              this.userUgi.getShortUserName(), nodeId);
+      this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
+    } else {
+      this.logAggregationFileController = logAggregationFileController;
+      this.remoteNodeLogFileForApp = remoteNodeLogFileForApp;
+      this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
+    }
+    boolean logAggregationInRolling =
+        rollingMonitorInterval <= 0 || this.logAggregationContext == null
+            || this.logAggregationContext.getRolledLogsIncludePattern() == null
+            || this.logAggregationContext.getRolledLogsIncludePattern()
+                .isEmpty() ? false : true;
+    logControllerContext = new LogAggregationFileControllerContext(
+            this.remoteNodeLogFileForApp,
+            this.remoteNodeTmpLogFileForApp,
+            logAggregationInRolling,
+            rollingMonitorInterval,
+            this.appId, this.appAcls, this.nodeId, this.userUgi);
   }
 
   private ContainerLogAggregationPolicy getLogAggPolicy(Configuration conf) {
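
The new constructor branch above falls back to a TFile-backed controller when none is injected. A condensed sketch of that fallback, using only the controller methods this patch itself calls (`initialize`, `verifyAndCreateRemoteLogDir`, `createAppDir`, `getRemoteNodeLogFileForApp`); checked-exception handling is simplified here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationTFileController;

class TFileFallbackSketch {
  // Mirrors the null-controller branch: build a TFile controller, make
  // sure the remote directories exist, then derive the per-node log path.
  static LogAggregationFileController defaultController(Configuration conf,
      ApplicationId appId, UserGroupInformation userUgi, NodeId nodeId)
      throws Exception {
    LogAggregationFileController controller =
        new LogAggregationTFileController();
    controller.initialize(conf, "TFile");
    controller.verifyAndCreateRemoteLogDir();
    controller.createAppDir(userUgi.getShortUserName(), appId, userUgi);
    Path remoteLogFile = controller.getRemoteNodeLogFileForApp(
        appId, userUgi.getShortUserName(), nodeId);
    // remoteLogFile corresponds to remoteNodeLogFileForApp above.
    return controller;
  }
}
```

Note also that the `? false : true` ternary computing `logAggregationInRolling` amounts to a plain negation: rolling aggregation is enabled exactly when the monitoring interval is positive and a non-empty rolled-logs include pattern is configured.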
@@ -293,14 +301,9 @@
     logAggregationTimes++;
     String diagnosticMessage = "";
     boolean logAggregationSucceedInThisCycle = true;
-    try (LogWriter writer = createLogWriter()) {
+    try {
       try {
-        writer.initialize(this.conf, this.remoteNodeTmpLogFileForApp,
-            this.userUgi);
-        // Write ACLs once when the writer is created.
-        writer.writeApplicationACLs(appAcls);
-        writer.writeApplicationOwner(this.userUgi.getShortUserName());
-
+        logAggregationFileController.initializeWriter(logControllerContext);
       } catch (IOException e1) {
         logAggregationSucceedInThisCycle = false;
         LOG.error("Cannot create writer for app " + this.applicationId
@@ -318,8 +321,8 @@
           containerLogAggregators.put(container, aggregator);
         }
         Set<Path> uploadedFilePathsInThisCycle =
-            aggregator.doContainerLogAggregation(writer, appFinished,
-            finishedContainers.contains(container));
+            aggregator.doContainerLogAggregation(logAggregationFileController,
+            appFinished, finishedContainers.contains(container));
         if (uploadedFilePathsInThisCycle.size() > 0) {
           uploadedLogsInThisCycle = true;
           List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
@@ -337,60 +340,28 @@
         }
       }
 
-      // Before upload logs, make sure the number of existing logs
-      // is smaller than the configured NM log aggregation retention size.
-      if (uploadedLogsInThisCycle && logAggregationInRolling) {
-        cleanOldLogs();
-        cleanupOldLogTimes++;
-      }
-
-      long currentTime = System.currentTimeMillis();
-      final Path renamedPath = getRenamedPath(currentTime);
-
-      final boolean rename = uploadedLogsInThisCycle;
+      logControllerContext.setUploadedLogsInThisCycle(uploadedLogsInThisCycle);
+      logControllerContext.setLogUploadTimeStamp(System.currentTimeMillis());
+      logControllerContext.increLogAggregationTimes();
       try {
-        userUgi.doAs(new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws Exception {
-            FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
-            if (rename) {
-              remoteFS.rename(remoteNodeTmpLogFileForApp, renamedPath);
-            } else {
-              remoteFS.delete(remoteNodeTmpLogFileForApp, false);
-            }
-            return null;
-          }
-        });
-        diagnosticMessage =
-            "Log uploaded successfully for Application: " + appId
-                + " in NodeManager: "
-                + LogAggregationUtils.getNodeString(nodeId) + " at "
-                + Times.format(currentTime) + "\n";
+        this.logAggregationFileController.postWrite(logControllerContext);
+        diagnosticMessage = "Log uploaded successfully for Application: "
+            + appId + " in NodeManager: "
+            + LogAggregationUtils.getNodeString(nodeId) + " at "
+            + Times.format(logControllerContext.getLogUploadTimeStamp())
+            + "\n";
       } catch (Exception e) {
-        LOG.error(
-          "Failed to move temporary log file to final location: ["
-              + remoteNodeTmpLogFileForApp + "] to ["
-              + renamedPath + "]", e);
-        diagnosticMessage =
-            "Log uploaded failed for Application: " + appId
-                + " in NodeManager: "
-                + LogAggregationUtils.getNodeString(nodeId) + " at "
-                + Times.format(currentTime) + "\n";
+        diagnosticMessage = e.getMessage();
         renameTemporaryLogFileFailed = true;
         logAggregationSucceedInThisCycle = false;
       }
     } finally {
       sendLogAggregationReport(logAggregationSucceedInThisCycle,
           diagnosticMessage, appFinished);
+      logAggregationFileController.closeWriter();
     }
   }
 
-  private Path getRenamedPath(long currentTime) {
-    return this.rollingMonitorInterval <= 0 ? remoteNodeLogFileForApp
-        : new Path(remoteNodeLogFileForApp.getParent(),
-        remoteNodeLogFileForApp.getName() + "_" + currentTime);
-  }
-
   private void addCredentials() {
     if (UserGroupInformation.isSecurityEnabled()) {
       Credentials systemCredentials =
@@ -407,11 +378,6 @@
     }
   }
 
-  @VisibleForTesting
-  protected LogWriter createLogWriter() {
-    return new LogWriter();
-  }
-
   private void sendLogAggregationReport(
       boolean logAggregationSucceedInThisCycle, String diagnosticMessage,
       boolean appFinished) {
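
Taken together, the rewritten upload cycle follows a fixed controller lifecycle. A sketch of that sequence, using only calls that appear in this patch (`initializeWriter`, `write`, the context setters, `postWrite`, `closeWriter`):

```java
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;

class UploadCycleSketch {
  static void oneCycle(LogAggregationFileController controller,
      LogAggregationFileControllerContext ctx,
      LogKey key, LogValue value) throws Exception {
    controller.initializeWriter(ctx);      // was writer.initialize(...)
    try {
      controller.write(key, value);        // was writer.append(key, value)
      ctx.setUploadedLogsInThisCycle(true);
      ctx.setLogUploadTimeStamp(System.currentTimeMillis());
      ctx.increLogAggregationTimes();
      controller.postWrite(ctx);           // replaces the doAs rename/delete
    } finally {
      controller.closeWriter();
    }
  }
}
```

The doAs block that renamed or deleted the tmp file is replaced directly by `postWrite`, and the `cleanOldLogs`/`getRenamedPath` retention bookkeeping deleted below presumably moves behind the controller as well, which is why the retention fields disappear from this class.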
@@ -442,60 +408,6 @@
     this.context.getLogAggregationStatusForApps().add(report);
   }
 
-  private void cleanOldLogs() {
-    try {
-      final FileSystem remoteFS =
-          this.remoteNodeLogFileForApp.getFileSystem(conf);
-      Path appDir =
-          this.remoteNodeLogFileForApp.getParent().makeQualified(
-            remoteFS.getUri(), remoteFS.getWorkingDirectory());
-      Set<FileStatus> status =
-          new HashSet<FileStatus>(Arrays.asList(remoteFS.listStatus(appDir)));
-
-      Iterable<FileStatus> mask =
-          Iterables.filter(status, new Predicate<FileStatus>() {
-            @Override
-            public boolean apply(FileStatus next) {
-              return next.getPath().getName()
-                .contains(LogAggregationUtils.getNodeString(nodeId))
-                && !next.getPath().getName().endsWith(
-                    LogAggregationUtils.TMP_FILE_SUFFIX);
-            }
-          });
-      status = Sets.newHashSet(mask);
-      // Normally, we just need to delete one oldest log
-      // before we upload a new log.
-      // If we can not delete the older logs in this cycle,
-      // we will delete them in next cycle.
-      if (status.size() >= this.retentionSize) {
-        // sort by the lastModificationTime ascending
-        List<FileStatus> statusList = new ArrayList<FileStatus>(status);
-        Collections.sort(statusList, new Comparator<FileStatus>() {
-          public int compare(FileStatus s1, FileStatus s2) {
-            return s1.getModificationTime() < s2.getModificationTime() ? -1
-                : s1.getModificationTime() > s2.getModificationTime() ? 1 : 0;
-          }
-        });
-        for (int i = 0 ; i <= statusList.size() - this.retentionSize; i++) {
-          final FileStatus remove = statusList.get(i);
-          try {
-            userUgi.doAs(new PrivilegedExceptionAction<Object>() {
-              @Override
-              public Object run() throws Exception {
-                remoteFS.delete(remove.getPath(), false);
-                return null;
-              }
-            });
-          } catch (Exception e) {
-            LOG.error("Failed to delete " + remove.getPath(), e);
-          }
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Failed to clean old logs", e);
-    }
-  }
-
   @SuppressWarnings("unchecked")
   @Override
   public void run() {
@@ -523,8 +435,8 @@
       synchronized(this) {
         try {
           waiting.set(true);
-          if (logAggregationInRolling) {
-            wait(this.rollingMonitorInterval * 1000);
+          if (logControllerContext.isLogAggregationInRolling()) {
+            wait(logControllerContext.getRollingMonitorInterval() * 1000);
             if (this.appFinishing.get() || this.aborted.get()) {
               break;
             }
@@ -653,7 +565,8 @@
           recoveredLogInitedTime, logRetentionSecs * 1000);
     }
 
-    public Set<Path> doContainerLogAggregation(LogWriter writer,
+    public Set<Path> doContainerLogAggregation(
+        LogAggregationFileController logAggregationFileController,
         boolean appFinished, boolean containerFinished) {
       LOG.info("Uploading logs for container " + containerId
           + ". Current good log dirs are "
@@ -665,7 +578,7 @@
             this.uploadedFileMeta,  retentionContext, appFinished,
             containerFinished);
       try {
-        writer.append(logKey, logValue);
+        logAggregationFileController.write(logKey, logValue);
       } catch (Exception e) {
         LOG.error("Couldn't upload logs for " + containerId
             + ". Skipping this container.", e);
@@ -708,4 +621,15 @@
   int getCleanupOldLogTimes() {
     return this.cleanupOldLogTimes;
   }
+
+  @VisibleForTesting
+  public LogAggregationFileController getLogAggregationFileController() {
+    return this.logAggregationFileController;
+  }
+
+  @VisibleForTesting
+  public LogAggregationFileControllerContext
+      getLogAggregationFileControllerContext() {
+    return this.logControllerContext;
+  }
 }

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index 38315fd..4938939 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -18,24 +18,19 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
@@ -48,7 +43,8 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.apache.hadoop.yarn.server.api.ContainerLogContext;
 import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -56,7 +52,6 @@
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
@@ -69,7 +64,8 @@
 public class LogAggregationService extends AbstractService implements
     LogHandler {
 
-  private static final Log LOG = LogFactory.getLog(LogAggregationService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(LogAggregationService.class);
   private static final long MIN_LOG_ROLLING_INTERVAL = 3600;
  // This configuration is for debug and test purposes. By setting
  // this configuration to true, we can break the lower bound of
@@ -78,36 +74,14 @@
       = YarnConfiguration.NM_PREFIX + "log-aggregation.debug-enabled";
   private long rollingMonitorInterval;
 
-  /*
-   * Expected deployment TLD will be 1777, owner=<NMOwner>, group=<NMGroup -
-   * Group to which NMOwner belongs> App dirs will be created as 770,
-   * owner=<AppOwner>, group=<NMGroup>: so that the owner and <NMOwner> can
-   * access / modify the files.
-   * <NMGroup> should obviously be a limited access group.
-   */
-  /**
-   * Permissions for the top level directory under which app directories will be
-   * created.
-   */
-  private static final FsPermission TLDIR_PERMISSIONS = FsPermission
-      .createImmutable((short) 01777);
-  /**
-   * Permissions for the Application directory.
-   */
-  private static final FsPermission APP_DIR_PERMISSIONS = FsPermission
-      .createImmutable((short) 0770);
-
   private final Context context;
   private final DeletionService deletionService;
   private final Dispatcher dispatcher;
 
   private LocalDirsHandlerService dirsHandler;
-  Path remoteRootLogDir;
-  String remoteRootLogDirSuffix;
   private NodeId nodeId;
 
   private final ConcurrentMap<ApplicationId, AppLogAggregator> appLogAggregators;
-  private boolean logPermError = true;
 
   @VisibleForTesting
   ExecutorService threadPool;
@@ -124,12 +98,6 @@
   }
 
   protected void serviceInit(Configuration conf) throws Exception {
-    this.remoteRootLogDir =
-        new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
-    this.remoteRootLogDirSuffix =
-        conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
-            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
     int threadPoolSize = getAggregatorThreadPoolSize(conf);
     this.threadPool = HadoopExecutors.newFixedThreadPool(threadPoolSize,
         new ThreadFactoryBuilder()
@@ -217,158 +185,6 @@
     }
   }
 
-  protected FileSystem getFileSystem(Configuration conf) throws IOException {
-    return this.remoteRootLogDir.getFileSystem(conf);
-  }
-
-  void verifyAndCreateRemoteLogDir(Configuration conf) {
-    // Checking the existence of the TLD
-    FileSystem remoteFS = null;
-    try {
-      remoteFS = getFileSystem(conf);
-    } catch (IOException e) {
-      throw new YarnRuntimeException("Unable to get Remote FileSystem instance", e);
-    }
-    boolean remoteExists = true;
-    try {
-      FsPermission perms =
-          remoteFS.getFileStatus(this.remoteRootLogDir).getPermission();
-      if (!perms.equals(TLDIR_PERMISSIONS) && logPermError) {
-        LOG.warn("Remote Root Log Dir [" + this.remoteRootLogDir
-            + "] already exist, but with incorrect permissions. "
-            + "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
-            + "]." + " The cluster may have problems with multiple users.");
-        logPermError = false;
-      } else {
-        logPermError = true;
-      }
-    } catch (FileNotFoundException e) {
-      remoteExists = false;
-    } catch (IOException e) {
-      throw new YarnRuntimeException(
-          "Failed to check permissions for dir ["
-              + this.remoteRootLogDir + "]", e);
-    }
-    if (!remoteExists) {
-      LOG.warn("Remote Root Log Dir [" + this.remoteRootLogDir
-          + "] does not exist. Attempting to create it.");
-      try {
-        Path qualified =
-            this.remoteRootLogDir.makeQualified(remoteFS.getUri(),
-                remoteFS.getWorkingDirectory());
-        remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
-        remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
-
-        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
-        String primaryGroupName = null;
-        try {
-          primaryGroupName = loginUser.getPrimaryGroupName();
-        } catch (IOException e) {
-          LOG.warn("No primary group found. The remote root log directory" +
-              " will be created with the HDFS superuser being its group " +
-              "owner. JobHistoryServer may be unable to read the directory.");
-        }
-        // set owner on the remote directory only if the primary group exists
-        if (primaryGroupName != null) {
-          remoteFS.setOwner(qualified,
-              loginUser.getShortUserName(), primaryGroupName);
-        }
-      } catch (IOException e) {
-        throw new YarnRuntimeException("Failed to create remoteLogDir ["
-            + this.remoteRootLogDir + "]", e);
-      }
-    }
-  }
-
-  Path getRemoteNodeLogFileForApp(ApplicationId appId, String user) {
-    return LogAggregationUtils.getRemoteNodeLogFileForApp(
-        this.remoteRootLogDir, appId, user, this.nodeId,
-        this.remoteRootLogDirSuffix);
-  }
-
-  Path getRemoteAppLogDir(ApplicationId appId, String user) {
-    return LogAggregationUtils.getRemoteAppLogDir(this.remoteRootLogDir, appId,
-        user, this.remoteRootLogDirSuffix);
-  }
-
-  private void createDir(FileSystem fs, Path path, FsPermission fsPerm)
-      throws IOException {
-    FsPermission dirPerm = new FsPermission(fsPerm);
-    fs.mkdirs(path, dirPerm);
-    FsPermission umask = FsPermission.getUMask(fs.getConf());
-    if (!dirPerm.equals(dirPerm.applyUMask(umask))) {
-      fs.setPermission(path, new FsPermission(fsPerm));
-    }
-  }
-
-  private boolean checkExists(FileSystem fs, Path path, FsPermission fsPerm)
-      throws IOException {
-    boolean exists = true;
-    try {
-      FileStatus appDirStatus = fs.getFileStatus(path);
-      if (!APP_DIR_PERMISSIONS.equals(appDirStatus.getPermission())) {
-        fs.setPermission(path, APP_DIR_PERMISSIONS);
-      }
-    } catch (FileNotFoundException fnfe) {
-      exists = false;
-    }
-    return exists;
-  }
-
-  protected void createAppDir(final String user, final ApplicationId appId,
-      UserGroupInformation userUgi) {
-    try {
-      userUgi.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          try {
-            // TODO: Reuse FS for user?
-            FileSystem remoteFS = getFileSystem(getConfig());
-
-            // Only creating directories if they are missing to avoid
-            // unnecessary load on the filesystem from all of the nodes
-            Path appDir = LogAggregationUtils.getRemoteAppLogDir(
-                LogAggregationService.this.remoteRootLogDir, appId, user,
-                LogAggregationService.this.remoteRootLogDirSuffix);
-            appDir = appDir.makeQualified(remoteFS.getUri(),
-                remoteFS.getWorkingDirectory());
-
-            if (!checkExists(remoteFS, appDir, APP_DIR_PERMISSIONS)) {
-              Path suffixDir = LogAggregationUtils.getRemoteLogSuffixedDir(
-                  LogAggregationService.this.remoteRootLogDir, user,
-                  LogAggregationService.this.remoteRootLogDirSuffix);
-              suffixDir = suffixDir.makeQualified(remoteFS.getUri(),
-                  remoteFS.getWorkingDirectory());
-
-              if (!checkExists(remoteFS, suffixDir, APP_DIR_PERMISSIONS)) {
-                Path userDir = LogAggregationUtils.getRemoteLogUserDir(
-                    LogAggregationService.this.remoteRootLogDir, user);
-                userDir = userDir.makeQualified(remoteFS.getUri(),
-                    remoteFS.getWorkingDirectory());
-
-                if (!checkExists(remoteFS, userDir, APP_DIR_PERMISSIONS)) {
-                  createDir(remoteFS, userDir, APP_DIR_PERMISSIONS);
-                }
-
-                createDir(remoteFS, suffixDir, APP_DIR_PERMISSIONS);
-              }
-
-              createDir(remoteFS, appDir, APP_DIR_PERMISSIONS);
-            }
-
-          } catch (IOException e) {
-            LOG.error("Failed to setup application log directory for "
-                + appId, e);
-            throw e;
-          }
-          return null;
-        }
-      });
-    } catch (Exception e) {
-      throw new YarnRuntimeException(e);
-    }
-  }
-
   @SuppressWarnings("unchecked")
   private void initApp(final ApplicationId appId, String user,
       Credentials credentials, Map<ApplicationAccessType, String> appAcls,
@@ -376,7 +192,6 @@
       long recoveredLogInitedTime) {
     ApplicationEvent eventResponse;
     try {
-      verifyAndCreateRemoteLogDir(getConfig());
       initAppAggregator(appId, user, credentials, appAcls,
           logAggregationContext, recoveredLogInitedTime);
       eventResponse = new ApplicationEvent(appId,
@@ -409,14 +224,17 @@
       userUgi.addCredentials(credentials);
     }
 
+    LogAggregationFileController logAggregationFileController
+        = getLogAggregationFileController(getConfig());
+    logAggregationFileController.verifyAndCreateRemoteLogDir();
     // New application
     final AppLogAggregator appLogAggregator =
         new AppLogAggregatorImpl(this.dispatcher, this.deletionService,
             getConfig(), appId, userUgi, this.nodeId, dirsHandler,
-            getRemoteNodeLogFileForApp(appId, user),
-            appAcls, logAggregationContext, this.context,
+            logAggregationFileController.getRemoteNodeLogFileForApp(appId,
+            user, nodeId), appAcls, logAggregationContext, this.context,
             getLocalFileContext(getConfig()), this.rollingMonitorInterval,
-            recoveredLogInitedTime);
+            recoveredLogInitedTime, logAggregationFileController);
     if (this.appLogAggregators.putIfAbsent(appId, appLogAggregator) != null) {
       throw new YarnRuntimeException("Duplicate initApp for " + appId);
     }
@@ -424,7 +242,7 @@
     YarnRuntimeException appDirException = null;
     try {
       // Create the app dir
-      createAppDir(user, appId, userUgi);
+      logAggregationFileController.createAppDir(user, appId, userUgi);
     } catch (Exception e) {
       appLogAggregator.disableLogAggregation();
       if (!(e instanceof YarnRuntimeException)) {
@@ -468,7 +286,8 @@
     return this.appLogAggregators.size();
   }
 
-  private void stopContainer(ContainerId containerId, int exitCode) {
+  private void stopContainer(ContainerId containerId,
+      ContainerType containerType, int exitCode) {
 
     // A container is complete. Put this container's logs up for aggregation if
     // this container's logs are needed.
@@ -479,14 +298,6 @@
           + ", did it fail to start?");
       return;
     }
-    Container container = context.getContainers().get(containerId);
-    if (null == container) {
-      LOG.warn("Log aggregation cannot be started for " + containerId
-          + ", as its an absent container");
-      return;
-    }
-    ContainerType containerType =
-        container.getContainerTokenIdentifier().getContainerType();
     aggregator.startContainerLogAggregation(
         new ContainerLogContext(containerId, containerType, exitCode));
   }
@@ -525,6 +336,7 @@
         LogHandlerContainerFinishedEvent containerFinishEvent =
             (LogHandlerContainerFinishedEvent) event;
         stopContainer(containerFinishEvent.getContainerId(),
+            containerFinishEvent.getContainerType(),
             containerFinishEvent.getExitCode());
         break;
       case APPLICATION_FINISHED:
@@ -569,4 +381,14 @@
     }
     return threadPoolSize;
   }
+
+  @VisibleForTesting
+  public LogAggregationFileController getLogAggregationFileController(
+      Configuration conf) {
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(conf);
+    LogAggregationFileController logAggregationFileController = factory
+        .getFileControllerForWrite();
+    return logAggregationFileController;
+  }
 }
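
With the directory bookkeeping moved into the controller, the service now obtains its writer through the new factory; which implementation comes back is decided by configuration rather than hard-coded to TFile. A usage sketch of the factory call added above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;

class ControllerSelectionSketch {
  // Resolve the write-side controller from configuration; the factory
  // reads the file-controller settings out of the supplied conf.
  static LogAggregationFileController forWrite(Configuration conf) {
    return new LogAggregationFileControllerFactory(conf)
        .getFileControllerForWrite();
  }
}
```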
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/SampleContainerLogAggregationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/SampleContainerLogAggregationPolicy.java
index 56c760b..03f548c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/SampleContainerLogAggregationPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/SampleContainerLogAggregationPolicy.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
 import java.util.Collection;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -41,8 +41,8 @@
 @Private
 public class SampleContainerLogAggregationPolicy implements
     ContainerLogAggregationPolicy  {
-  private static final Log LOG =
-      LogFactory.getLog(SampleContainerLogAggregationPolicy.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SampleContainerLogAggregationPolicy.class);
 
   static String SAMPLE_RATE = "SR";
   public static final float DEFAULT_SAMPLE_RATE = 0.2f;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index 9961748..9c43dde 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -26,9 +26,9 @@
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.RejectedExecutionException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -60,8 +60,8 @@
 public class NonAggregatingLogHandler extends AbstractService implements
     LogHandler {
 
-  private static final Log LOG = LogFactory
-      .getLog(NonAggregatingLogHandler.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(NonAggregatingLogHandler.class);
   private final Dispatcher dispatcher;
   private final DeletionService delService;
   private final Map<ApplicationId, String> appOwners;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
index 038006e..3f4b6a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerContainerFinishedEvent.java
@@ -19,16 +19,19 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
 
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 public class LogHandlerContainerFinishedEvent extends LogHandlerEvent {
 
   private final ContainerId containerId;
+  private final ContainerType containerType;
   private final int exitCode;
 
   public LogHandlerContainerFinishedEvent(ContainerId containerId,
-      int exitCode) {
+      ContainerType containerType, int exitCode) {
     super(LogHandlerEventType.CONTAINER_FINISHED);
     this.containerId = containerId;
+    this.containerType = containerType;
     this.exitCode = exitCode;
   }
 
@@ -36,6 +39,10 @@
     return this.containerId;
   }
 
+  public ContainerType getContainerType() {
+    return containerType;
+  }
+
   public int getExitCode() {
     return this.exitCode;
   }
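
    [Annotation] Call sites must now supply the container's type when raising this
    event. A hedged sketch of a producer; the dispatcher, method name, and exit
    code are illustrative, only the widened constructor is from this change:

        // Hypothetical producer of the event.
        void onContainerExit(Dispatcher dispatcher, ContainerId containerId,
            int exitCode) {
          dispatcher.getEventHandler().handle(
              new LogHandlerContainerFinishedEvent(
                  containerId,          // id of the finished container
                  ContainerType.TASK,   // new: lets handlers treat AM logs specially
                  exitCode));
        }
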
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 13e7491..d764e1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -20,8 +20,8 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
@@ -55,8 +55,8 @@
 public class ContainersMonitorImpl extends AbstractService implements
     ContainersMonitor {
 
-  private final static Log LOG = LogFactory
-      .getLog(ContainersMonitorImpl.class);
+  private final static Logger LOG =
+      LoggerFactory.getLogger(ContainersMonitorImpl.class);
 
   private long monitoringInterval;
   private MonitoringThread monitoringThread;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 19b4505..644bdae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -173,6 +173,7 @@
             new ChangeMonitoringContainerResourceEvent(containerId,
                 updateEvent.getUpdatedToken().getResource()));
       } else {
+        // Container is queued or localizing.
         updateEvent.getContainer().setContainerTokenIdentifier(
             updateEvent.getUpdatedToken());
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
index 8a385905..7490cc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
@@ -20,9 +20,9 @@
 
 import java.io.IOException;
 import java.util.TimerTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
@@ -31,8 +31,8 @@
  */
 public class ConfigurationNodeLabelsProvider extends AbstractNodeLabelsProvider {
 
-  private static final Log LOG = LogFactory
-      .getLog(ConfigurationNodeLabelsProvider.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConfigurationNodeLabelsProvider.class);
 
   public ConfigurationNodeLabelsProvider() {
     super("Configuration Based NodeLabels Provider");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeLabelsProvider.java
index 79d1eb9..32f180a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ScriptBasedNodeLabelsProvider.java
@@ -25,9 +25,9 @@
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@@ -132,7 +132,8 @@
    */
   private class NodeLabelsScriptRunner extends TimerTask {
 
-    private final Log LOG = LogFactory.getLog(NodeLabelsScriptRunner.class);
+    private final Logger LOG =
+        LoggerFactory.getLogger(NodeLabelsScriptRunner.class);
 
     public NodeLabelsScriptRunner() {
       ArrayList<String> execScript = new ArrayList<String>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index c556b39..a31756e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -20,6 +20,7 @@
 
 import static org.fusesource.leveldbjni.JniDBFactory.asString;
 import static org.fusesource.leveldbjni.JniDBFactory.bytes;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,8 +35,6 @@
 import java.util.TimerTask;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -69,7 +68,6 @@
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.iq80.leveldb.WriteBatch;
 
@@ -79,15 +77,15 @@
 
 public class NMLeveldbStateStoreService extends NMStateStoreService {
 
-  public static final Log LOG =
-      LogFactory.getLog(NMLeveldbStateStoreService.class);
+  public static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(NMLeveldbStateStoreService.class);
 
   private static final String DB_NAME = "yarn-nm-state";
   private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";
 
   /**
    * Changes from 1.0 to 1.1: Save AMRMProxy state in NMSS.
-   * Changes from 1.2 to 1.2: Save queued container information.
+   * Changes from 1.1 to 1.2: Save queued container information.
    */
   private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 2);
 
@@ -114,6 +112,7 @@
       "ContainerManager/containers/";
   private static final String CONTAINER_REQUEST_KEY_SUFFIX = "/request";
   private static final String CONTAINER_VERSION_KEY_SUFFIX = "/version";
+  private static final String CONTAINER_START_TIME_KEY_SUFFIX = "/starttime";
   private static final String CONTAINER_DIAGS_KEY_SUFFIX = "/diagnostics";
   private static final String CONTAINER_LAUNCHED_KEY_SUFFIX = "/launched";
   private static final String CONTAINER_QUEUED_KEY_SUFFIX = "/queued";
@@ -259,6 +258,8 @@
             StartContainerRequestProto.parseFrom(entry.getValue()));
       } else if (suffix.equals(CONTAINER_VERSION_KEY_SUFFIX)) {
         rcs.version = Integer.parseInt(asString(entry.getValue()));
+      } else if (suffix.equals(CONTAINER_START_TIME_KEY_SUFFIX)) {
+        rcs.setStartTime(Long.parseLong(asString(entry.getValue())));
       } else if (suffix.equals(CONTAINER_DIAGS_KEY_SUFFIX)) {
         rcs.diagnostics = asString(entry.getValue());
       } else if (suffix.equals(CONTAINER_QUEUED_KEY_SUFFIX)) {
@@ -298,21 +299,23 @@
 
   @Override
   public void storeContainer(ContainerId containerId, int containerVersion,
-      StartContainerRequest startRequest) throws IOException {
+      long startTime, StartContainerRequest startRequest) throws IOException {
     String idStr = containerId.toString();
     if (LOG.isDebugEnabled()) {
       LOG.debug("storeContainer: containerId= " + idStr
           + ", startRequest= " + startRequest);
     }
-    String keyRequest = CONTAINERS_KEY_PREFIX + idStr
-        + CONTAINER_REQUEST_KEY_SUFFIX;
+    String keyRequest = getContainerKey(idStr, CONTAINER_REQUEST_KEY_SUFFIX);
     String keyVersion = getContainerVersionKey(idStr);
+    String keyStartTime =
+        getContainerKey(idStr, CONTAINER_START_TIME_KEY_SUFFIX);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
         batch.put(bytes(keyRequest),
-            ((StartContainerRequestPBImpl) startRequest)
-                .getProto().toByteArray());
+            ((StartContainerRequestPBImpl) startRequest).getProto().
+                toByteArray());
+        batch.put(bytes(keyStartTime), bytes(Long.toString(startTime)));
         if (containerVersion != 0) {
           batch.put(bytes(keyVersion),
               bytes(Integer.toString(containerVersion)));
@@ -328,7 +331,11 @@
 
   @VisibleForTesting
   String getContainerVersionKey(String containerId) {
-    return CONTAINERS_KEY_PREFIX + containerId + CONTAINER_VERSION_KEY_SUFFIX;
+    return getContainerKey(containerId, CONTAINER_VERSION_KEY_SUFFIX);
+  }
+
+  private String getContainerKey(String containerId, String suffix) {
+    return CONTAINERS_KEY_PREFIX + containerId + suffix;
   }
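
    [Annotation] The getContainerKey helper centralizes the per-container key
    layout in leveldb. Sketched out, the keys for one container now look like this
    (the container id is illustrative):

        // Key layout under "ContainerManager/containers/":
        //   .../<containerId>/request    -> StartContainerRequest proto
        //   .../<containerId>/version    -> container version, as a string
        //   .../<containerId>/starttime  -> start time millis, as a string (new)
        String startTimeKey = getContainerKey("container_1_0001_01_000001",
            CONTAINER_START_TIME_KEY_SUFFIX);
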
 
   @Override
@@ -1382,8 +1389,9 @@
     }
   }
 
-  private static class LeveldbLogger implements Logger {
-    private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
+  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
+    private static final org.slf4j.Logger LOG =
+        LoggerFactory.getLogger(LeveldbLogger.class);
 
     @Override
     public void log(String message) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index 96c3f9e..86dc99f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -71,7 +71,7 @@
 
   @Override
   public void storeContainer(ContainerId containerId, int version,
-      StartContainerRequest startRequest) throws IOException {
+      long startTime, StartContainerRequest startRequest) throws IOException {
   }
 
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 9f87279..ec534bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -87,6 +87,7 @@
     int version;
     private RecoveredContainerType recoveryType =
         RecoveredContainerType.RECOVER;
+    private long startTime;
 
     public RecoveredContainerStatus getStatus() {
       return status;
@@ -108,6 +109,14 @@
       return version;
     }
 
+    public long getStartTime() {
+      return startTime;
+    }
+
+    public void setStartTime(long ts) {
+      startTime = ts;
+    }
+
     public StartContainerRequest getStartRequest() {
       return startRequest;
     }
@@ -145,6 +154,7 @@
       return new StringBuffer("Status: ").append(getStatus())
           .append(", Exit code: ").append(exitCode)
           .append(", Version: ").append(version)
+          .append(", Start Time: ").append(startTime)
           .append(", Killed: ").append(getKilled())
           .append(", Diagnostics: ").append(getDiagnostics())
           .append(", Capability: ").append(getCapability())
@@ -365,11 +375,12 @@
    * Record a container start request
    * @param containerId the container ID
    * @param containerVersion the container Version
+   * @param startTime container start time
    * @param startRequest the container start request
    * @throws IOException
    */
   public abstract void storeContainer(ContainerId containerId,
-      int containerVersion, StartContainerRequest startRequest)
+      int containerVersion, long startTime, StartContainerRequest startRequest)
       throws IOException;
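
    [Annotation] A minimal sketch of the new round trip, assuming a concrete
    NMStateStoreService named stateStore and a prepared StartContainerRequest;
    the variable names are illustrative:

        long startTime = System.currentTimeMillis();
        stateStore.storeContainer(containerId, containerVersion, startTime,
            startRequest);

        // After an NM restart, recovery hands the persisted value back through
        // the existing loadContainersState() entry point:
        for (RecoveredContainerState rcs : stateStore.loadContainersState()) {
          long recoveredStart = rcs.getStartTime();  // survives the restart
          // ... rebuild the container with its original start time ...
        }
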
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java
index 2a92d40..256f649 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java
@@ -24,9 +24,9 @@
 import java.util.List;
 import java.util.Map.Entry;
 import java.util.TreeMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.SecretManager;
@@ -48,8 +48,8 @@
 public class NMContainerTokenSecretManager extends
     BaseContainerTokenSecretManager {
 
-  private static final Log LOG = LogFactory
-      .getLog(NMContainerTokenSecretManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NMContainerTokenSecretManager.class);
   
   private MasterKeyData previousMasterKey;
   private final TreeMap<Long, List<ContainerId>> recentlyStartedContainerTracker;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
index 4ed6118..0956e77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
@@ -23,9 +23,9 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -45,8 +45,8 @@
 
 public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
 
-  private static final Log LOG = LogFactory
-    .getLog(NMTokenSecretManagerInNM.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NMTokenSecretManagerInNM.class);
   
   private MasterKeyData previousMasterKey;
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
index 89e3d78..7b28659 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocolPB;
 import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
 
 /**
@@ -32,18 +33,21 @@
 @InterfaceStability.Unstable
 public class NMPolicyProvider extends PolicyProvider {
   
-  private static final Service[] nodeManagerServices = 
+  private static final Service[] NODE_MANAGER_SERVICES =
       new Service[] {
-    new Service(
-        YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL, 
-        ContainerManagementProtocolPB.class),
-    new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER, 
-        LocalizationProtocolPB.class)
-  };
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL,
+            ContainerManagementProtocolPB.class),
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER,
+            LocalizationProtocolPB.class),
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL,
+            CollectorNodemanagerProtocolPB.class)
+      };
 
   @Override
   public Service[] getServices() {
-    return nodeManagerServices;
+    return NODE_MANAGER_SERVICES;
   }
-
 }
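
    [Annotation] For context, a sketch of how the table is consumed when
    service-level authorization is on; the wiring below is illustrative, only the
    NODE_MANAGER_SERVICES entries are from this change:

        // Hypothetical wiring via
        // org.apache.hadoop.security.authorize.ServiceAuthorizationManager.
        ServiceAuthorizationManager authManager =
            new ServiceAuthorizationManager();
        // Installs one ACL per Service entry, now including the
        // collector-nodemanager protocol.
        authManager.refresh(new Configuration(), new NMPolicyProvider());
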
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 8aaae79..b8192ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -19,15 +19,18 @@
 package org.apache.hadoop.yarn.server.nodemanager.timelineservice;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -55,6 +58,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl.ContainerMetric;
 import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -66,7 +70,8 @@
  */
 public class NMTimelinePublisher extends CompositeService {
 
-  private static final Log LOG = LogFactory.getLog(NMTimelinePublisher.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NMTimelinePublisher.class);
 
   private Dispatcher dispatcher;
 
@@ -76,6 +81,8 @@
 
   private String httpAddress;
 
+  private UserGroupInformation nmLoginUGI;
+
   private final Map<ApplicationId, TimelineV2Client> appToClientMap;
 
   public NMTimelinePublisher(Context context) {
@@ -90,6 +97,9 @@
     dispatcher.register(NMTimelineEventType.class,
         new ForwardingEventHandler());
     addIfService(dispatcher);
+    this.nmLoginUGI = UserGroupInformation.isSecurityEnabled() ?
+        UserGroupInformation.getLoginUser() :
+        UserGroupInformation.getCurrentUser();
     super.serviceInit(conf);
   }
 
@@ -148,6 +158,8 @@
             Math.round(cpuUsagePercentPerCore));
         entity.addMetric(cpuMetric);
       }
+      entity.setIdPrefix(TimelineServiceHelper.
+          invertLong(container.getContainerStartTime()));
       ApplicationId appId = container.getContainerId().getApplicationAttemptId()
           .getApplicationId();
       try {
@@ -194,15 +206,17 @@
     tEvent.setId(ContainerMetricsConstants.CREATED_EVENT_TYPE);
     tEvent.setTimestamp(event.getTimestamp());
 
+    long containerStartTime = container.getContainerStartTime();
     entity.addEvent(tEvent);
-    entity.setCreatedTime(event.getTimestamp());
+    entity.setCreatedTime(containerStartTime);
+    entity.setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime));
     dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
         containerId.getApplicationAttemptId().getApplicationId()));
   }
 
   @SuppressWarnings("unchecked")
   private void publishContainerFinishedEvent(ContainerStatus containerStatus,
-      long timeStamp) {
+      long containerFinishTime, long containerStartTime) {
     ContainerId containerId = containerStatus.getContainerId();
     TimelineEntity entity = createContainerEntity(containerId);
 
@@ -214,13 +228,14 @@
     entityInfo.put(ContainerMetricsConstants.STATE_INFO,
         ContainerState.COMPLETE.toString());
     entityInfo.put(ContainerMetricsConstants.CONTAINER_FINISHED_TIME,
-        timeStamp);
+        containerFinishTime);
     entity.setInfo(entityInfo);
 
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setId(ContainerMetricsConstants.FINISHED_EVENT_TYPE);
-    tEvent.setTimestamp(timeStamp);
+    tEvent.setTimestamp(containerFinishTime);
     entity.addEvent(tEvent);
+    entity.setIdPrefix(TimelineServiceHelper.invertLong(containerStartTime));
 
     dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
         containerId.getApplicationAttemptId().getApplicationId()));
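
    [Annotation] The id prefix is the inverted start time, so newer containers
    sort first under the timeline service's ascending key order. A sketch of the
    inversion, assuming TimelineServiceHelper.invertLong is the usual
    Long.MAX_VALUE complement:

        // Hypothetical restatement of the helper, for illustration only.
        static long invertLong(long key) {
          return Long.MAX_VALUE - key;
        }

        // Later start times yield smaller prefixes and therefore sort first:
        // t2 > t1  implies  invertLong(t2) < invertLong(t1).
        entity.setIdPrefix(invertLong(container.getContainerStartTime()));
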
@@ -236,6 +251,8 @@
     tEvent.setId(eventType);
     tEvent.setTimestamp(event.getTimestamp());
     entity.addEvent(tEvent);
+    entity.setIdPrefix(TimelineServiceHelper.
+        invertLong(container.getContainerStartTime()));
 
     ApplicationId appId =
         container.getContainerId().getApplicationAttemptId().getApplicationId();
@@ -299,7 +316,7 @@
       ApplicationContainerFinishedEvent evnt =
           (ApplicationContainerFinishedEvent) event;
       publishContainerFinishedEvent(evnt.getContainerStatus(),
-          event.getTimestamp());
+          event.getTimestamp(), evnt.getContainerStartTime());
       break;
 
     default:
@@ -390,11 +407,23 @@
 
   public void createTimelineClient(ApplicationId appId) {
     if (!appToClientMap.containsKey(appId)) {
-      TimelineV2Client timelineClient =
-          TimelineV2Client.createTimelineClient(appId);
-      timelineClient.init(getConfig());
-      timelineClient.start();
-      appToClientMap.put(appId, timelineClient);
+      try {
+        TimelineV2Client timelineClient =
+            nmLoginUGI.doAs(new PrivilegedExceptionAction<TimelineV2Client>() {
+              @Override
+              public TimelineV2Client run() throws Exception {
+                TimelineV2Client timelineClient =
+                    TimelineV2Client.createTimelineClient(appId);
+                timelineClient.init(getConfig());
+                timelineClient.start();
+                return timelineClient;
+              }
+            });
+        appToClientMap.put(appId, timelineClient);
+      } catch (IOException | InterruptedException | RuntimeException |
+          Error e) {
+        LOG.warn("Unable to create timeline client for app " + appId, e);
+      }
     }
   }
 
@@ -409,11 +438,11 @@
       String collectorAddr) {
     TimelineV2Client client = appToClientMap.get(appId);
     if (client != null) {
-      client.setTimelineServiceAddress(collectorAddr);
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(collectorAddr));
     }
   }
 
   private TimelineV2Client getTimelineClient(ApplicationId appId) {
     return appToClientMap.get(appId);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 7a89285..54b6e1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -37,11 +37,11 @@
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -68,8 +68,8 @@
 @Deprecated
 public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
 
-  final static Log LOG = LogFactory
-      .getLog(CgroupsLCEResourcesHandler.class);
+  final static Logger LOG =
+      LoggerFactory.getLogger(CgroupsLCEResourcesHandler.class);
 
   private Configuration conf;
   private String cgroupPrefix;
@@ -435,7 +435,7 @@
     } catch (IOException e) {
       throw new IOException("Error while reading " + getMtabFileName(), e);
     } finally {
-      IOUtils.cleanup(LOG, in);
+      IOUtils.cleanupWithLogger(LOG, in);
     }
 
     return ret;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
index df2cc52..70c96af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
@@ -18,18 +18,18 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Deprecated
 public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
 
-  final static Log LOG = LogFactory
-      .getLog(DefaultLCEResourcesHandler.class);
+  final static Logger LOG =
+      LoggerFactory.getLogger(DefaultLCEResourcesHandler.class);
 
   private Configuration conf;
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
index 2726a41..32f73c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.util;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to determine hardware related characteristics such as the
@@ -34,8 +34,8 @@
 @InterfaceStability.Unstable
 public class NodeManagerHardwareUtils {
 
-  private static final Log LOG = LogFactory
-      .getLog(NodeManagerHardwareUtils.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NodeManagerHardwareUtils.class);
 
   private static boolean isHardwareDetectionEnabled(Configuration conf) {
     return conf.getBoolean(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
index 41d299e..a4c04ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
@@ -22,9 +22,9 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -35,7 +35,8 @@
  */
 public class ProcessIdFileReader {
 
-  private static final Log LOG = LogFactory.getLog(ProcessIdFileReader.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ProcessIdFileReader.class);
   
   /**
    * Get the process id from specified file path.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index 04a889f..3ba7d5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -25,6 +25,8 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -43,8 +45,6 @@
 import javax.ws.rs.core.UriInfo;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
@@ -82,7 +82,8 @@
 @Singleton
 @Path("/ws/v1/node")
 public class NMWebServices {
-  private static final Log LOG = LogFactory.getLog(NMWebServices.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NMWebServices.class);
   private Context nmContext;
   private ResourceView rview;
   private WebApp webapp;
@@ -425,11 +426,9 @@
         public void write(OutputStream os) throws IOException,
             WebApplicationException {
           try {
-            int bufferSize = 65536;
-            byte[] buf = new byte[bufferSize];
-            LogToolUtils.outputContainerLog(containerId.toString(),
-                nmContext.getNodeId().toString(), outputFileName, fileLength,
-                bytes, lastModifiedTime, fis, os, buf,
+            LogToolUtils.outputContainerLogThroughZeroCopy(
+                containerId.toString(), nmContext.getNodeId().toString(),
+                outputFileName, fileLength, bytes, lastModifiedTime, fis, os,
                 ContainerLogAggregationType.LOCAL);
             StringBuilder sb = new StringBuilder();
             String endOfFile = "End of LogType:" + outputFileName;
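
    [Annotation] Switching local-log serving to outputContainerLogThroughZeroCopy
    drops the intermediate 64 KB copy buffer; the aggregated-log fallback below
    still allocates one. The underlying technique, sketched with plain NIO as an
    illustration of the idea rather than the LogToolUtils implementation:

        import java.io.FileInputStream;
        import java.io.IOException;
        import java.io.OutputStream;
        import java.nio.channels.Channels;
        import java.nio.channels.FileChannel;
        import java.nio.channels.WritableByteChannel;

        static void transferZeroCopy(FileInputStream fis, OutputStream os,
            long offset, long length) throws IOException {
          FileChannel in = fis.getChannel();
          WritableByteChannel out = Channels.newChannel(os);
          long pos = offset;
          long remaining = length;
          while (remaining > 0) {
            // transferTo may move fewer bytes than requested; loop until done.
            long n = in.transferTo(pos, remaining, out);
            if (n <= 0) {
              break;
            }
            pos += n;
            remaining -= n;
          }
        }
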
@@ -450,6 +449,8 @@
             Application app = nmContext.getApplications().get(appId);
             String appOwner = app == null ? null : app.getUser();
             try {
+              int bufferSize = 65536;
+              byte[] buf = new byte[bufferSize];
               LogToolUtils.outputAggregatedContainerLog(nmContext.getConf(),
                   appId, appOwner, containerId.toString(),
                   nmContext.getNodeId().toString(), outputFileName, bytes,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 0a2731e..5cbcff5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
@@ -30,6 +27,8 @@
 
 import com.google.inject.Inject;
 
+import static org.apache.hadoop.util.GenericsUtil.isLog4jLogger;
+
 public class NavBlock extends HtmlBlock implements YarnWebParams {
 
   private Configuration conf;
@@ -43,8 +42,7 @@
   protected void render(Block html) {
 
     boolean addErrorsAndWarningsLink = false;
-    Log log = LogFactory.getLog(NMErrorsAndWarningsPage.class);
-    if (log instanceof Log4JLogger) {
+    if (isLog4jLogger(NMErrorsAndWarningsPage.class)) {
       Log4jWarningErrorMetricsAppender appender = Log4jWarningErrorMetricsAppender.findAppender();
       if (appender != null) {
         addErrorsAndWarningsLink = true;
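
    [Annotation] Replacing the instanceof check against commons-logging's
    Log4JLogger with GenericsUtil.isLog4jLogger removes this page's direct
    commons-logging dependency. A hedged restatement of what such a check can
    look like (illustrative only; the real helper lives in
    org.apache.hadoop.util.GenericsUtil):

        // Hypothetical sketch: decide whether a class's slf4j logger is backed
        // by log4j by inspecting the binding's class name.
        static boolean isLog4jLogger(Class<?> clazz) {
          if (clazz == null) {
            return false;
          }
          org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(clazz);
          return "org.slf4j.impl.Log4jLoggerAdapter"
              .equals(log.getClass().getName());
        }
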
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
index 53e529b..813ba14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.yarn.server.nodemanager.webapp;
 
 import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
@@ -45,7 +45,8 @@
 
 public class WebServer extends AbstractService {
 
-  private static final Log LOG = LogFactory.getLog(WebServer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(WebServer.class);
 
   private final Context nmContext;
   private final NMWebApp nmWebApp;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 9f754c4..560ec18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1384,7 +1384,6 @@
 }
 
 char* parse_docker_command_file(const char* command_file) {
-
   size_t len = 0;
   char *line = NULL;
   ssize_t read;
@@ -1417,9 +1416,10 @@
   char* docker_command = parse_docker_command_file(command_file);
   char* docker_binary = get_section_value(DOCKER_BINARY_KEY, &executor_cfg);
   docker_binary = check_docker_binary(docker_binary);
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
 
-  char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX);
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command);
+  char* docker_command_with_binary = calloc(sizeof(char), command_size);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, docker_command);
   char **args = split_delimiter(docker_command_with_binary, " ");
 
   int exit_code = -1;
@@ -1567,16 +1567,24 @@
   char *script_file_dest = NULL;
   char *cred_file_dest = NULL;
   char *exit_code_file = NULL;
-  char docker_command_with_binary[EXECUTOR_PATH_MAX];
-  char docker_wait_command[EXECUTOR_PATH_MAX];
-  char docker_logs_command[EXECUTOR_PATH_MAX];
-  char docker_inspect_command[EXECUTOR_PATH_MAX];
-  char docker_rm_command[EXECUTOR_PATH_MAX];
+  char *docker_command_with_binary = NULL;
+  char *docker_wait_command = NULL;
+  char *docker_logs_command = NULL;
+  char *docker_inspect_command = NULL;
+  char *docker_rm_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
   int BUFFER_SIZE = 4096;
   char buffer[BUFFER_SIZE];
 
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
+
+  docker_command_with_binary = calloc(sizeof(char), command_size);
+  docker_wait_command = calloc(sizeof(char), command_size);
+  docker_logs_command = calloc(sizeof(char), command_size);
+  docker_inspect_command = calloc(sizeof(char), command_size);
+  docker_rm_command = calloc(sizeof(char), command_size);
+
   gid_t user_gid = getegid();
   uid_t prev_uid = geteuid();
 
@@ -1621,7 +1629,7 @@
     goto cleanup;
   }
 
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, docker_command);
 
   fprintf(LOGFILE, "Launching docker container...\n");
   FILE* start_docker = popen(docker_command_with_binary, "r");
@@ -1634,7 +1642,7 @@
     goto cleanup;
   }
 
-  snprintf(docker_inspect_command, EXECUTOR_PATH_MAX,
+  snprintf(docker_inspect_command, command_size,
     "%s inspect --format {{.State.Pid}} %s",
     docker_binary, container_id);
 
@@ -1679,7 +1687,7 @@
       goto cleanup;
     }
 
-    snprintf(docker_wait_command, EXECUTOR_PATH_MAX,
+    snprintf(docker_wait_command, command_size,
       "%s wait %s", docker_binary, container_id);
 
     fprintf(LOGFILE, "Waiting for docker container to finish...\n");
@@ -1693,7 +1701,7 @@
     if(exit_code != 0) {
       fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
       exit_code);
-      snprintf(docker_logs_command, EXECUTOR_PATH_MAX, "%s logs --tail=250 %s",
+      snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
         docker_binary, container_id);
       FILE* logs = popen(docker_logs_command, "r");
       if(logs != NULL) {
@@ -1723,7 +1731,7 @@
   }
 
   fprintf(LOGFILE, "Removing docker container post-exit...\n");
-  snprintf(docker_rm_command, EXECUTOR_PATH_MAX,
+  snprintf(docker_rm_command, command_size,
     "%s rm %s", docker_binary, container_id);
   FILE* rm_docker = popen(docker_rm_command, "w");
   if (pclose (rm_docker) != 0)
@@ -1763,6 +1771,11 @@
   free(exit_code_file);
   free(script_file_dest);
   free(cred_file_dest);
+  free(docker_command_with_binary);
+  free(docker_wait_command);
+  free(docker_logs_command);
+  free(docker_inspect_command);
+  free(docker_rm_command);
   return exit_code;
 }
 
@@ -2429,3 +2442,12 @@
 int traffic_control_read_stats(char *command_file) {
   return run_traffic_control(TC_READ_STATS_OPTS, command_file);
 }
+
+/**
+ * FIXME: (wangda) it's better to move executor_cfg out of container-executor.c
+ * Today the initialization of executor_cfg and its data structures lives
+ * inside container-executor.c, which is not a good design.
+ */
+struct configuration* get_cfg() {
+  return &CFG;
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index ea8b5e3..aa38abf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -274,3 +274,5 @@
  * Return 0 on success.
  */
 int validate_docker_image_name(const char *image_name);
+
+struct configuration* get_cfg();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index b2187c9..a05dc78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -20,6 +20,8 @@
 #include "configuration.h"
 #include "container-executor.h"
 #include "util.h"
+#include "modules/gpu/gpu-module.h"
+#include "modules/cgroups/cgroups-operations.h"
 
 #include <errno.h>
 #include <grp.h>
@@ -253,6 +255,14 @@
     return INVALID_ARGUMENT_NUMBER;
   }
 
+  /*
+   * Check whether the first argument names a known module; if so, delegate to it.
+   */
+  if (strcmp("--module-gpu", argv[1]) == 0) {
+    return handle_gpu_request(&update_cgroups_parameters, "gpu", argc - 1,
+           &argv[1]);
+  }
+
   if (strcmp("--checksetup", argv[1]) == 0) {
     *operation = CHECK_SETUP;
     return 0;
@@ -332,6 +342,7 @@
         return FEATURE_DISABLED;
     }
   }
+
   /* Now we have to validate 'run as user' operations that don't use
     a 'long option' - we should fix this at some point. The validation/argument
     parsing here is extensive enough that it is done in a separate function */
@@ -522,7 +533,7 @@
   open_log_files();
   assert_valid_setup(argv[0]);
 
-  int operation;
+  int operation = -1;
   int ret = validate_arguments(argc, argv, &operation);
 
   if (ret != 0) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
new file mode 100644
index 0000000..b234109
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "container-executor.h"
+#include "utils/string-utils.h"
+#include "utils/path-utils.h"
+#include "modules/common/module-configs.h"
+#include "modules/common/constants.h"
+#include "modules/cgroups/cgroups-operations.h"
+#include "util.h"
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#define MAX_PATH_LEN 4096
+
+static const struct section* cgroup_cfg_section = NULL;
+
+void reload_cgroups_configuration() {
+  cgroup_cfg_section = get_configuration_section(CGROUPS_SECTION_NAME, get_cfg());
+}
+
+char* get_cgroups_path_to_write(
+    const char* hierarchy_name,
+    const char* param_name,
+    const char* group_id) {
+  int failed = 0;
+  char* buffer = NULL;
+  const char* cgroups_root = get_section_value(CGROUPS_ROOT_KEY,
+     cgroup_cfg_section);
+  const char* yarn_hierarchy_name = get_section_value(
+     CGROUPS_YARN_HIERARCHY_KEY, cgroup_cfg_section);
+
+  // Make sure it is defined.
+  if (!cgroups_root || cgroups_root[0] == 0) {
+    fprintf(ERRORFILE, "%s is not defined in container-executor.cfg\n",
+      CGROUPS_ROOT_KEY);
+    failed = 1;
+    goto cleanup;
+  }
+
+  // Make sure it is defined.
+  if (!yarn_hierarchy_name || yarn_hierarchy_name[0] == 0) {
+    fprintf(ERRORFILE, "%s is not defined in container-executor.cfg\n",
+      CGROUPS_YARN_HIERARCHY_KEY);
+    failed = 1;
+    goto cleanup;
+  }
+
+  buffer = malloc(MAX_PATH_LEN + 1);
+  if (!buffer) {
+    fprintf(ERRORFILE, "Failed to allocate memory for output path.\n");
+    failed = 1;
+    goto cleanup;
+  }
+
+  // Make the path. CGroups paths should not be too long; treat snprintf
+  // truncation (a result >= MAX_PATH_LEN) the same as an encoding error.
+  int printed = snprintf(buffer, MAX_PATH_LEN, "%s/%s/%s/%s/%s.%s",
+    cgroups_root, hierarchy_name, yarn_hierarchy_name,
+    group_id, hierarchy_name, param_name);
+  if (printed < 0 || printed >= MAX_PATH_LEN) {
+    fprintf(ERRORFILE, "Failed to print output path.\n");
+    failed = 1;
+    goto cleanup;
+  }
+
+cleanup:
+  if (failed) {
+    if (buffer) {
+      free(buffer);
+    }
+    return NULL;
+  }
+  return buffer;
+}
+
+int update_cgroups_parameters(
+   const char* hierarchy_name,
+   const char* param_name,
+   const char* group_id,
+   const char* value) {
+#ifndef __linux
+  fprintf(ERRORFILE, "Failed to update cgroups parameters, not supported\n");
+  return -1;
+#endif
+  int failure = 0;
+
+  if (!cgroup_cfg_section) {
+    reload_cgroups_configuration();
+  }
+
+  char* full_path = get_cgroups_path_to_write(hierarchy_name, param_name,
+    group_id);
+
+  if (!full_path) {
+    fprintf(ERRORFILE,
+      "Failed to get cgroups path to write; this is likely a configuration issue.\n");
+    failure = 1;
+    goto cleanup;
+  }
+
+  if (!verify_path_safety(full_path)) {
+    failure = 1;
+    goto cleanup;
+  }
+
+  // Make sure file exists
+  struct stat sb;
+  if (stat(full_path, &sb) != 0) {
+    fprintf(ERRORFILE, "CGroups: Could not find file to write, %s", full_path);
+    failure = 1;
+    goto cleanup;
+  }
+
+  fprintf(ERRORFILE, "CGroups: Updating cgroups, path=%s, value=%s",
+    full_path, value);
+
+  // Write values to file
+  FILE *f;
+  f = fopen(full_path, "a");
+  if (!f) {
+    fprintf(ERRORFILE, "CGroups: Failed to open cgroups file, %s", full_path);
+    failure = 1;
+    goto cleanup;
+  }
+  if (fprintf(f, "%s", value) < 0) {
+    fprintf(ERRORFILE, "CGroups: Failed to write cgroups file, %s", full_path);
+    fclose(f);
+    failure = 1;
+    goto cleanup;
+  }
+  if (fclose(f) != 0) {
+    fprintf(ERRORFILE, "CGroups: Failed to close cgroups file, %s", full_path);
+    failure = 1;
+    goto cleanup;
+  }
+
+cleanup:
+  if (full_path) {
+    free(full_path);
+  }
+  return -failure;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h
new file mode 100644
index 0000000..cf80bcf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _CGROUPS_OPERATIONS_H_
+#define _CGROUPS_OPERATIONS_H_
+
+#define CGROUPS_SECTION_NAME "cgroups"
+#define CGROUPS_ROOT_KEY "root"
+#define CGROUPS_YARN_HIERARCHY_KEY "yarn-hierarchy"
+
+/**
+ * Handle CGroups parameter update requests:
+ * - hierarchy_name: e.g. devices / cpu,cpuacct
+ * - param_name: e.g. deny
+ * - group_id: e.g. container_x_y
+ * - value: e.g. "a *:* rwm"
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int update_cgroups_parameters(
+   const char* hierarchy_name,
+   const char* param_name,
+   const char* group_id,
+   const char* value);
+
+ /**
+  * Get the CGroups path to write. Visible for testing.
+  * Returns the path string, or NULL on failure.
+  */
+ char* get_cgroups_path_to_write(
+    const char* hierarchy_name,
+    const char* param_name,
+    const char* group_id);
+
+ /**
+  * Reload config from filesystem, visible for testing.
+  */
+ void reload_cgroups_configuration();
+
+#endif
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h
similarity index 74%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h
index 5d14c6f..5c8c4e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h
@@ -16,10 +16,14 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+/* FreeBSD protects the getline() prototype. See getline(3) for more */
+#ifdef __FreeBSD__
+#define _WITH_GETLINE
+#endif
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+#ifndef _MODULES_COMMON_CONSTANTS_H_
+#define _MODULES_COMMON_CONSTANTS_H_
+
+#define CONFIGS_MODULES_PREFIX "yarn.container-executor.modules."
+
+#endif
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c
new file mode 100644
index 0000000..f0c6d16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util.h"
+#include "configuration.h"
+#include "container-executor.h"
+#include "modules/common/constants.h"
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define ENABLED_CONFIG_KEY "module.enabled"
+
+int module_enabled(const struct section* section_cfg, const char* module_name) {
+  char* enabled_str = get_section_value(ENABLED_CONFIG_KEY, section_cfg);
+  int enabled = 0;
+  if (enabled_str && 0 == strcmp(enabled_str, "true")) {
+    enabled = 1;
+  } else {
+    fprintf(LOGFILE, "Module %s is disabled\n", module_name);
+  }
+
+  free(enabled_str);
+  return enabled;
+}
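For illustration, module_enabled() keys off a per-section "module.enabled" flag, so with an executor config containing

    [gpu]
    module.enabled=true

a hypothetical caller (mirroring how reload_gpu_configuration() resolves its section elsewhere in this patch) would check:

    #include "configuration.h"
    #include "modules/common/module-configs.h"

    /* Sketch only: get_configuration_section/get_cfg are the configuration.h
     * helpers already used by the gpu module in this patch. */
    const struct section* gpu_cfg =
        get_configuration_section("gpu", get_cfg());
    if (!module_enabled(gpu_cfg, "gpu")) {
      /* module disabled: refuse the request */
    }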
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h
similarity index 72%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h
index 5d14c6f..d58c618 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h
@@ -16,10 +16,18 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+#ifdef __FreeBSD__
+#define _WITH_GETLINE
+#endif
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+#ifndef _MODULES_COMMON_MODULE_CONFIGS_H_
+#define _MODULES_COMMON_MODULE_CONFIGS_H_
+
+
+/**
+ * Check whether the module with the given name is enabled.
+ * Returns 0 if disabled, non-zero if enabled.
+ */
+int module_enabled(const struct section* section_cfg, const char* module_name);
+
+#endif
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
new file mode 100644
index 0000000..f96645d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "container-executor.h"
+#include "utils/string-utils.h"
+#include "modules/gpu/gpu-module.h"
+#include "modules/cgroups/cgroups-operations.h"
+#include "modules/common/module-configs.h"
+#include "modules/common/constants.h"
+#include "util.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#define EXCLUDED_GPUS_OPTION "excluded_gpus"
+#define CONTAINER_ID_OPTION "container_id"
+#define DEFAULT_NVIDIA_MAJOR_NUMBER 195
+#define MAX_CONTAINER_ID_LEN 128
+
+static const struct section* cfg_section;
+
+static int internal_handle_gpu_request(
+    update_cgroups_parameters_func update_cgroups_parameters_func_p,
+    size_t n_minor_devices_to_block, int minor_devices[],
+    const char* container_id) {
+  char* allowed_minor_numbers_str = NULL;
+  int* allowed_minor_numbers = NULL;
+  size_t n_allowed_minor_numbers = 0;
+  int return_code = 0;
+
+  if (n_minor_devices_to_block == 0) {
+    // no devices to block, just return
+    return 0;
+  }
+
+  // Get major device number from cfg; if not set, the default Nvidia major
+  // number will be used.
+  int major_device_number;
+  char* major_number_str = get_section_value(GPU_MAJOR_NUMBER_CONFIG_KEY,
+     cfg_section);
+  if (!major_number_str || 0 == major_number_str[0]) {
+    // Default major number of Nvidia devices
+    major_device_number = DEFAULT_NVIDIA_MAJOR_NUMBER;
+  } else {
+    major_device_number = strtol(major_number_str, NULL, 0);
+  }
+
+  // Get allowed minor device numbers from cfg; if not set, all minor
+  // devices can be used by YARN
+  allowed_minor_numbers_str = get_section_value(
+      GPU_ALLOWED_DEVICES_MINOR_NUMBERS,
+      cfg_section);
+  if (!allowed_minor_numbers_str || 0 == allowed_minor_numbers_str[0]) {
+    allowed_minor_numbers = NULL;
+  } else {
+    int rc = get_numbers_split_by_comma(allowed_minor_numbers_str,
+                                        &allowed_minor_numbers,
+                                        &n_allowed_minor_numbers);
+    if (0 != rc) {
+      fprintf(ERRORFILE,
+          "Failed to get allowed minor device numbers from cfg, value=%s\n",
+          allowed_minor_numbers_str);
+      return_code = -1;
+      goto cleanup;
+    }
+
+    // Make sure we're only trying to block devices allowed in config
+    for (int i = 0; i < n_minor_devices_to_block; i++) {
+      int found = 0;
+      for (int j = 0; j < n_allowed_minor_numbers; j++) {
+        if (minor_devices[i] == allowed_minor_numbers[j]) {
+          found = 1;
+          break;
+        }
+      }
+
+      if (!found) {
+        fprintf(ERRORFILE,
+          "Trying to blacklist device with minor-number=%d which is not on allowed list\n",
+          minor_devices[i]);
+        return_code = -1;
+        goto cleanup;
+      }
+    }
+  }
+
+  // Use cgroup helpers to blacklist devices
+  for (int i = 0; i < n_minor_devices_to_block; i++) {
+    char param_value[128];
+    memset(param_value, 0, sizeof(param_value));
+    snprintf(param_value, sizeof(param_value), "c %d:%d rwm",
+             major_device_number, minor_devices[i]);
+
+    int rc = update_cgroups_parameters_func_p("devices", "deny",
+      container_id, param_value);
+
+    if (0 != rc) {
+      fprintf(ERRORFILE, "CGroups: Failed to update cgroups\n");
+      return_code = -1;
+      goto cleanup;
+    }
+  }
+
+cleanup:
+  if (major_number_str) {
+    free(major_number_str);
+  }
+  if (allowed_minor_numbers) {
+    free(allowed_minor_numbers);
+  }
+  if (allowed_minor_numbers_str) {
+    free(allowed_minor_numbers_str);
+  }
+
+  return return_code;
+}
+
+void reload_gpu_configuration() {
+  cfg_section = get_configuration_section(GPU_MODULE_SECTION_NAME, get_cfg());
+}
+
+/*
+ * Format of GPU request commandline:
+ *
+ * c-e gpu --excluded_gpus 0,1,3 --container_id container_x_y
+ */
+int handle_gpu_request(update_cgroups_parameters_func func,
+    const char* module_name, int module_argc, char** module_argv) {
+  if (!cfg_section) {
+    reload_gpu_configuration();
+  }
+
+  if (!module_enabled(cfg_section, GPU_MODULE_SECTION_NAME)) {
+    fprintf(ERRORFILE,
+      "Please make sure gpu module is enabled before using it.\n");
+    return -1;
+  }
+
+  static struct option long_options[] = {
+    {EXCLUDED_GPUS_OPTION, required_argument, 0, 'e' },
+    {CONTAINER_ID_OPTION, required_argument, 0, 'c' },
+    {0, 0, 0, 0}
+  };
+
+  int rc = 0;
+  int c = 0;
+  int option_index = 0;
+
+  int* minor_devices = NULL;
+  char container_id[MAX_CONTAINER_ID_LEN];
+  memset(container_id, 0, sizeof(container_id));
+  size_t n_minor_devices_to_block = 0;
+  int failed = 0;
+
+  optind = 1;
+  while((c = getopt_long(module_argc, module_argv, "e:c:",
+                         long_options, &option_index)) != -1) {
+    switch(c) {
+      case 'e':
+        rc = get_numbers_split_by_comma(optarg, &minor_devices,
+          &n_minor_devices_to_block);
+        if (0 != rc) {
+          fprintf(ERRORFILE,
+            "Failed to get minor devices number from command line, value=%s\n",
+            optarg);
+          failed = 1;
+          goto cleanup;
+        }
+        break;
+      case 'c':
+        if (!validate_container_id(optarg)) {
+          fprintf(ERRORFILE,
+            "Specified container_id=%s is invalid\n", optarg);
+          failed = 1;
+          goto cleanup;
+        }
+        strncpy(container_id, optarg, MAX_CONTAINER_ID_LEN - 1);
+        break;
+      default:
+        fprintf(ERRORFILE,
+          "Unknown option in gpu command character %d %c, optionindex = %d\n",
+          c, c, optind);
+        failed = 1;
+        goto cleanup;
+    }
+  }
+
+  if (0 == container_id[0]) {
+    fprintf(ERRORFILE,
+      "[%s] --container_id must be specified.\n", __func__);
+    failed = 1;
+    goto cleanup;
+  }
+
+  if (!minor_devices) {
+    // Minor devices is null, skip the cgroups call.
+    fprintf(ERRORFILE, "--excluded_gpus is not specified, skip cgroups call.\n");
+    goto cleanup;
+  }
+
+  failed = internal_handle_gpu_request(func, n_minor_devices_to_block,
+         minor_devices,
+         container_id);
+
+cleanup:
+  if (minor_devices) {
+    free(minor_devices);
+  }
+  return failed;
+}
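Tying the pieces together, a minimal sketch of the end-to-end flow for the documented command line (argv layout mirrors the unit tests added later in this patch; the container id is illustrative):

    char* argv[] = { (char*) "--module-gpu",
                     (char*) "--excluded_gpus", (char*) "0,1",
                     (char*) "--container_id",
                     (char*) "container_1410901177871_0001_01_000001" };

    /* With module.enabled=true in the [gpu] section, this parses the options,
     * validates the container id, then calls
     * update_cgroups_parameters("devices", "deny", <container_id>,
     * "c 195:<minor> rwm") once per excluded GPU. */
    int rc = handle_gpu_request(&update_cgroups_parameters, "gpu", 5, argv);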
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h
new file mode 100644
index 0000000..59d4c7e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __FreeBSD__
+#define _WITH_GETLINE
+#endif
+
+#ifndef _MODULES_GPU_GPU_MODULE_H_
+#define _MODULES_GPU_GPU_MODULE_H_
+
+#define GPU_MAJOR_NUMBER_CONFIG_KEY "gpu.major-device-number"
+#define GPU_ALLOWED_DEVICES_MINOR_NUMBERS "gpu.allowed-device-minor-numbers"
+#define GPU_MODULE_SECTION_NAME "gpu"
+
+// For unit test stubbing
+typedef int (*update_cgroups_parameters_func)(const char*, const char*,
+   const char*, const char*);
+
+/**
+ * Handle gpu requests
+ */
+int handle_gpu_request(update_cgroups_parameters_func func,
+   const char* module_name, int module_argc, char** module_argv);
+
+/**
+ * Reload config from filesystem, visible for testing.
+ */
+void reload_gpu_configuration();
+
+#endif
\ No newline at end of file
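The function-pointer indirection exists so unit tests can observe cgroups writes without touching the filesystem; a minimal stub (names hypothetical) matching update_cgroups_parameters_func might be:

    /* Records the request instead of writing to cgroups. */
    static int noop_update_cgroups_parameters(
        const char* hierarchy, const char* param,
        const char* group, const char* value) {
      fprintf(stderr, "cgroups write: %s.%s <- \"%s\" (group %s)\n",
              hierarchy, param, value, group);
      return 0;
    }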
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
index a8a12a9..fa21def 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
@@ -66,6 +66,13 @@
   ERROR_COMPILING_REGEX = 42
 };
 
+/* Macros for min/max. */
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif /* MIN */
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif  /* MAX */
 
 // the log file for messages
 extern FILE *LOGFILE;
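One caveat worth noting: as function-like macros, MIN and MAX evaluate each argument twice, so arguments with side effects misbehave. A small illustration:

    int a = 1, b = 10;
    int m = MIN(a++, b);  /* expands to (((a++)<(b))?(a++):(b)); a is
                             incremented twice, leaving a == 3 and m == 2 */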
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c
new file mode 100644
index 0000000..dea656b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "util.h"
+
+#include <strings.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int verify_path_safety(const char* path) {
+  if (!path || path[0] == 0) {
+    return 1;
+  }
+
+  char* dup = strdup(path);
+  if (!dup) {
+    fprintf(ERRORFILE, "%s: Failed to allocate memory for path.\n", __func__);
+    return 0;
+  }
+
+  char* p = strtok(dup, "/");
+  int succeeded = 1;
+
+  while (p != NULL) {
+    if (0 == strcmp(p, "..")) {
+      fprintf(ERRORFILE, "%s: Path included \"..\", path=%s.\n", __func__, path);
+      succeeded = 0;
+      break;
+    }
+
+    p = strtok(NULL, "/");
+  }
+  free(dup);
+
+  return succeeded;
+}
\ No newline at end of file
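A quick sketch of the intended use (return values per the header below; paths are illustrative):

    #include <assert.h>
    #include "utils/path-utils.h"

    /* 1 for safe paths, 0 when any "/"-separated component is "..". */
    assert(verify_path_safety("/sys/fs/cgroup/devices/yarn/container_1"));
    assert(!verify_path_safety("/sys/fs/cgroup/devices/../../etc/passwd"));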
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h
similarity index 68%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h
index 5d14c6f..a42f936 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h
@@ -16,10 +16,20 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+#ifdef __FreeBSD__
+#define _WITH_GETLINE
+#endif
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+#ifndef _UTILS_PATH_UTILS_H_
+#define _UTILS_PATH_UTILS_H_
+
+/*
+ * Verify whether a given path is safe. For example, we don't want a path
+ * to include ".." which can do things like:
+ * - "/cgroups/cpu,cpuacct/container/../../../etc/passwd"
+ *
+ * Returns 1 (true) if the path is safe, 0 (false) otherwise.
+ */
+int verify_path_safety(const char* path);
+
+#endif
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
index 703d484..d19c084 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
@@ -15,7 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#include "util.h"
 
+#include <limits.h>
+#include <errno.h>
 #include <strings.h>
 #include <string.h>
 #include <stdio.h>
@@ -26,61 +29,131 @@
  * return true/false
  */
 static int all_numbers(char* input) {
-  if (0 == strlen(input)) {
-    return 0;
-  }
-
-  for (int i = 0; i < strlen(input); i++) {
-    if (input[i] < '0' || input[i] > '9') {
+  for (; input[0] != 0; input++) {
+    if (input[0] < '0' || input[0] > '9') {
       return 0;
     }
   }
   return 1;
 }
 
+int get_numbers_split_by_comma(const char* input, int** numbers,
+                               size_t* ret_n_numbers) {
+  size_t allocation_size = 1;
+  int i = 0;
+  while (input[i] != 0) {
+    if (input[i] == ',') {
+      allocation_size++;
+    }
+    i++;
+  }
+
+  (*numbers) = malloc(sizeof(int) * allocation_size);
+  if (!(*numbers)) {
+    fprintf(ERRORFILE, "Failed to allocating memory for *numbers: %s\n",
+            __func__);
+    exit(OUT_OF_MEMORY);
+  }
+  memset(*numbers, 0, sizeof(int) * allocation_size);
+
+  char* input_cpy = strdup(input);
+  if (!input_cpy) {
+    fprintf(ERRORFILE, "Failed to allocating memory for input_cpy: %s\n",
+            __func__);
+    exit(OUT_OF_MEMORY);
+  }
+
+  char* p = strtok(input_cpy, ",");
+  int idx = 0;
+  size_t n_numbers = 0;
+  while (p != NULL) {
+    char *temp;
+    long n = strtol(p, &temp, 0);
+    // According to answer:
+    // https://stackoverflow.com/questions/14176123/correct-usage-of-strtol
+    // We need to properly check errno and overflows
+    if (temp == p || *temp != '\0' ||
+        ((n == LONG_MIN || n == LONG_MAX) && errno == ERANGE)) {
+      fprintf(stderr,
+              "Could not convert '%s' to long and leftover string is: '%s'\n",
+              p, temp);
+      free(input_cpy);
+      return -1;
+    }
+
+    n_numbers++;
+    (*numbers)[idx] = n;
+    p = strtok(NULL, ",");
+    idx++;
+  }
+
+  free(input_cpy);
+  *ret_n_numbers = n_numbers;
+
+  return 0;
+}
+
 int validate_container_id(const char* input) {
+  int is_container_id = 1;
+
   /*
    * Two different forms of container_id
    * container_e17_1410901177871_0001_01_000005
    * container_1410901177871_0001_01_000005
    */
-  char* input_cpy = malloc(strlen(input));
-  strcpy(input_cpy, input);
+  if (!input) {
+    return 0;
+  }
+
+  char* input_cpy = strdup(input);
+  if (!input_cpy) {
+    return 0;
+  }
+
   char* p = strtok(input_cpy, "_");
   int idx = 0;
   while (p != NULL) {
     if (0 == idx) {
       if (0 != strcmp("container", p)) {
-        return 0;
+        is_container_id = 0;
+        goto cleanup;
       }
     } else if (1 == idx) {
       // this could be e[n][n], or [n][n]...
       if (!all_numbers(p)) {
-        if (strlen(p) == 0) {
-          return 0;
+        if (p[0] == 0) {
+          is_container_id = 0;
+          goto cleanup;
         }
         if (p[0] != 'e') {
-          return 0;
+          is_container_id = 0;
+          goto cleanup;
         }
         if (!all_numbers(p + 1)) {
-          return 0;
+          is_container_id = 0;
+          goto cleanup;
         }
       }
     } else {
       // otherwise, should be all numbers
       if (!all_numbers(p)) {
-        return 0;
+        is_container_id = 0;
+        goto cleanup;
       }
     }
 
     p = strtok(NULL, "_");
     idx++;
   }
-  free(input_cpy);
+
+cleanup:
+  if (input_cpy) {
+    free(input_cpy);
+  }
 
   // We should have [5,6] elements split by '_'
   if (idx > 6 || idx < 5) {
-    return 0;
+    is_container_id = 0;
   }
-  return 1;
-}
\ No newline at end of file
+  return is_container_id;
+}
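For illustration, how the two helpers compose (mirroring the option handling in gpu-module.c earlier in this patch):

    int* minors = NULL;
    size_t n_minors = 0;

    /* "0,1,3" -> {0, 1, 3}; non-zero on malformed input such as "1,2,aa".
     * Empty fields between commas are skipped. */
    if (get_numbers_split_by_comma("0,1,3", &minors, &n_minors) != 0) {
      /* handle parse error */
    }

    /* Accepts both id forms, e.g. container_e17_1410901177871_0001_01_000005
     * and container_1410901177871_0001_01_000005. */
    if (!validate_container_id("container_1410901177871_0001_01_000005")) {
      /* reject request */
    }
    free(minors);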
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
index 0a41ad1..c095eb6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
@@ -29,4 +29,9 @@
  */
 int validate_container_id(const char* input);
 
-#endif
\ No newline at end of file
+/*
+ * return 0 if succeeded
+ */
+int get_numbers_split_by_comma(const char* input, int** numbers, size_t* n_numbers);
+
+#endif
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
new file mode 100644
index 0000000..8ffbe88
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+#include <sstream>
+
+extern "C" {
+#include "configuration.h"
+#include "container-executor.h"
+#include "modules/cgroups/cgroups-operations.h"
+#include "test/test-container-executor-common.h"
+#include "util.h"
+}
+
+namespace ContainerExecutor {
+
+class TestCGroupsModule : public ::testing::Test {
+protected:
+  virtual void SetUp() {
+    if (mkdirs(TEST_ROOT, 0755) != 0) {
+      fprintf(ERRORFILE, "Failed to mkdir TEST_ROOT: %s\n", TEST_ROOT);
+      exit(1);
+    }
+    LOGFILE = stdout;
+    ERRORFILE = stderr;
+  }
+
+  virtual void TearDown() {}
+};
+
+TEST_F(TestCGroupsModule, test_cgroups_get_path_without_define_root) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_cgroups_get_path_without_root.cfg";
+  FILE *file = fopen(filename, "w");
+  if (file == NULL) {
+    printf("FAIL: Could not open configuration file: %s\n", filename);
+    exit(1);
+  }
+  fprintf(file, "[cgroups]\n");
+  fprintf(file, "yarn-hierarchy=yarn\n");
+  fclose(file);
+
+  // Read config file
+  read_executor_config(filename);
+  reload_cgroups_configuration();
+
+  char* path = get_cgroups_path_to_write("devices", "deny", "container_1");
+
+  ASSERT_TRUE(NULL == path) << "Should fail.\n";
+}
+
+TEST_F(TestCGroupsModule, test_cgroups_get_path_without_define_yarn_hierarchy) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_cgroups_get_path_without_root.cfg";
+  FILE *file = fopen(filename, "w");
+
+  ASSERT_TRUE(file) << "FAIL: Could not open configuration file: " << filename
+                    << "\n";
+  fprintf(file, "[cgroups]\n");
+  fprintf(file, "root=/sys/fs/cgroups\n");
+  fclose(file);
+
+  // Read config file
+  read_executor_config(filename);
+  reload_cgroups_configuration();
+  char* path = get_cgroups_path_to_write("devices", "deny", "container_1");
+
+  ASSERT_TRUE(NULL == path) << "Should fail.\n";
+}
+
+TEST_F(TestCGroupsModule, test_cgroups_get_path_succeeded) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_cgroups_get_path.cfg";
+  FILE *file = fopen(filename, "w");
+
+  ASSERT_TRUE(file) << "FAIL: Could not open configuration file\n";
+  fprintf(file, "[cgroups]\n");
+  fprintf(file, "root=/sys/fs/cgroups \n");
+  fprintf(file, "yarn-hierarchy=yarn \n");
+  fclose(file);
+
+  // Read config file
+  read_executor_config(filename);
+  reload_cgroups_configuration();
+
+  char* path = get_cgroups_path_to_write("devices", "deny", "container_1");
+  ASSERT_TRUE(NULL != path) << "Should success.\n";
+
+  const char *EXPECTED =
+      "/sys/fs/cgroups/devices/yarn/container_1/devices.deny";
+
+  ASSERT_STREQ(EXPECTED, path)
+      << "Return cgroup-path-to-write is not expected\n";
+}
+} // namespace ContainerExecutor
\ No newline at end of file
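The passing case above pins down the layout get_cgroups_path_to_write is expected to produce, i.e. <root>/<hierarchy>/<yarn-hierarchy>/<group>/<hierarchy>.<param>. A sketch of that layout only (the real implementation also validates inputs, see verify_path_safety):

    char path[4096];
    snprintf(path, sizeof(path), "%s/%s/%s/%s/%s.%s",
             "/sys/fs/cgroups",  /* [cgroups] root            */
             "devices",          /* hierarchy name            */
             "yarn",             /* [cgroups] yarn-hierarchy  */
             "container_1",      /* cgroup/group id           */
             "devices", "deny"); /* parameter file            */
    /* -> /sys/fs/cgroups/devices/yarn/container_1/devices.deny */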
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
new file mode 100644
index 0000000..7e41fb4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+#include <sstream>
+
+extern "C" {
+#include "configuration.h"
+#include "container-executor.h"
+#include "modules/cgroups/cgroups-operations.h"
+#include "modules/gpu/gpu-module.h"
+#include "test/test-container-executor-common.h"
+#include "util.h"
+}
+
+namespace ContainerExecutor {
+
+class TestGpuModule : public ::testing::Test {
+protected:
+  virtual void SetUp() {
+    if (mkdirs(TEST_ROOT, 0755) != 0) {
+      fprintf(ERRORFILE, "Failed to mkdir TEST_ROOT: %s\n", TEST_ROOT);
+      exit(1);
+    }
+    LOGFILE = stdout;
+    ERRORFILE = stderr;
+  }
+
+  virtual void TearDown() {
+
+  }
+};
+
+static std::vector<const char*> cgroups_parameters_invoked;
+
+static int mock_update_cgroups_parameters(
+   const char* controller_name,
+   const char* param_name,
+   const char* group_id,
+   const char* value) {
+  char* buf = (char*) malloc(128);
+  strcpy(buf, controller_name);
+  cgroups_parameters_invoked.push_back(buf);
+
+  buf = (char*) malloc(128);
+  strcpy(buf, param_name);
+  cgroups_parameters_invoked.push_back(buf);
+
+  buf = (char*) malloc(128);
+  strcpy(buf, group_id);
+  cgroups_parameters_invoked.push_back(buf);
+
+  buf = (char*) malloc(128);
+  strcpy(buf, value);
+  cgroups_parameters_invoked.push_back(buf);
+  return 0;
+}
+
+static void verify_param_updated_to_cgroups(
+    int argc, const char** argv) {
+  ASSERT_EQ(argc, cgroups_parameters_invoked.size());
+
+  int offset = 0;
+  while (offset < argc) {
+    ASSERT_STREQ(argv[offset], cgroups_parameters_invoked[offset]);
+    offset++;
+  }
+}
+
+static void write_and_load_gpu_module_to_cfg(const char* cfg_filepath, int enabled) {
+  FILE *file = fopen(cfg_filepath, "w");
+  if (file == NULL) {
+    printf("FAIL: Could not open configuration file: %s\n", cfg_filepath);
+    exit(1);
+  }
+  fprintf(file, "[gpu]\n");
+  if (enabled) {
+    fprintf(file, "module.enabled=true\n");
+  } else {
+    fprintf(file, "module.enabled=false\n");
+  }
+  fclose(file);
+
+  // Read config file
+  read_executor_config(cfg_filepath);
+  reload_gpu_configuration();
+}
+
+static void test_gpu_module_enabled_disabled(int enabled) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_cgroups_module_enabled_disabled.cfg";
+  write_and_load_gpu_module_to_cfg(filename, enabled);
+
+  char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id",
+                   (char*) "container_1498064906505_0001_01_000001" };
+
+  int rc = handle_gpu_request(&mock_update_cgroups_parameters,
+              "gpu", 5, argv);
+
+  int EXPECTED_RC;
+  if (enabled) {
+    EXPECTED_RC = 0;
+  } else {
+    EXPECTED_RC = -1;
+  }
+  ASSERT_EQ(EXPECTED_RC, rc);
+}
+
+TEST_F(TestGpuModule, test_verify_gpu_module_calls_cgroup_parameter) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_verify_gpu_module_calls_cgroup_parameter.cfg";
+  write_and_load_gpu_module_to_cfg(filename, 1);
+
+  char* container_id = (char*) "container_1498064906505_0001_01_000001";
+  char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id",
+                   container_id };
+
+  /* Test case 1: block 2 devices */
+  cgroups_parameters_invoked.clear();
+  int rc = handle_gpu_request(&mock_update_cgroups_parameters,
+     "gpu", 5, argv);
+  ASSERT_EQ(0, rc) << "Should success.\n";
+
+  // Verify cgroups parameters
+  const char* expected_cgroups_argv[] = { "devices", "deny", container_id, "c 195:0 rwm",
+    "devices", "deny", container_id, "c 195:1 rwm"};
+  verify_param_updated_to_cgroups(8, expected_cgroups_argv);
+
+  /* Test case 2: block 0 devices */
+  cgroups_parameters_invoked.clear();
+  char* argv_1[] = { (char*) "--module-gpu", (char*) "--container_id", container_id };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+     "gpu", 3, argv_1);
+  ASSERT_EQ(0, rc) << "Should success.\n";
+
+  // Verify cgroups parameters
+  verify_param_updated_to_cgroups(0, NULL);
+}
+
+TEST_F(TestGpuModule, test_illegal_cli_parameters) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_illegal_cli_parameters.cfg";
+  write_and_load_gpu_module_to_cfg(filename, 1);
+
+  // Illegal container id - 1
+  char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id", (char*) "xxxx" };
+  int rc = handle_gpu_request(&mock_update_cgroups_parameters,
+     "gpu", 5, argv);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+
+  // Illegal container id - 2
+  char* argv_1[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id", (char*) "container_1" };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+     "gpu", 5, argv_1);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+
+  // Illegal container id - 3
+  char* argv_2[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1" };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+     "gpu", 3, argv_2);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+}
+
+TEST_F(TestGpuModule, test_gpu_module_disabled) {
+  test_gpu_module_enabled_disabled(0);
+}
+
+TEST_F(TestGpuModule, test_gpu_module_enabled) {
+  test_gpu_module_enabled_disabled(1);
+}
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h
new file mode 100644
index 0000000..d353625
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #ifdef __APPLE__
+ #include <CoreFoundation/CFString.h>
+ #include <CoreFoundation/CFPreferences.h>
+
+ #define TMPDIR "/private/tmp"
+ #define RELTMPDIR "../.."
+ #else
+ #define RELTMPDIR ".."
+ #define TMPDIR "/tmp"
+ #endif
+
+ #define TEST_ROOT TMPDIR "/test-container-executor"
+
+ #define DONT_TOUCH_FILE "dont-touch-me"
+ #define NM_LOCAL_DIRS       TEST_ROOT "/local-1%" TEST_ROOT "/local-2%" \
+                TEST_ROOT "/local-3%" TEST_ROOT "/local-4%" TEST_ROOT "/local-5"
+ #define NM_LOG_DIRS         TEST_ROOT "/logs/userlogs"
+ #define ARRAY_SIZE 1000
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3cfefa0..64ee717 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -19,6 +19,7 @@
 #include "container-executor.h"
 #include "utils/string-utils.h"
 #include "util.h"
+#include "test/test-container-executor-common.h"
 
 #include <inttypes.h>
 #include <errno.h>
@@ -31,25 +32,6 @@
 #include <sys/stat.h>
 #include <sys/wait.h>
 
-#ifdef __APPLE__
-#include <CoreFoundation/CFString.h>
-#include <CoreFoundation/CFPreferences.h>
-
-#define TMPDIR "/private/tmp"
-#define RELTMPDIR "../.."
-#else
-#define RELTMPDIR ".."
-#define TMPDIR "/tmp"
-#endif
-
-#define TEST_ROOT TMPDIR "/test-container-executor"
-
-#define DONT_TOUCH_FILE "dont-touch-me"
-#define NM_LOCAL_DIRS       TEST_ROOT "/local-1%" TEST_ROOT "/local-2%" \
-               TEST_ROOT "/local-3%" TEST_ROOT "/local-4%" TEST_ROOT "/local-5"
-#define NM_LOG_DIRS         TEST_ROOT "/logs/userlogs"
-#define ARRAY_SIZE 1000
-
 static char* username = NULL;
 static char* yarn_username = NULL;
 static char** local_dirs = NULL;
@@ -1486,10 +1468,7 @@
   test_check_user(1);
 #endif
 
-  run("rm -fr " TEST_ROOT);
-
   test_trim_function();
-
   printf("\nFinished tests\n");
 
   free(current_username);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc
index d59a3f2..44c9b1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc
@@ -20,10 +20,13 @@
 #include <main/native/container-executor/impl/util.h>
 #include <cstdio>
 
-FILE* ERRORFILE = stderr;
-FILE* LOGFILE = stdout;
+extern "C" {
+#include "util.h"
+}
 
 int main(int argc, char **argv) {
-    testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
+  ERRORFILE = stderr;
+  LOGFILE = stdout;
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc
new file mode 100644
index 0000000..a24c0c7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <inttypes.h>
+ #include <signal.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/stat.h>
+ #include <sys/wait.h>
+ #include <unistd.h>
+
+ #include <gtest/gtest.h>
+ #include <sstream>
+
+ extern "C" {
+ #include "utils/path-utils.h"
+ }
+
+ namespace ContainerExecutor {
+
+ class TestPathUtils : public ::testing::Test {
+ protected:
+   virtual void SetUp() {
+
+   }
+
+   virtual void TearDown() {
+
+   }
+ };
+
+ TEST_F(TestPathUtils, test_path_safety) {
+   const char* input = "./../abc/";
+   int flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_FALSE(flag) << "Should failed\n";
+
+   input = "abc/./cde";
+   flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(flag) << "Should succeeded\n";
+
+   input = "/etc/abc/cde/./x/./y";
+   flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(flag) << "Should succeeded\n";
+}
+
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc
new file mode 100644
index 0000000..037816a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <inttypes.h>
+ #include <signal.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/stat.h>
+ #include <sys/wait.h>
+ #include <unistd.h>
+
+ #include <gtest/gtest.h>
+ #include <sstream>
+
+ extern "C" {
+ #include "utils/string-utils.h"
+ }
+
+ namespace ContainerExecutor {
+
+ class TestStringUtils : public ::testing::Test {
+ protected:
+   virtual void SetUp() {
+
+   }
+
+   virtual void TearDown() {
+
+   }
+ };
+
+ TEST_F(TestStringUtils, test_get_numbers_split_by_comma) {
+   const char* input = ",1,2,3,-1,,1,,0,";
+   int* numbers;
+   size_t n_numbers;
+   int rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeeded\n";
+   ASSERT_EQ(6, n_numbers);
+   ASSERT_EQ(1, numbers[0]);
+   ASSERT_EQ(-1, numbers[3]);
+   ASSERT_EQ(0, numbers[5]);
+
+   input = "3";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeeded\n";
+   ASSERT_EQ(1, n_numbers);
+   ASSERT_EQ(3, numbers[0]);
+
+   input = "";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeeded\n";
+   ASSERT_EQ(0, n_numbers);
+
+   input = ",,";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeeded\n";
+   ASSERT_EQ(0, n_numbers);
+
+   input = "1,2,aa,bb";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(0 != rc) << "Should failed\n";
+
+   input = "1,2,3,-12312312312312312312321311231231231";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(0 != rc) << "Should failed\n";
+}
+
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
index e520a31..b5cb43b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
@@ -19,12 +19,12 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.junit.Assert.fail;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -60,8 +60,8 @@
 
 public class DummyContainerManager extends ContainerManagerImpl {
 
-  private static final Log LOG = LogFactory
-      .getLog(DummyContainerManager.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(DummyContainerManager.class);
   
   public DummyContainerManager(Context context, ContainerExecutor exec,
       DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
index 50487c8..2e80259 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java
@@ -19,11 +19,11 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -46,7 +46,8 @@
  * real RM.
  */
 public class MockNodeStatusUpdater extends NodeStatusUpdaterImpl {
-  static final Log LOG = LogFactory.getLog(MockNodeStatusUpdater.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(MockNodeStatusUpdater.class);
   
   private static final RecordFactory recordFactory = RecordFactoryProvider
       .getRecordFactory(null);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
index 028db6a..7bf042f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
@@ -20,9 +20,9 @@
 
 import java.io.File;
 import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,8 +34,8 @@
 
 public class TestContainerManagerWithLCE extends TestContainerManager {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestContainerManagerWithLCE.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestContainerManagerWithLCE.class);
 
   public TestContainerManagerWithLCE() throws UnsupportedFileSystemException {
     super();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
index e529628..095f21a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
@@ -128,8 +128,12 @@
     DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
     dc.checkDirs();
     Assert.assertEquals(0, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(1, dc.getFailedDirs().size());
     Assert.assertEquals(1, dc.getFullDirs().size());
+    Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA));
+    Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, dc.getDirectoryErrorInfo(dirA).cause);
+
     // no good dirs
     Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
 
@@ -139,16 +143,21 @@
             testDir.getTotalSpace());
     dc.checkDirs();
     Assert.assertEquals(1, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(0, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+    Assert.assertNull(dc.getDirectoryErrorInfo(dirA));
+
     Assert.assertEquals(utilizedSpacePerc,
       dc.getGoodDirsDiskUtilizationPercentage());
 
     dc = new DirectoryCollection(dirs, testDir.getTotalSpace() / (1024 * 1024));
     dc.checkDirs();
     Assert.assertEquals(0, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(1, dc.getFailedDirs().size());
     Assert.assertEquals(1, dc.getFullDirs().size());
+    Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA));
     // no good dirs
     Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
 
@@ -158,8 +167,11 @@
             testDir.getTotalSpace());
     dc.checkDirs();
     Assert.assertEquals(1, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(0, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+    Assert.assertNull(dc.getDirectoryErrorInfo(dirA));
+
     Assert.assertEquals(utilizedSpacePerc,
       dc.getGoodDirsDiskUtilizationPercentage());
   }
@@ -209,12 +221,17 @@
     Assert.assertEquals(0, dc.getGoodDirs().size());
     Assert.assertEquals(1, dc.getFailedDirs().size());
     Assert.assertEquals(1, dc.getFullDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
+    Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA));
+    Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, dc.getDirectoryErrorInfo(dirA).cause);
 
     dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F);
     dc.checkDirs();
     Assert.assertEquals(1, dc.getGoodDirs().size());
     Assert.assertEquals(0, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
+    Assert.assertNull(dc.getDirectoryErrorInfo(dirA));
 
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
 
@@ -232,12 +249,18 @@
     Assert.assertEquals(0, dc.getGoodDirs().size());
     Assert.assertEquals(1, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+    Assert.assertEquals(1, dc.getErroredDirs().size());
+    Assert.assertNotNull(dc.getDirectoryErrorInfo(dirB));
+    Assert.assertEquals(DirectoryCollection.DiskErrorCause.OTHER, dc.getDirectoryErrorInfo(dirB).cause);
+
     permDirB = new FsPermission((short) 0700);
     localFs.setPermission(pathB, permDirB);
     dc.checkDirs();
     Assert.assertEquals(1, dc.getGoodDirs().size());
     Assert.assertEquals(0, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
+    Assert.assertNull(dc.getDirectoryErrorInfo(dirA));
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index d67bd76..d4db6b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -27,6 +27,8 @@
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -40,8 +42,6 @@
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileContext;
@@ -137,8 +137,8 @@
  * </ol>
  */
 public class TestLinuxContainerExecutor {
-  private static final Log LOG = LogFactory
-    .getLog(TestLinuxContainerExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestLinuxContainerExecutor.class);
 
   private static File workSpace;
   static {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index cfd0e36..6ca96db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -29,6 +29,8 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.FileReader;
@@ -44,8 +46,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -79,8 +79,8 @@
 
 public class TestLinuxContainerExecutorWithMocks {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestLinuxContainerExecutorWithMocks.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestLinuxContainerExecutorWithMocks.class);
 
   private static final String MOCK_EXECUTOR =
       "./src/test/resources/mock-container-executor";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeHealthService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeHealthService.java
index c564c01..8083a56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeHealthService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeHealthService.java
@@ -22,11 +22,11 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Strings;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileUtil;
@@ -47,8 +47,8 @@
 
 public class TestNodeHealthService {
 
-  private static volatile Log LOG = LogFactory
-      .getLog(TestNodeHealthService.class);
+  private static volatile Logger LOG =
+       LoggerFactory.getLogger(TestNodeHealthService.class);
 
   protected static File testRootDir = new File("target",
       TestNodeHealthService.class.getName() + "-localDir").getAbsoluteFile();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index c1df562..f661cf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -22,6 +22,8 @@
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -32,8 +34,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -82,7 +82,8 @@
   private FileContext localFS;
   private MyNodeManager nm;
   private DeletionService delService;
-  static final Log LOG = LogFactory.getLog(TestNodeManagerReboot.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(TestNodeManagerReboot.class);
 
   @Before
   public void setup() throws UnsupportedFileSystemException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index b8cd7dd..97e9922 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -21,6 +21,8 @@
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -36,8 +38,6 @@
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -113,8 +113,8 @@
       new NodeManagerEvent(NodeManagerEventType.RESYNC);
   private final long DUMMY_RM_IDENTIFIER = 1234;
 
-  protected static Log LOG = LogFactory
-      .getLog(TestNodeManagerResync.class);
+  protected static final Logger LOG =
+       LoggerFactory.getLogger(TestNodeManagerResync.class);
 
   @Before
   public void setup() throws UnsupportedFileSystemException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index ca029be..11c3c35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -21,6 +21,8 @@
 import static org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils.newNodeHeartbeatResponse;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.File;
@@ -45,8 +47,6 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -125,7 +125,8 @@
     DefaultMetricsSystem.setMiniClusterMode(true);
   }
 
-  static final Log LOG = LogFactory.getLog(TestNodeStatusUpdater.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(TestNodeStatusUpdater.class);
   static final File basedir =
       new File("target", TestNodeStatusUpdater.class.getName());
   static final File nmLocalDir = new File(basedir, "nm0");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index a24c83b..7c8551e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -33,9 +33,9 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,6 +59,7 @@
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -87,8 +88,8 @@
  *
  */
 public abstract class BaseAMRMProxyTest {
-  private static final Log LOG = LogFactory
-      .getLog(BaseAMRMProxyTest.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(BaseAMRMProxyTest.class);
   // The AMRMProxyService instance that will be used by all the test cases
   private MockAMRMProxyService amrmProxyService;
 
@@ -659,7 +660,13 @@
     }
 
     @Override
-    public Map<ApplicationId, String> getRegisteredCollectors() {
+    public ConcurrentMap<ApplicationId, AppCollectorData>
+        getRegisteringCollectors() {
+      return null;
+    }
+
+    @Override
+    public ConcurrentMap<ApplicationId, AppCollectorData> getKnownCollectors() {
       return null;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index 72e5f53..937ede5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
@@ -22,9 +22,9 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -50,8 +50,8 @@
 
 public class TestAMRMProxyService extends BaseAMRMProxyTest {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestAMRMProxyService.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestAMRMProxyService.class);
 
   private static MockResourceManagerFacade mockRM;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 6c96a47..3cafcbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
 import static org.mockito.Mockito.spy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -30,8 +32,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -114,8 +114,8 @@
     tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
   }
 
-  protected static Log LOG = LogFactory
-      .getLog(BaseContainerManagerTest.class);
+  protected static Logger LOG =
+       LoggerFactory.getLogger(BaseContainerManagerTest.class);
 
   protected static final int HTTP_PORT = 5412;
   protected Configuration conf = new YarnConfiguration();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
index f075713..bfe87c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
@@ -26,6 +26,8 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 import com.google.common.collect.Sets;
@@ -42,8 +44,6 @@
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -73,7 +73,8 @@
 import org.junit.Test;
 
 public class TestAuxServices {
-  private static final Log LOG = LogFactory.getLog(TestAuxServices.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestAuxServices.class);
   private static final File TEST_DIR = new File(
       System.getProperty("test.build.data",
           System.getProperty("java.io.tmpdir")),
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 9844225..f379c08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -40,7 +40,6 @@
 import java.util.Map;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -110,6 +109,7 @@
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
+import org.slf4j.LoggerFactory;
 
 public class TestContainerManager extends BaseContainerManagerTest {
 
@@ -118,7 +118,7 @@
   }
 
   static {
-    LOG = LogFactory.getLog(TestContainerManager.class);
+    LOG = LoggerFactory.getLogger(TestContainerManager.class);
   }
 
   private boolean delayContainers = false;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
index 05ea036..65558e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
@@ -601,7 +601,7 @@
 
     public void containerFinished(int containerNum) {
       app.handle(new ApplicationContainerFinishedEvent(containers.get(
-          containerNum).cloneAndGetContainerStatus()));
+          containerNum).cloneAndGetContainerStatus(), 0));
       drainDispatcherEvents();
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index e71ce75..085b60f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -39,6 +39,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -1529,4 +1530,105 @@
     verify(updaterNoCall, never()).reportException(any());
 
   }
+
+  /*
+   * ${foo.version} is an invalid variable reference: shell variable names
+   * cannot contain dots, so launching the script must fail.
+   */
+  @Test
+  public void testInvalidEnvVariableSubstitutionType1() throws IOException {
+    Map<String, String> env = new HashMap<String, String>();
+    // invalid env
+    env.put("testVar", "version${foo.version}");
+    validateShellExecutorForDifferentEnvs(env);
+  }
+
+  /*
+   * A path-style value mixing unresolved references (${foo.path}, $bar)
+   * must likewise fail the launch.
+   */
+  @Test
+  public void testInvalidEnvVariableSubstitutionType2() throws IOException {
+    Map<String, String> env = new HashMap<String, String>();
+    // invalid env
+    env.put("testPath", "/abc:/${foo.path}:/$bar");
+    validateShellExecutorForDifferentEnvs(env);
+  }
+
+  private void validateShellExecutorForDifferentEnvs(Map<String, String> env)
+      throws IOException {
+    File shellFile = null;
+    try {
+      shellFile = Shell.appendScriptExtension(tmpDir, "hello");
+      Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
+      FileOutputStream fos = new FileOutputStream(shellFile);
+      FileUtil.setExecutable(shellFile, true);
+
+      List<String> commands = new ArrayList<String>();
+      DefaultContainerExecutor executor = new DefaultContainerExecutor();
+      executor.setConf(new Configuration());
+      executor.writeLaunchEnv(fos, env, resources, commands,
+          new Path(localLogDir.getAbsolutePath()), user);
+      fos.flush();
+      fos.close();
+
+      // Run with LANG=C so the shell's messages are predictable.
+      Map<String, String> cmdEnv = new HashMap<String, String>();
+      cmdEnv.put("LANG", "C");
+      Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
+          new String[] { shellFile.getAbsolutePath() }, tmpDir, cmdEnv);
+      try {
+        shexc.execute();
+        Assert.fail("Should catch exception");
+      } catch (ExitCodeException e) {
+        Assert.assertTrue(shexc.getExitCode() != 0);
+      }
+    } finally {
+      // cleanup
+      if (shellFile != null && shellFile.exists()) {
+        shellFile.delete();
+      }
+    }
+  }
+
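+
Both invalid cases hand writeLaunchEnv a value a POSIX shell cannot expand: ${foo.version} is rejected because variable names may not contain dots, and ${foo.path}/$bar are unresolved references. A self-contained sketch of the failure mode outside the launcher, assuming a Linux host with bash (the script content mirrors the env value above; everything else is illustrative):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.util.Shell;
    import org.apache.hadoop.util.Shell.ExitCodeException;

    public class BadSubstitutionSketch {
      public static void main(String[] args) throws Exception {
        File script = File.createTempFile("hello", ".sh");
        try (FileOutputStream fos = new FileOutputStream(script)) {
          // same shape as the invalid env value used in the test above
          fos.write("export testVar=\"version${foo.version}\"\n"
              .getBytes(StandardCharsets.UTF_8));
        }
        Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
            new String[] { "bash", script.getAbsolutePath() });
        try {
          shexc.execute();
        } catch (ExitCodeException e) {
          // bash reports "bad substitution" and exits non-zero
          System.out.println("exit code = " + shexc.getExitCode());
        } finally {
          script.delete();
        }
      }
    }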
+  @Test
+  public void testValidEnvVariableSubstitution() throws IOException  {
+    File shellFile = null;
+    try {
+      shellFile = Shell.appendScriptExtension(tmpDir, "hello");
+      Map<Path, List<String>> resources =
+          new HashMap<Path, List<String>>();
+      FileOutputStream fos = new FileOutputStream(shellFile);
+      FileUtil.setExecutable(shellFile, true);
+
+      Map<String, String> env = new LinkedHashMap<String, String>();
+      // valid env
+      env.put("foo", "2.4.6");
+      env.put("testVar", "version${foo}");
+      List<String> commands = new ArrayList<String>();
+      DefaultContainerExecutor executor = new DefaultContainerExecutor();
+      executor.setConf(new Configuration());
+      executor.writeLaunchEnv(fos, env, resources, commands,
+          new Path(localLogDir.getAbsolutePath()), user);
+      fos.flush();
+      fos.close();
+
+      // Run with LANG=C so the shell's messages are predictable.
+      Map<String, String> cmdEnv = new HashMap<String, String>();
+      cmdEnv.put("LANG", "C");
+      Shell.ShellCommandExecutor shexc = new Shell.ShellCommandExecutor(
+          new String[] { shellFile.getAbsolutePath() }, tmpDir, cmdEnv);
+      try {
+        shexc.execute();
+      } catch (ExitCodeException e) {
+        Assert.fail("The launch script should not have failed");
+      }
+      Assert.assertEquals(0, shexc.getExitCode());
+    } finally {
+      // cleanup
+      if (shellFile != null && shellFile.exists()) {
+        shellFile.delete();
+      }
+    }
+  }
 }
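
Note the LinkedHashMap in the valid-substitution test (hence the new java.util.LinkedHashMap import at the top of this file's diff): the launch script exports entries in map iteration order, so foo must be emitted before testVar for ${foo} to resolve, and insertion order is exactly what LinkedHashMap guarantees. The ordering difference in isolation (standard java.util behavior; the tie to writeLaunchEnv's output order is an assumption):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class EnvOrderSketch {
      public static void main(String[] args) {
        Map<String, String> ordered = new LinkedHashMap<>();
        ordered.put("foo", "2.4.6");
        ordered.put("testVar", "version${foo}");
        System.out.println(ordered.keySet());   // always [foo, testVar]

        Map<String, String> unordered = new HashMap<>();
        unordered.put("foo", "2.4.6");
        unordered.put("testVar", "version${foo}");
        System.out.println(unordered.keySet()); // order is unspecified
      }
    }
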
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
index 7146412..c5b2e97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/TestPrivilegedOperationExecutor.java
@@ -20,13 +20,13 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -34,8 +34,8 @@
 import java.util.List;
 
 public class TestPrivilegedOperationExecutor {
-  private static final Log LOG = LogFactory
-      .getLog(TestPrivilegedOperationExecutor.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestPrivilegedOperationExecutor.class);
   private String localDataDir;
   private String customExecutorPath;
   private Configuration nullConf = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index ab989cf..996fff0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -21,8 +21,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -58,8 +58,8 @@
  * Tests for the CGroups handler implementation.
  */
 public class TestCGroupsHandlerImpl {
-  private static final Log LOG =
-      LogFactory.getLog(TestCGroupsHandlerImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestCGroupsHandlerImpl.class);
 
   private PrivilegedOperationExecutor privilegedOperationExecutorMock;
   private String tmpPath;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java
index 9da37dd..e5414a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestResourceHandlerModule.java
@@ -20,19 +20,19 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.List;
 
 public class TestResourceHandlerModule {
-  private static final Log LOG = LogFactory.
-      getLog(TestResourceHandlerModule.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestResourceHandlerModule.class);
   Configuration emptyConf;
   Configuration networkEnabledConf;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java
index 13b0188..09f4f1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficControlBandwidthHandlerImpl.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -35,6 +33,8 @@
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.util.List;
@@ -49,8 +49,8 @@
 import static org.mockito.Mockito.when;
 
 public class TestTrafficControlBandwidthHandlerImpl {
-  private static final Log LOG =
-      LogFactory.getLog(TestTrafficControlBandwidthHandlerImpl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestTrafficControlBandwidthHandlerImpl.class);
   private static final int ROOT_BANDWIDTH_MBIT = 100;
   private static final int YARN_BANDWIDTH_MBIT = 70;
   private static final int TEST_CLASSID = 100;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java
index 7ea7135..c523663 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,6 +31,8 @@
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -49,7 +49,8 @@
 import static org.mockito.Mockito.when;
 
 public class TestTrafficController {
-  private static final Log LOG = LogFactory.getLog(TestTrafficController.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestTrafficController.class);
   private static final int ROOT_BANDWIDTH_MBIT = 100;
   private static final int YARN_BANDWIDTH_MBIT = 70;
   private static final int CONTAINER_BANDWIDTH_MBIT = 10;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 9894dcd..d57d33c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -44,6 +42,8 @@
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -64,8 +64,8 @@
 import static org.mockito.Mockito.*;
 
 public class TestDockerContainerRuntime {
-  private static final Log LOG = LogFactory
-      .getLog(TestDockerContainerRuntime.class);
+  private static final Logger LOG =
+       LoggerFactory.getLogger(TestDockerContainerRuntime.class);
   private Configuration conf;
   private PrivilegedOperationExecutor mockExecutor;
   private CGroupsHandler mockCGroupsHandler;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
index 6f6482f..17eee82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java
@@ -37,6 +37,8 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -52,8 +54,6 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -94,7 +94,8 @@
 
 public class TestContainerLocalizer {
 
-  static final Log LOG = LogFactory.getLog(TestContainerLocalizer.class);
+  static final Logger LOG =
+       LoggerFactory.getLogger(TestContainerLocalizer.class);
   static final Path basedir =
       new Path("target", TestContainerLocalizer.class.getName());
   static final FsPermission CACHE_DIR_PERM = new FsPermission((short)0710);
@@ -299,7 +300,7 @@
         try {
           localizerA.runLocalization(nmAddr);
         } catch (Exception e) {
-          LOG.warn(e);
+          LOG.warn(e.toString());
         }
       }
     };
@@ -309,7 +310,7 @@
         try {
           localizerB.runLocalization(nmAddr);
         } catch (Exception e) {
-          LOG.warn(e);
+          LOG.warn(e.toString());
         }
       }
     };
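
The two hunks above rewrite LOG.warn(e) as LOG.warn(e.toString()) because slf4j's Logger, unlike commons-logging's Log, has no warn(Throwable) overload. That spelling compiles but drops the stack trace; the message-plus-throwable overload preserves it. A minimal sketch of the difference:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class WarnOverloadSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(WarnOverloadSketch.class);

      public static void main(String[] args) {
        Exception e = new IllegalStateException("localization failed");
        LOG.warn(e.toString());             // message only; stack trace lost
        LOG.warn("localization failed", e); // message plus full stack trace
      }
    }
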
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index b4bd9d7..bedad33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -35,7 +35,7 @@
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
-import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationTFileController;
 import org.apache.hadoop.yarn.server.api.ContainerLogContext;
 import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -241,8 +241,8 @@
     // verify uploaded files
     ArgumentCaptor<LogValue> logValCaptor =
         ArgumentCaptor.forClass(LogValue.class);
-    verify(appLogAggregator.logWriter).append(any(LogKey.class),
-        logValCaptor.capture());
+    verify(appLogAggregator.getLogAggregationFileController()).write(
+        any(LogKey.class), logValCaptor.capture());
     Set<String> filesUploaded = new HashSet<>();
     LogValue logValue = logValCaptor.getValue();
     for(File file: logValue.getPendingLogFilesToUploadForThisContainer()) {
@@ -287,11 +287,13 @@
     final Context context = createContext(config);
     final FileContext fakeLfs = mock(FileContext.class);
     final Path remoteLogDirForApp = new Path(REMOTE_LOG_FILE.getAbsolutePath());
-
+    LogAggregationTFileController format = spy(
+        new LogAggregationTFileController());
+    format.initialize(config, "TFile");
     return new AppLogAggregatorInTest(dispatcher, deletionService,
         config, applicationId, ugi, nodeId, dirsService,
         remoteLogDirForApp, appAcls, logAggregationContext,
-        context, fakeLfs, recoveredLogInitedTimeMillis);
+        context, fakeLfs, recoveredLogInitedTimeMillis, format);
   }
 
   /**
@@ -402,7 +404,6 @@
 
     final DeletionService deletionService;
     final ApplicationId applicationId;
-    final LogWriter logWriter;
     final ArgumentCaptor<LogValue> logValue;
 
     public AppLogAggregatorInTest(Dispatcher dispatcher,
@@ -411,19 +412,15 @@
         LocalDirsHandlerService dirsHandler, Path remoteNodeLogFileForApp,
         Map<ApplicationAccessType, String> appAcls,
         LogAggregationContext logAggregationContext, Context context,
-        FileContext lfs, long recoveredLogInitedTime) throws IOException {
+        FileContext lfs, long recoveredLogInitedTime,
+        LogAggregationTFileController format) throws IOException {
       super(dispatcher, deletionService, conf, appId, ugi, nodeId,
           dirsHandler, remoteNodeLogFileForApp, appAcls,
-          logAggregationContext, context, lfs, -1, recoveredLogInitedTime);
+          logAggregationContext, context, lfs, -1, recoveredLogInitedTime,
+          format);
       this.applicationId = appId;
       this.deletionService = deletionService;
-      this.logWriter = spy(new LogWriter());
       this.logValue = ArgumentCaptor.forClass(LogValue.class);
     }
-
-    @Override
-    protected LogWriter createLogWriter() {
-      return this.logWriter;
-    }
   }
 }
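
The rewrite above removes the createLogWriter() override as a test seam: the aggregator now writes through a pluggable LogAggregationFileController passed in by constructor, so the test injects a spied LogAggregationTFileController and verifies write(LogKey, LogValue) instead of LogWriter.append. The injection-and-verify idiom reduced to its Mockito essentials (a fragment; the aggregator wiring is elided):

    // Build and spy the controller exactly as the test does.
    LogAggregationTFileController format =
        spy(new LogAggregationTFileController());
    format.initialize(conf, "TFile");

    // ... construct the aggregator with 'format' and run an aggregation ...

    ArgumentCaptor<LogValue> captor = ArgumentCaptor.forClass(LogValue.class);
    verify(format).write(any(LogKey.class), captor.capture());
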
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 37fe77a..a3e01af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -60,7 +60,6 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -104,6 +103,9 @@
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationTFileController;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@@ -137,14 +139,14 @@
 import org.eclipse.jetty.util.MultiException;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
 
-//@Ignore
 public class TestLogAggregationService extends BaseContainerManagerTest {
 
   private Map<ApplicationAccessType, String> acls = createAppAcls();
   
   static {
-    LOG = LogFactory.getLog(TestLogAggregationService.class);
+    LOG = LoggerFactory.getLogger(TestLogAggregationService.class);
   }
 
   private static RecordFactory recordFactory = RecordFactoryProvider
@@ -161,11 +163,12 @@
   DrainDispatcher dispatcher;
   EventHandler<Event> appEventHandler;
 
+  private NodeId nodeId = NodeId.newInstance("0.0.0.0", 5555);
+
   @Override
   @SuppressWarnings("unchecked")
   public void setup() throws IOException {
     super.setup();
-    NodeId nodeId = NodeId.newInstance("0.0.0.0", 5555);
     ((NMContext)context).setNodeId(nodeId);
     dispatcher = createDispatcher();
     appEventHandler = mock(EventHandler.class);
@@ -204,13 +207,13 @@
 
     ApplicationAttemptId appAttemptId =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container11 = createContainer(appAttemptId, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container11 = ContainerId.newContainerId(appAttemptId, 1);
     // Simulate log-file creation
     writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container11, 0));
+        new LogHandlerContainerFinishedEvent(container11,
+            ContainerType.APPLICATION_MASTER, 0));
 
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         application1));
@@ -246,9 +249,9 @@
     Assert.assertFalse("Directory [" + app1LogDir + "] was not deleted",
       app1LogDir.exists());
 
-    Path logFilePath =
-        logAggregationService.getRemoteNodeLogFileForApp(application1,
-            this.user);
+    Path logFilePath = logAggregationService
+        .getLogAggregationFileController(conf)
+        .getRemoteNodeLogFileForApp(application1, this.user, nodeId);
 
     Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(
         logFilePath.toUri().getPath()).exists());
@@ -328,11 +331,11 @@
 
     ApplicationAttemptId appAttemptId =
         BuilderUtils.newApplicationAttemptId(app, 1);
-    ContainerId cont = createContainer(appAttemptId, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId cont = ContainerId.newContainerId(appAttemptId, 1);
     writeContainerLogs(appLogDir, cont, new String[] { "stdout",
         "stderr", "syslog" });
-    logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont, 0));
+    logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont,
+        ContainerType.APPLICATION_MASTER, 0));
     logAggregationService.handle(new LogHandlerAppFinishedEvent(app));
     logAggregationService.stop();
     delSrvc.stop();
@@ -369,9 +372,10 @@
 
     logAggregationService.stop();
     assertEquals(0, logAggregationService.getNumAggregators());
-
-    Assert.assertFalse(new File(logAggregationService
-        .getRemoteNodeLogFileForApp(application1, this.user).toUri().getPath())
+    LogAggregationFileController format1 =
+        logAggregationService.getLogAggregationFileController(conf);
+    Assert.assertFalse(new File(format1.getRemoteNodeLogFileForApp(
+        application1, this.user, this.nodeId).toUri().getPath())
         .exists());
 
     dispatcher.await();
@@ -416,13 +420,13 @@
 
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container11 = createContainer(appAttemptId1, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container11 = ContainerId.newContainerId(appAttemptId1, 1);
 
     // Simulate log-file creation
     writeContainerLogs(app1LogDir, container11, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container11, 0));
+        new LogHandlerContainerFinishedEvent(container11,
+            ContainerType.APPLICATION_MASTER, 0));
 
     ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
     ApplicationAttemptId appAttemptId2 =
@@ -439,19 +443,19 @@
     logAggregationService.handle(new LogHandlerAppStartedEvent(
         application2, this.user, null, this.acls, contextWithAMOnly));
 
-    ContainerId container21 = createContainer(appAttemptId2, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container21 = ContainerId.newContainerId(appAttemptId2, 1);
 
     writeContainerLogs(app2LogDir, container21, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container21, 0));
+        new LogHandlerContainerFinishedEvent(container21,
+            ContainerType.APPLICATION_MASTER, 0));
 
-    ContainerId container12 = createContainer(appAttemptId1, 2,
-        ContainerType.TASK);
+    ContainerId container12 = ContainerId.newContainerId(appAttemptId1, 2);
 
     writeContainerLogs(app1LogDir, container12, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container12, 0));
+        new LogHandlerContainerFinishedEvent(container12,
+            ContainerType.TASK, 0));
 
     ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
     ApplicationAttemptId appAttemptId3 =
@@ -483,29 +487,29 @@
     checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
     reset(appEventHandler);
     
-    ContainerId container31 = createContainer(appAttemptId3, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container31 = ContainerId.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container31, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container31, 0));
+        new LogHandlerContainerFinishedEvent(container31,
+            ContainerType.APPLICATION_MASTER, 0));
 
-    ContainerId container32 = createContainer(appAttemptId3, 2,
-        ContainerType.TASK);
+    ContainerId container32 = ContainerId.newContainerId(appAttemptId3, 2);
     writeContainerLogs(app3LogDir, container32, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container32, 1)); // Failed 
+        new LogHandlerContainerFinishedEvent(container32,
+            ContainerType.TASK, 1)); // Failed
 
-    ContainerId container22 = createContainer(appAttemptId2, 2,
-        ContainerType.TASK);
+    ContainerId container22 = ContainerId.newContainerId(appAttemptId2, 2);
     writeContainerLogs(app2LogDir, container22, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container22, 0));
+        new LogHandlerContainerFinishedEvent(container22,
+            ContainerType.TASK, 0));
 
-    ContainerId container33 = createContainer(appAttemptId3, 3,
-        ContainerType.TASK);
+    ContainerId container33 = ContainerId.newContainerId(appAttemptId3, 3);
     writeContainerLogs(app3LogDir, container33, fileNames);
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container33, 0));
+        new LogHandlerContainerFinishedEvent(container33,
+            ContainerType.TASK, 0));
 
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         application2));
@@ -541,26 +545,33 @@
     };
     checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
   }
-  
+
   @Test
   public void testVerifyAndCreateRemoteDirsFailure()
       throws Exception {
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
         this.remoteRootLogDir.getAbsolutePath());
-    
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(conf);
+    LogAggregationFileController logAggregationFileFormat = factory
+        .getFileControllerForWrite();
+    LogAggregationFileController spyLogAggregationFileFormat =
+        spy(logAggregationFileFormat);
+    YarnRuntimeException e = new YarnRuntimeException("KABOOM!");
+    doThrow(e).doNothing().when(spyLogAggregationFileFormat)
+        .verifyAndCreateRemoteLogDir();
     LogAggregationService logAggregationService = spy(
         new LogAggregationService(dispatcher, this.context, this.delSrvc,
-                                  super.dirsHandler));
+            super.dirsHandler) {
+        @Override
+        public LogAggregationFileController getLogAggregationFileController(
+            Configuration conf) {
+          return spyLogAggregationFileFormat;
+        }
+      });
     logAggregationService.init(this.conf);
-    
-    YarnRuntimeException e = new YarnRuntimeException("KABOOM!");
-    doThrow(e)
-      .when(logAggregationService).verifyAndCreateRemoteLogDir(
-          any(Configuration.class));
-        
     logAggregationService.start();
-    
     // Now try to start an application
     ApplicationId appId =
         BuilderUtils.newApplicationId(System.currentTimeMillis(),
@@ -607,8 +618,7 @@
     
     logAggregationService.stop();
   }
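
The stubbing in this test chains doThrow(e).doNothing(): Mockito applies consecutive stubbed behaviors one call at a time, so only the first verifyAndCreateRemoteLogDir() invocation throws and later calls succeed, matching the "remote dir creation fails once without killing the NM" scenario. The idiom in isolation (the spied list and method are placeholders):

    import static org.mockito.Mockito.*;
    import java.util.ArrayList;
    import java.util.List;

    public class ConsecutiveStubSketch {
      public static void main(String[] args) {
        List<String> spyList = spy(new ArrayList<String>());
        doThrow(new RuntimeException("KABOOM!")) // first call fails
            .doNothing()                         // later calls succeed
            .when(spyList).clear();
        try {
          spyList.clear();                       // throws
        } catch (RuntimeException expected) {
          System.out.println("first call: " + expected.getMessage());
        }
        spyList.clear();                         // completes normally
      }
    }
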
-  
-  
+
   @Test
   public void testVerifyAndCreateRemoteDirNonExistence()
       throws Exception {
@@ -621,14 +631,24 @@
         new LogAggregationService(dispatcher, this.context, this.delSrvc,
                                   super.dirsHandler));
     logAggregationService.init(this.conf);
+    logAggregationService.start();
     boolean existsBefore = aNewFile.exists();
     assertTrue("The new file already exists!", !existsBefore);
 
-    logAggregationService.verifyAndCreateRemoteLogDir(this.conf);
-    
+    ApplicationId appId = ApplicationId.newInstance(
+        System.currentTimeMillis(), 1);
+    LogAggregationContext contextWithAMAndFailed =
+        Records.newRecord(LogAggregationContext.class);
+    contextWithAMAndFailed.setLogAggregationPolicyClassName(
+        AMOrFailedContainerLogAggregationPolicy.class.getName());
+    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
+        this.user, null, this.acls, contextWithAMAndFailed));
+    dispatcher.await();
+
     boolean existsAfter = aNewFile.exists();
     assertTrue("The new aggregate file is not successfully created", existsAfter);
     aNewFile.delete(); //housekeeping
+    logAggregationService.stop();
   }
 
   @Test
@@ -641,7 +661,17 @@
     LogAggregationService logAggregationService = new LogAggregationService(
         dispatcher, this.context, this.delSrvc, super.dirsHandler);
     logAggregationService.init(this.conf);
-    logAggregationService.verifyAndCreateRemoteLogDir(this.conf);
+    logAggregationService.start();
+
+    ApplicationId appId = ApplicationId.newInstance(
+        System.currentTimeMillis(), 1);
+    LogAggregationContext contextWithAMAndFailed =
+        Records.newRecord(LogAggregationContext.class);
+    contextWithAMAndFailed.setLogAggregationPolicyClassName(
+        AMOrFailedContainerLogAggregationPolicy.class.getName());
+    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
+        this.user, null, this.acls, contextWithAMAndFailed));
+    dispatcher.await();
 
     String targetGroup =
         UserGroupInformation.getLoginUser().getPrimaryGroupName();
@@ -651,6 +681,7 @@
         fileStatus.getGroup(), targetGroup);
 
     fs.delete(aNewFile, true);
+    logAggregationService.stop();
   }
 
   @Test
@@ -669,14 +700,23 @@
     FileSystem fs = FileSystem.get(this.conf);
     final FileSystem spyFs = spy(FileSystem.get(this.conf));
 
+    final LogAggregationTFileController spyFileFormat
+        = new LogAggregationTFileController() {
+          @Override
+          public FileSystem getFileSystem(Configuration conf)
+              throws IOException {
+            return spyFs;
+          }
+        };
+    spyFileFormat.initialize(conf, "TFile");
     LogAggregationService aggSvc = new LogAggregationService(dispatcher,
         this.context, this.delSrvc, super.dirsHandler) {
       @Override
-      protected FileSystem getFileSystem(Configuration conf) {
-        return spyFs;
+      public LogAggregationFileController getLogAggregationFileController(
+          Configuration conf) {
+        return spyFileFormat;
       }
     };
-
     aggSvc.init(this.conf);
     aggSvc.start();
 
@@ -759,7 +799,8 @@
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
+        BuilderUtils.newContainerId(4, 1, 1, 1),
+        ContainerType.APPLICATION_MASTER, 0));
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         BuilderUtils.newApplicationId(1, 5)));
@@ -769,18 +810,36 @@
   @Test
   public void testLogAggregationCreateDirsFailsWithoutKillingNM()
       throws Exception {
-    
-    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
+
+    this.conf.set(YarnConfiguration.NM_LOG_DIRS,
+        localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
         this.remoteRootLogDir.getAbsolutePath());
 
     DeletionService spyDelSrvc = spy(this.delSrvc);
+    LogAggregationFileControllerFactory factory
+        = new LogAggregationFileControllerFactory(conf);
+    LogAggregationFileController logAggregationFileFormat = factory
+        .getFileControllerForWrite();
+    LogAggregationFileController spyLogAggregationFileFormat =
+        spy(logAggregationFileFormat);
+    Exception e = new RuntimeException("KABOOM!");
+    doThrow(e).when(spyLogAggregationFileFormat)
+        .createAppDir(any(String.class), any(ApplicationId.class),
+            any(UserGroupInformation.class));
     LogAggregationService logAggregationService = spy(
         new LogAggregationService(dispatcher, this.context, spyDelSrvc,
-                                  super.dirsHandler));
+            super.dirsHandler){
+        @Override
+        public LogAggregationFileController getLogAggregationFileController(
+            Configuration conf) {
+          return spyLogAggregationFileFormat;
+        }
+      });
+
     logAggregationService.init(this.conf);
     logAggregationService.start();
-    
+
     ApplicationId appId =
         BuilderUtils.newApplicationId(System.currentTimeMillis(),
           (int) (Math.random() * 1000));
@@ -789,10 +848,6 @@
         new File(localLogDir, appId.toString());
     appLogDir.mkdir();
 
-    Exception e = new RuntimeException("KABOOM!");
-    doThrow(e)
-      .when(logAggregationService).createAppDir(any(String.class),
-          any(ApplicationId.class), any(UserGroupInformation.class));
     LogAggregationContext contextWithAMAndFailed =
         Records.newRecord(LogAggregationContext.class);
     contextWithAMAndFailed.setLogAggregationPolicyClassName(
@@ -811,7 +866,8 @@
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        BuilderUtils.newContainerId(4, 1, 1, 1), 0));
+        BuilderUtils.newContainerId(4, 1, 1, 1),
+        ContainerType.APPLICATION_MASTER, 0));
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         BuilderUtils.newApplicationId(1, 5)));
@@ -867,7 +923,8 @@
       int minNumOfContainers, int maxNumOfContainers,
       String[] logFiles, int numOfLogsPerContainer, boolean multiLogs)
     throws IOException {
-    Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user);
+    Path appLogDir = logAggregationService.getLogAggregationFileController(
+        conf).getRemoteAppLogDir(appId, this.user);
     RemoteIterator<FileStatus> nodeFiles = null;
     try {
       Path qualifiedLogDir =
@@ -1472,14 +1529,13 @@
 
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId container1 = createContainer(appAttemptId1, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container1 = ContainerId.newContainerId(appAttemptId1, 1);
 
     // Simulate log-file creation
     writeContainerLogs(appLogDir1, container1, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-      container1, 0));
+        container1, ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application2 has excludePatten which includes
     // stdout and syslog.
@@ -1495,13 +1551,13 @@
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application2,
       this.user, null, this.acls, LogAggregationContextWithExcludePatterns));
-    ContainerId container2 = createContainer(appAttemptId2, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container2 = ContainerId.newContainerId(appAttemptId2, 1);
 
     writeContainerLogs(app2LogDir, container2, new String[] { "stdout",
         "stderr", "syslog" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container2, 0));
+        new LogHandlerContainerFinishedEvent(container2,
+            ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application3 has includePattern which is *.log and
     // excludePatten which includes std.log and sys.log.
@@ -1520,12 +1576,12 @@
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application3,
       this.user, null, this.acls, context1));
-    ContainerId container3 = createContainer(appAttemptId3, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container3 = ContainerId.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container3, new String[] { "stdout",
         "sys.log", "std.log", "out.log", "err.log", "log" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container3, 0));
+        new LogHandlerContainerFinishedEvent(container3,
+            ContainerType.APPLICATION_MASTER, 0));
 
     // LogContext for application4 has includePattern
     // which includes std.log and sys.log and
@@ -1545,12 +1601,12 @@
         AMOnlyLogAggregationPolicy.class.getName());
     logAggregationService.handle(new LogHandlerAppStartedEvent(application4,
       this.user, null, this.acls, context2));
-    ContainerId container4 = createContainer(appAttemptId4, 1,
-        ContainerType.APPLICATION_MASTER);
+    ContainerId container4 = ContainerId.newContainerId(appAttemptId4, 1);
     writeContainerLogs(app4LogDir, container4, new String[] { "stdout",
         "sys.log", "std.log", "out.log", "err.log", "log" });
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container4, 0));
+        new LogHandlerContainerFinishedEvent(container4,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
     ApplicationEvent expectedInitEvents[] =
@@ -1677,7 +1733,8 @@
         new ContainerId[] {container}, logFiles, 1, true);
 
     logAggregationService.handle(
-        new LogHandlerContainerFinishedEvent(container, 0));
+        new LogHandlerContainerFinishedEvent(container,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
 
@@ -1801,14 +1858,8 @@
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(appId, 1);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId1, 2l);
-    try {
-      logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-          containerId, 100));
-      assertTrue("Should skip when null containerID", true);
-    } catch (Exception e) {
-      Assert.assertFalse("Exception not expected should skip null containerid",
-          true);
-    }
+    logAggregationService.handle(new LogHandlerContainerFinishedEvent(
+        containerId, ContainerType.APPLICATION_MASTER, 100));
   }
 
   @Test (timeout = 50000)
@@ -2108,7 +2159,7 @@
     }
     logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
         this.user, null, this.acls, logAggContext));
-
+    dispatcher.await();
     return logAggregationService;
   }
 
@@ -2133,8 +2184,7 @@
       long cId, int exitCode, String[] logFiles) throws IOException {
     ApplicationAttemptId appAttemptId1 =
         BuilderUtils.newApplicationAttemptId(application1, 1);
-    ContainerId containerId = createContainer(appAttemptId1, cId,
-        containerType);
+    ContainerId containerId = ContainerId.newContainerId(appAttemptId1, cId);
     // Simulate log-file creation
     File appLogDir1 =
         new File(localLogDir, application1.toString());
@@ -2142,7 +2192,7 @@
     writeContainerLogs(appLogDir1, containerId, logFiles);
 
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
-        containerId, exitCode));
+        containerId, containerType, exitCode));
     return containerId;
 
   }
@@ -2332,7 +2382,8 @@
     writeContainerLogs(appLogDir, container, logFiles3);
 
     logAggregationService.handle(
-      new LogHandlerContainerFinishedEvent(container, 0));
+        new LogHandlerContainerFinishedEvent(container,
+            ContainerType.APPLICATION_MASTER, 0));
 
     dispatcher.await();
     logAggregationService.handle(new LogHandlerAppFinishedEvent(application));
@@ -2462,17 +2513,20 @@
     logAggregationService.stop();
 
     assertEquals(expectedLogAggregationTimes,
-        aggregator.getLogAggregationTimes());
+        aggregator.getLogAggregationFileControllerContext()
+        .getLogAggregationTimes());
     assertEquals(expectedAggregationReportNum,
         this.context.getLogAggregationStatusForApps().size());
     assertEquals(expectedCleanupOldLogsTimes,
-        aggregator.getCleanupOldLogTimes());
+        aggregator.getLogAggregationFileControllerContext()
+        .getCleanOldLogsTimes());
   }
 
   private int numOfLogsAvailable(LogAggregationService logAggregationService,
       ApplicationId appId, boolean sizeLimited, String lastLogFile)
       throws IOException {
-    Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user);
+    Path appLogDir = logAggregationService.getLogAggregationFileController(
+        conf).getRemoteAppLogDir(appId, this.user);
     RemoteIterator<FileStatus> nodeFiles = null;
     try {
       Path qualifiedLogDir =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
index 7a4ea88..c6fa16d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
@@ -65,6 +65,7 @@
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.InlineDispatcher;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
@@ -156,7 +157,8 @@
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
 
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
 
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
@@ -196,7 +198,8 @@
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
 
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
 
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
@@ -365,7 +368,8 @@
     logHandler.start();
 
     logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
-    logHandler.handle(new LogHandlerContainerFinishedEvent(container11, 0));
+    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
+        ContainerType.APPLICATION_MASTER, 0));
     logHandler.handle(new LogHandlerAppFinishedEvent(appId));
 
     // simulate a restart and verify deletion is rescheduled
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 0f1c6f5..6f7fadf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -36,7 +36,6 @@
 import java.util.regex.Pattern;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -76,6 +75,7 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 public class TestContainersMonitor extends BaseContainerManagerTest {
 
@@ -84,7 +84,7 @@
   }
 
   static {
-    LOG = LogFactory.getLog(TestContainersMonitor.class);
+    LOG = LoggerFactory.getLogger(TestContainersMonitor.class);
   }
   @Before
   public void setup() throws IOException {
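
The LogFactory-to-LoggerFactory switch above (repeated in the next file) moves these tests from commons-logging to slf4j. A minimal sketch of the slf4j idiom (illustrative, not part of this patch): the {} placeholders are formatted only when the level is enabled, so isDebugEnabled() guards become unnecessary for cheap arguments.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingExample.class);

      void demo(String appId) {
        // parameterized message: no string concatenation when DEBUG is off
        LOG.debug("Collector for application: {} hasn't registered yet!", appId);
      }
    }
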
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index a1c247b..9676568 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -24,7 +24,6 @@
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
@@ -57,6 +56,7 @@
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 import static org.mockito.Mockito.spy;
 
@@ -70,7 +70,7 @@
   }
 
   static {
-    LOG = LogFactory.getLog(TestContainerSchedulerQueuing.class);
+    LOG = LoggerFactory.getLogger(TestContainerSchedulerQueuing.class);
   }
 
   private boolean delayContainers = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
index 0e03994..c1638df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
@@ -126,10 +126,12 @@
 
   @Override
   public synchronized void storeContainer(ContainerId containerId,
-      int version, StartContainerRequest startRequest) throws IOException {
+      int version, long startTime, StartContainerRequest startRequest)
+      throws IOException {
     RecoveredContainerState rcs = new RecoveredContainerState();
     rcs.startRequest = startRequest;
     rcs.version = version;
+    rcs.setStartTime(startTime);
     containerStates.put(containerId, rcs);
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index 0133156..b0a9bc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -234,7 +234,8 @@
     StartContainerRequest containerReq = createContainerRequest(containerId);
 
     // store a container and verify recovered
-    stateStore.storeContainer(containerId, 0, containerReq);
+    long containerStartTime = System.currentTimeMillis();
+    stateStore.storeContainer(containerId, 0, containerStartTime, containerReq);
 
     // verify the container version key is not stored for new containers
     DB db = stateStore.getDB();
@@ -246,6 +247,7 @@
     assertEquals(1, recoveredContainers.size());
     RecoveredContainerState rcs = recoveredContainers.get(0);
     assertEquals(0, rcs.getVersion());
+    assertEquals(containerStartTime, rcs.getStartTime());
     assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
     assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
     assertEquals(false, rcs.getKilled());
@@ -998,7 +1000,7 @@
     StartContainerRequest containerReq = StartContainerRequest.newInstance(clc,
         containerToken);
 
-    stateStore.storeContainer(containerId, 0, containerReq);
+    stateStore.storeContainer(containerId, 0, 0, containerReq);
 
     // add a invalid key
     byte[] invalidKey = ("ContainerManager/containers/"
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
index 4561e85c..57bee8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
@@ -235,4 +235,8 @@
   public boolean isRecovering() {
     return false;
   }
+
+  public long getContainerStartTime() {
+    return 0;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
index be1dae1..0a71a91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
@@ -218,8 +218,8 @@
       Context context = mock(Context.class);
       Container container =
           new ContainerImpl(conf, dispatcher, launchContext,
-            null, metrics,
-            BuilderUtils.newContainerTokenIdentifier(containerToken), context) {
+            null, metrics, BuilderUtils.newContainerTokenIdentifier(
+                containerToken), context) {
 
             @Override
             public ContainerState getContainerState() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 9b8f8af..6c016fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -203,6 +203,7 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -212,9 +213,11 @@
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
+      <scope>test</scope>
       <type>test-jar</type>
     </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index f77d09e..479aa43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -45,8 +45,6 @@
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-
-
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -61,6 +59,7 @@
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
@@ -81,6 +80,8 @@
 public class ApplicationMasterService extends AbstractService implements
     ApplicationMasterProtocol {
   private static final Log LOG = LogFactory.getLog(ApplicationMasterService.class);
+  private static final int PRE_REGISTER_RESPONSE_ID = -1;
+
   private final AMLivelinessMonitor amLivelinessMonitor;
   private YarnScheduler rScheduler;
   protected InetSocketAddress masterServiceAddress;
@@ -252,7 +253,7 @@
 
     // Remove collector address when app get finished.
     if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-      rmApp.removeCollectorAddr();
+      ((RMAppImpl) rmApp).removeCollectorData();
     }
     // checking whether the app exits in RMStateStore at first not to throw
     // ApplicationDoesNotExistInCacheException before and after
@@ -325,6 +326,11 @@
   protected static final Allocation EMPTY_ALLOCATION = new Allocation(
       EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
 
+  private int getNextResponseId(int responseId) {
+    // Wrap around within the range [0, Integer.MAX_VALUE]
+    return (responseId + 1) & Integer.MAX_VALUE;
+  }
+
   @Override
   public AllocateResponse allocate(AllocateRequest request)
       throws YarnException, IOException {
@@ -357,14 +363,17 @@
         throw new ApplicationMasterNotRegisteredException(message);
       }
 
-      if ((request.getResponseId() + 1) == lastResponse.getResponseId()) {
-        /* old heartbeat */
+      // Normally request.getResponseId() == lastResponse.getResponseId()
+      if (getNextResponseId(request.getResponseId()) == lastResponse
+          .getResponseId()) {
+        // heartbeat is one step old; simply return lastResponse
         return lastResponse;
-      } else if (request.getResponseId() + 1 < lastResponse.getResponseId()) {
+      } else if (request.getResponseId() != lastResponse.getResponseId()) {
         String message =
             "Invalid responseId in AllocateRequest from application attempt: "
                 + appAttemptId + ", expect responseId to be "
-                + (lastResponse.getResponseId() + 1);
+                + lastResponse.getResponseId() + ", but got "
+                + request.getResponseId();
         throw new InvalidApplicationMasterRequestException(message);
       }
 
@@ -404,7 +413,7 @@
        * need to worry about unregister call occurring in between (which
        * removes the lock object).
        */
-      response.setResponseId(lastResponse.getResponseId() + 1);
+      response.setResponseId(getNextResponseId(lastResponse.getResponseId()));
       lock.setAllocateResponse(response);
       return response;
     }
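
For context: getNextResponseId masks the increment with Integer.MAX_VALUE, so the heartbeat responseId cycles through [0, Integer.MAX_VALUE] instead of going negative on overflow, and the PRE_REGISTER_RESPONSE_ID sentinel of -1 advances to 0 on the first allocate. A self-contained check (illustrative, not part of this patch; run with -ea to enable asserts):

    public class ResponseIdWrap {
      static int getNextResponseId(int responseId) {
        return (responseId + 1) & Integer.MAX_VALUE;
      }

      public static void main(String[] args) {
        assert getNextResponseId(0) == 1;
        assert getNextResponseId(-1) == 0;                 // pre-register sentinel
        assert getNextResponseId(Integer.MAX_VALUE) == 0;  // wraps, never negative
      }
    }
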
@@ -415,12 +424,23 @@
         recordFactory.newRecordInstance(AllocateResponse.class);
     // set response id to -1 before application master for the following
     // attemptID get registered
-    response.setResponseId(-1);
+    response.setResponseId(PRE_REGISTER_RESPONSE_ID);
     LOG.info("Registering app attempt : " + attemptId);
     responseMap.put(attemptId, new AllocateResponseLock(response));
     rmContext.getNMTokenSecretManager().registerApplicationAttempt(attemptId);
   }
 
+  @VisibleForTesting
+  protected boolean setAttemptLastResponseId(ApplicationAttemptId attemptId,
+      int lastResponseId) {
+    AllocateResponseLock lock = responseMap.get(attemptId);
+    if (lock == null || lock.getAllocateResponse() == null) {
+      return false;
+    }
+    lock.getAllocateResponse().setResponseId(lastResponseId);
+    return true;
+  }
+
   public void unregisterAttempt(ApplicationAttemptId attemptId) {
     LOG.info("Unregistering app attempt : " + attemptId);
     responseMap.remove(attemptId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 8b28d65..e6c25ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -29,6 +29,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -765,21 +766,14 @@
     response.setClusterMetrics(ymetrics);
     return response;
   }
-  
-  @Override
-  public GetApplicationsResponse getApplications(
-      GetApplicationsRequest request) throws YarnException {
-    return getApplications(request, true);
-  }
 
   /**
    * Get applications matching the {@link GetApplicationsRequest}. The
    * applicationTypes in the request and on each application are compared
    * in lower case, so application-type matching is case-insensitive.
    */
-  @Private
-  public GetApplicationsResponse getApplications(
-      GetApplicationsRequest request, boolean caseSensitive)
+  @Override
+  public GetApplicationsResponse getApplications(GetApplicationsRequest request)
       throws YarnException {
     UserGroupInformation callerUGI;
     try {
@@ -789,7 +783,7 @@
       throw RPCUtil.getRemoteException(ie);
     }
 
-    Set<String> applicationTypes = request.getApplicationTypes();
+    Set<String> applicationTypes = getLowerCasedAppTypes(request);
     EnumSet<YarnApplicationState> applicationStates =
         request.getApplicationStates();
     Set<String> users = request.getUsers();
@@ -853,9 +847,8 @@
       }
 
       if (applicationTypes != null && !applicationTypes.isEmpty()) {
-        String appTypeToMatch = caseSensitive
-            ? application.getApplicationType()
-            : StringUtils.toLowerCase(application.getApplicationType());
+        String appTypeToMatch =
+            StringUtils.toLowerCase(application.getApplicationType());
         if (!applicationTypes.contains(appTypeToMatch)) {
           continue;
         }
@@ -915,6 +908,17 @@
     return response;
   }
 
+  private Set<String> getLowerCasedAppTypes(GetApplicationsRequest request) {
+    Set<String> applicationTypes = new HashSet<>();
+    if (request.getApplicationTypes() != null && !request.getApplicationTypes()
+        .isEmpty()) {
+      for (String type : request.getApplicationTypes()) {
+        applicationTypes.add(StringUtils.toLowerCase(type));
+      }
+    }
+    return applicationTypes;
+  }
+
   @Override
   public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request)
       throws YarnException {
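
After the consolidation above, getApplications always lower-cases both the requested types and each application's type, so type filtering is uniformly case-insensitive. A small sketch of the resulting behavior (illustrative, not part of this patch; plain JDK lower-casing stands in for hadoop's StringUtils.toLowerCase):

    import java.util.HashSet;
    import java.util.Locale;
    import java.util.Set;

    public class AppTypeFilter {
      public static void main(String[] args) {
        Set<String> requested = new HashSet<>();
        requested.add("MAPREDUCE".toLowerCase(Locale.ROOT));  // client-supplied

        String storedType = "MapReduce";                      // type on the RMApp
        // both sides lower-cased, so the match no longer depends on case
        System.out.println(
            requested.contains(storedType.toLowerCase(Locale.ROOT)));  // true
      }
    }
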
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 052ec22..a993d69 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
@@ -293,9 +294,10 @@
     // add collector address for this application
     if (YarnConfiguration.timelineServiceV2Enabled(
         getRmContext().getYarnConfiguration())) {
-      response.setCollectorAddr(
-          getRmContext().getRMApps().get(appAttemptId.getApplicationId())
-              .getCollectorAddr());
+      CollectorInfo collectorInfo = app.getCollectorInfo();
+      if (collectorInfo != null) {
+        response.setCollectorInfo(collectorInfo);
+      }
     }
 
     // add preemption to the allocateResponse message (if any)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 3c278de..4fc2916 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -391,6 +391,8 @@
       break;
     case NODE_LABELS_UPDATE:
       break;
+    case RELEASE_CONTAINER:
+      break;
     // <-- IGNORED EVENTS : END -->
     default:
       LOG.error("Unknown event arrived at" +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index aa7f524..cc47e02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -27,6 +27,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
@@ -63,12 +64,14 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeLabelsUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -118,6 +121,8 @@
   private boolean isDelegatedCentralizedNodeLabelsConf;
   private DynamicResourceConfiguration drConf;
 
+  private final AtomicLong timelineCollectorVersion = new AtomicLong(0);
+
   public ResourceTrackerService(RMContext rmContext,
       NodesListManager nodesListManager,
       NMLivelinessMonitor nmLivelinessMonitor,
@@ -521,16 +526,6 @@
           message);
     }
 
-    boolean timelineV2Enabled =
-        YarnConfiguration.timelineServiceV2Enabled(getConfig());
-    if (timelineV2Enabled) {
-      // Check & update collectors info from request.
-      // TODO make sure it won't have race condition issue for AM failed over
-      // case that the older registration could possible override the newer
-      // one.
-      updateAppCollectorsMap(request);
-    }
-
     // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
     if (rmNode.getState() == NodeState.DECOMMISSIONING &&
         decommissioningWatcher.checkReadyToBeDecommissioned(
@@ -545,13 +540,20 @@
           NodeAction.SHUTDOWN, message);
     }
 
+    boolean timelineV2Enabled =
+        YarnConfiguration.timelineServiceV2Enabled(getConfig());
+    if (timelineV2Enabled) {
+      // Check & update collectors info from request.
+      updateAppCollectorsMap(request);
+    }
+
     // Heartbeat response
     NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils
         .newNodeHeartbeatResponse(lastNodeHeartbeatResponse.
             getResponseId() + 1, NodeAction.NORMAL, null, null, null, null,
             nextHeartBeatInterval);
     rmNode.updateNodeHeartbeatResponseForCleanup(nodeHeartBeatResponse);
-    rmNode.updateNodeHeartbeatResponseForContainersDecreasing(
+    rmNode.updateNodeHeartbeatResponseForUpdatedContainers(
         nodeHeartBeatResponse);
 
     populateKeys(request, nodeHeartBeatResponse);
@@ -613,44 +615,66 @@
 
   private void setAppCollectorsMapToResponse(
       List<ApplicationId> runningApps, NodeHeartbeatResponse response) {
-    Map<ApplicationId, String> liveAppCollectorsMap = new
-        HashMap<ApplicationId, String>();
+    Map<ApplicationId, AppCollectorData> liveAppCollectorsMap =
+        new HashMap<>();
     Map<ApplicationId, RMApp> rmApps = rmContext.getRMApps();
     // Set collectors for all running apps on this node.
     for (ApplicationId appId : runningApps) {
-      String appCollectorAddr = rmApps.get(appId).getCollectorAddr();
-      if (appCollectorAddr != null) {
-        liveAppCollectorsMap.put(appId, appCollectorAddr);
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Collector for applicaton: " + appId +
-              " hasn't registered yet!");
+      RMApp app = rmApps.get(appId);
+      if (app != null) {
+        AppCollectorData appCollectorData = app.getCollectorData();
+        if (appCollectorData != null) {
+          liveAppCollectorsMap.put(appId, appCollectorData);
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Collector for applicaton: " + appId +
+                " hasn't registered yet!");
+          }
         }
       }
     }
-    response.setAppCollectorsMap(liveAppCollectorsMap);
+    response.setAppCollectors(liveAppCollectorsMap);
   }
 
   private void updateAppCollectorsMap(NodeHeartbeatRequest request) {
-    Map<ApplicationId, String> registeredCollectorsMap =
-        request.getRegisteredCollectors();
-    if (registeredCollectorsMap != null
-        && !registeredCollectorsMap.isEmpty()) {
+    Map<ApplicationId, AppCollectorData> registeringCollectorsMap =
+        request.getRegisteringCollectors();
+    if (registeringCollectorsMap != null
+        && !registeringCollectorsMap.isEmpty()) {
       Map<ApplicationId, RMApp> rmApps = rmContext.getRMApps();
-      for (Map.Entry<ApplicationId, String> entry:
-          registeredCollectorsMap.entrySet()) {
+      for (Map.Entry<ApplicationId, AppCollectorData> entry:
+          registeringCollectorsMap.entrySet()) {
         ApplicationId appId = entry.getKey();
-        String collectorAddr = entry.getValue();
-        if (collectorAddr != null && !collectorAddr.isEmpty()) {
+        AppCollectorData collectorData = entry.getValue();
+        if (collectorData != null) {
+          if (!collectorData.isStamped()) {
+            // Stamp the collector if we have not done so
+            collectorData.setRMIdentifier(
+                ResourceManager.getClusterTimeStamp());
+            collectorData.setVersion(
+                timelineCollectorVersion.getAndIncrement());
+          }
           RMApp rmApp = rmApps.get(appId);
           if (rmApp == null) {
             LOG.warn("Cannot update collector info because application ID: " +
                 appId + " is not found in RMContext!");
           } else {
-            String previousCollectorAddr = rmApp.getCollectorAddr();
-            if (previousCollectorAddr == null
-                || !previousCollectorAddr.equals(collectorAddr)) {
-              rmApp.setCollectorAddr(collectorAddr);
+            synchronized (rmApp) {
+              AppCollectorData previousCollectorData = rmApp.getCollectorData();
+              if (AppCollectorData.happensBefore(previousCollectorData,
+                  collectorData)) {
+                // Sending collector update event.
+                // Note: the RM has to store the newly received collector data
+                // synchronously. Otherwise, the RM may send out stale collector
+                // data before this update completes; if the RM then crashes,
+                // the newly updated collector data is lost.
+                LOG.info("Update collector information for application " + appId
+                    + " with new address: " + collectorData.getCollectorAddr()
+                    + " timestamp: " + collectorData.getRMIdentifier()
+                    + ", " + collectorData.getVersion());
+                ((RMAppImpl) rmApp).setCollectorData(collectorData);
+              }
             }
           }
         }
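
For context: the RM now stamps each registering collector with the cluster timestamp (RMIdentifier) plus a monotonically increasing version from the AtomicLong above, and accepts an update only when it happens-after the stored data. A minimal sketch of the ordering this implies, assuming happensBefore compares (rmIdentifier, version) pairs and treats absent data as earliest (an assumption about the helper, not a quotation of it):

    // Hypothetical stand-in for AppCollectorData's ordering; real field and
    // method names may differ in detail.
    class CollectorStamp {
      final long rmIdentifier;  // cluster timestamp stamped by the RM
      final long version;       // per-RM monotonically increasing counter

      CollectorStamp(long rmIdentifier, long version) {
        this.rmIdentifier = rmIdentifier;
        this.version = version;
      }

      static boolean happensBefore(CollectorStamp prev, CollectorStamp next) {
        if (prev == null) {
          return true;  // nothing stored yet: accept the registration
        }
        if (prev.rmIdentifier != next.rmIdentifier) {
          return prev.rmIdentifier < next.rmIdentifier;  // newer RM epoch wins
        }
        return prev.version < next.version;  // same epoch: newer stamp wins
      }
    }
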
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index a3a2ebc..7eaa6e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -58,6 +58,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
 import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollector;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -294,8 +295,8 @@
   @Override
   public void appAttemptRegistered(RMAppAttempt appAttempt,
       long registeredTime) {
-    TimelineEntity entity =
-        createAppAttemptEntity(appAttempt.getAppAttemptId());
+    ApplicationAttemptId attemptId = appAttempt.getAppAttemptId();
+    TimelineEntity entity = createAppAttemptEntity(attemptId);
     entity.setCreatedTime(registeredTime);
 
     TimelineEvent tEvent = new TimelineEvent();
@@ -317,6 +318,8 @@
           appAttempt.getMasterContainer().getId().toString());
     }
     entity.setInfo(entityInfo);
+    entity.setIdPrefix(
+        TimelineServiceHelper.invertLong(attemptId.getAttemptId()));
 
     getDispatcher().getEventHandler().handle(
         new TimelineV2PublishEvent(SystemMetricsEventType.PUBLISH_ENTITY,
@@ -327,7 +330,7 @@
   @Override
   public void appAttemptFinished(RMAppAttempt appAttempt,
       RMAppAttemptState appAttemtpState, RMApp app, long finishedTime) {
-
+    ApplicationAttemptId attemptId = appAttempt.getAppAttemptId();
     ApplicationAttemptEntity entity =
         createAppAttemptEntity(appAttempt.getAppAttemptId());
 
@@ -346,7 +349,8 @@
     entityInfo.put(AppAttemptMetricsConstants.STATE_INFO, RMServerUtils
         .createApplicationAttemptState(appAttemtpState).toString());
     entity.setInfo(entityInfo);
-
+    entity.setIdPrefix(
+        TimelineServiceHelper.invertLong(attemptId.getAttemptId()));
 
     getDispatcher().getEventHandler().handle(
         new TimelineV2PublishEvent(SystemMetricsEventType.PUBLISH_ENTITY,
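
The setIdPrefix calls above invert the attempt id so that, under the timeline service's ascending id-prefix ordering, the most recent attempt is read back first. A sketch of the inversion, assuming TimelineServiceHelper.invertLong is Long.MAX_VALUE minus its argument (worth verifying against the helper; run with -ea to enable asserts):

    public class IdPrefixInversion {
      // assumed behavior of TimelineServiceHelper.invertLong
      static long invertLong(long key) {
        return Long.MAX_VALUE - key;
      }

      public static void main(String[] args) {
        // attempt 2 gets the smaller prefix, so it sorts ahead of attempt 1
        assert invertLong(2) < invertLong(1);
      }
    }
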
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 1e3f691..631d1a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -100,9 +100,11 @@
       try {
         //invoke the preemption policy
         invokePolicy();
-      } catch (YarnRuntimeException e) {
-        LOG.error("YarnRuntimeException raised while executing preemption"
-            + " checker, skip this run..., exception=", e);
+      } catch (Throwable t) {
+        // The preemption monitor does not mutate shared structures, and no
+        // state persists across invocations, so log, skip this run, and retry.
+        LOG.error("Exception raised while executing preemption"
+            + " checker, skipping this run, exception=", t);
       }
     }
   }
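
Widening the catch from YarnRuntimeException to Throwable keeps the monitor thread alive no matter what the policy throws, which is safe here precisely because no state persists between runs. The same defensive pattern in a generic scheduled task (illustrative sketch, JDK only); note that an uncaught exception would otherwise suppress all subsequent executions of a scheduleAtFixedRate task:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ResilientMonitor {
      public static void main(String[] args) {
        ScheduledExecutorService ses =
            Executors.newSingleThreadScheduledExecutor();
        ses.scheduleAtFixedRate(() -> {
          try {
            runCheck();  // the periodic policy invocation
          } catch (Throwable t) {
            // nothing persists across runs: log, skip this run, retry next tick
            System.err.println("checker failed, skipping this run: " + t);
          }
        }, 0, 3, TimeUnit.SECONDS);
      }

      static void runCheck() {
        // invoke the policy here
      }
    }
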
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 4bf6760..00ae3da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -397,10 +397,16 @@
         ResourceUsage userResourceUsage = tq.leafQueue.getUser(userName)
             .getResourceUsage();
 
+        // perUserAMUsed was populated from running apps only; here we loop
+        // over both running and pending apps, so the lookup may return null.
+        Resource userSpecificAmUsed = perUserAMUsed.get(userName);
+        amUsed = (userSpecificAmUsed == null)
+            ? Resources.none() : userSpecificAmUsed;
+
         TempUserPerPartition tmpUser = new TempUserPerPartition(
             tq.leafQueue.getUser(userName), tq.queueName,
             Resources.clone(userResourceUsage.getUsed(partition)),
-            Resources.clone(perUserAMUsed.get(userName)),
+            Resources.clone(userSpecificAmUsed),
             Resources.clone(userResourceUsage.getReserved(partition)),
             Resources.none());
 
@@ -547,15 +553,17 @@
     Collection<FiCaSchedulerApp> runningApps = leafQueue.getApplications();
     Resource amUsed = Resources.createResource(0, 0);
 
-    for (FiCaSchedulerApp app : runningApps) {
-      Resource userAMResource = perUserAMUsed.get(app.getUser());
-      if (null == userAMResource) {
-        userAMResource = Resources.createResource(0, 0);
-        perUserAMUsed.put(app.getUser(), userAMResource);
-      }
+    synchronized (leafQueue) {
+      for (FiCaSchedulerApp app : runningApps) {
+        Resource userAMResource = perUserAMUsed.get(app.getUser());
+        if (null == userAMResource) {
+          userAMResource = Resources.createResource(0, 0);
+          perUserAMUsed.put(app.getUser(), userAMResource);
+        }
 
-      Resources.addTo(userAMResource, app.getAMResource(partition));
-      Resources.addTo(amUsed, app.getAMResource(partition));
+        Resources.addTo(userAMResource, app.getAMResource(partition));
+        Resources.addTo(amUsed, app.getAMResource(partition));
+      }
     }
 
     return amUsed;
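
The null guard above exists because perUserAMUsed is built from running apps only, while the surrounding loop also visits users whose apps are all pending. An equivalent idiom with Map.getOrDefault (illustrative fragment, not part of this patch; Resources.none() is YARN's shared zero-resource singleton, so it must be cloned before any mutation, as the patch already does):

    // hypothetical fragment inside the same loop
    Resource userSpecificAmUsed =
        perUserAMUsed.getOrDefault(userName, Resources.none());
    // clone before handing it to TempUserPerPartition, which may mutate it
    Resource amUsedClone = Resources.clone(userSpecificAmUsed);
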
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 4b6e82c..ac67dcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -19,12 +19,9 @@
 package org.apache.hadoop.yarn.server.resourcemanager.recovery;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.api.transaction.CuratorTransaction;
-import org.apache.curator.framework.api.transaction.CuratorTransactionFinal;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -32,6 +29,7 @@
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.hadoop.util.curator.ZKCuratorManager;
+import org.apache.hadoop.util.curator.ZKCuratorManager.SafeTransaction;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -338,7 +336,7 @@
   @Override
   public synchronized void startInternal() throws Exception {
     // ensure root dirs exist
-    createRootDirRecursively(znodeWorkingPath);
+    zkManager.createRootDirRecursively(znodeWorkingPath);
     create(zkRootNodePath);
     setRootNodeAcls();
     delete(fencingNodePath);
@@ -417,9 +415,10 @@
         ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
 
     if (exists(versionNodePath)) {
-      safeSetData(versionNodePath, data, -1);
+      zkManager.safeSetData(versionNodePath, data, -1, zkAcl, fencingNodePath);
     } else {
-      safeCreate(versionNodePath, data, zkAcl, CreateMode.PERSISTENT);
+      zkManager.safeCreate(versionNodePath, data, zkAcl, CreateMode.PERSISTENT,
+          zkAcl, fencingNodePath);
     }
   }
 
@@ -448,12 +447,14 @@
       // increment epoch and store it
       byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
           .toByteArray();
-      safeSetData(epochNodePath, storeData, -1);
+      zkManager.safeSetData(epochNodePath, storeData, -1, zkAcl,
+          fencingNodePath);
     } else {
       // initialize epoch node with 1 for the next time.
       byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
           .toByteArray();
-      safeCreate(epochNodePath, storeData, zkAcl, CreateMode.PERSISTENT);
+      zkManager.safeCreate(epochNodePath, storeData, zkAcl,
+          CreateMode.PERSISTENT, zkAcl, fencingNodePath);
     }
 
     return currentEpoch;
@@ -722,7 +723,7 @@
       // No apps stored under parent path.
       if (children != null && children.isEmpty()) {
         try {
-          safeDelete(parentAppNode);
+          zkManager.safeDelete(parentAppNode, zkAcl, fencingNodePath);
           if (LOG.isDebugEnabled()) {
             LOG.debug("No leaf app node exists. Removing parent node " +
                 parentAppNode);
@@ -750,7 +751,8 @@
 
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
     if (appStateData.length <= zknodeLimit) {
-      safeCreate(nodeCreatePath, appStateData, zkAcl, CreateMode.PERSISTENT);
+      zkManager.safeCreate(nodeCreatePath, appStateData, zkAcl,
+          CreateMode.PERSISTENT, zkAcl, fencingNodePath);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Application state data size for " + appId + " is "
@@ -781,7 +783,8 @@
           String rootNode =
               getSplitAppNodeParent(nodeUpdatePath, appIdNodeSplitIndex);
           if (!exists(rootNode)) {
-            safeCreate(rootNode, null, zkAcl, CreateMode.PERSISTENT);
+            zkManager.safeCreate(rootNode, null, zkAcl, CreateMode.PERSISTENT,
+                zkAcl, fencingNodePath);
           }
         }
       }
@@ -795,9 +798,11 @@
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
 
     if (pathExists) {
-      safeSetData(nodeUpdatePath, appStateData, -1);
+      zkManager.safeSetData(nodeUpdatePath, appStateData, -1, zkAcl,
+          fencingNodePath);
     } else {
-      safeCreate(nodeUpdatePath, appStateData, zkAcl, CreateMode.PERSISTENT);
+      zkManager.safeCreate(nodeUpdatePath, appStateData, zkAcl,
+          CreateMode.PERSISTENT, zkAcl, fencingNodePath);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Path " + nodeUpdatePath + " for " + appId + " didn't " +
             "exist. Creating a new znode to update the application state.");
@@ -840,9 +845,11 @@
     switch (operation) {
     case UPDATE:
       if (exists(path)) {
-        safeSetData(path, attemptStateData, -1);
+        zkManager.safeSetData(path, attemptStateData, -1, zkAcl,
+            fencingNodePath);
       } else {
-        safeCreate(path, attemptStateData, zkAcl, CreateMode.PERSISTENT);
+        zkManager.safeCreate(path, attemptStateData, zkAcl,
+            CreateMode.PERSISTENT, zkAcl, fencingNodePath);
         if (LOG.isDebugEnabled()) {
           LOG.debug("Path " + path + " for " + appAttemptId + " didn't exist." +
               " Created a new znode to update the application attempt state.");
@@ -850,10 +857,11 @@
       }
       break;
     case STORE:
-      safeCreate(path, attemptStateData, zkAcl, CreateMode.PERSISTENT);
+      zkManager.safeCreate(path, attemptStateData, zkAcl, CreateMode.PERSISTENT,
+          zkAcl, fencingNodePath);
       break;
     case REMOVE:
-      safeDelete(path);
+      zkManager.safeDelete(path, zkAcl, fencingNodePath);
       break;
     default:
       break;
@@ -931,10 +939,10 @@
         for (ApplicationAttemptId attemptId : attempts) {
           String attemptRemovePath =
               getNodePath(appIdRemovePath, attemptId.toString());
-          safeDelete(attemptRemovePath);
+          zkManager.safeDelete(attemptRemovePath, zkAcl, fencingNodePath);
         }
       }
-      safeDelete(appIdRemovePath);
+      zkManager.safeDelete(appIdRemovePath, zkAcl, fencingNodePath);
     } else {
       CuratorFramework curatorFramework = zkManager.getCurator();
       curatorFramework.delete().deletingChildrenIfNeeded().
@@ -948,7 +956,7 @@
   protected synchronized void storeRMDelegationTokenState(
       RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate)
       throws Exception {
-    SafeTransaction trx = new SafeTransaction();
+    SafeTransaction trx = zkManager.createTransaction(zkAcl, fencingNodePath);
     addStoreOrUpdateOps(trx, rmDTIdentifier, renewDate, false);
     trx.commit();
   }
@@ -965,14 +973,14 @@
           + rmDTIdentifier.getSequenceNumber());
     }
 
-    safeDelete(nodeRemovePath);
+    zkManager.safeDelete(nodeRemovePath, zkAcl, fencingNodePath);
   }
 
   @Override
   protected synchronized void updateRMDelegationTokenState(
       RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate)
       throws Exception {
-    SafeTransaction trx = new SafeTransaction();
+    SafeTransaction trx = zkManager.createTransaction(zkAcl, fencingNodePath);
     String nodeRemovePath =
         getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX
             + rmDTIdentifier.getSequenceNumber());
@@ -1036,8 +1044,8 @@
     ByteArrayOutputStream os = new ByteArrayOutputStream();
     try(DataOutputStream fsOut = new DataOutputStream(os)) {
       delegationKey.write(fsOut);
-      safeCreate(nodeCreatePath, os.toByteArray(), zkAcl,
-          CreateMode.PERSISTENT);
+      zkManager.safeCreate(nodeCreatePath, os.toByteArray(), zkAcl,
+          CreateMode.PERSISTENT, zkAcl, fencingNodePath);
     }
   }
 
@@ -1052,7 +1060,7 @@
       LOG.debug("Removing RMDelegationKey_" + delegationKey.getKeyId());
     }
 
-    safeDelete(nodeRemovePath);
+    zkManager.safeDelete(nodeRemovePath, zkAcl, fencingNodePath);
   }
 
   @Override
@@ -1079,7 +1087,8 @@
         AMRMTokenSecretManagerState.newInstance(amrmTokenSecretManagerState);
     byte[] stateData = data.getProto().toByteArray();
 
-    safeSetData(amrmTokenSecretManagerRoot, stateData, -1);
+    zkManager.safeSetData(amrmTokenSecretManagerRoot, stateData, -1, zkAcl,
+        fencingNodePath);
   }
 
   @Override
@@ -1093,12 +1102,12 @@
           + " for" + " plan " + planName);
     }
 
-    safeDelete(reservationPath);
+    zkManager.safeDelete(reservationPath, zkAcl, fencingNodePath);
 
     List<String> reservationNodes = getChildren(planNodePath);
 
     if (reservationNodes.isEmpty()) {
-      safeDelete(planNodePath);
+      zkManager.safeDelete(planNodePath, zkAcl, fencingNodePath);
     }
   }
 
@@ -1106,7 +1115,7 @@
   protected synchronized void storeReservationState(
       ReservationAllocationStateProto reservationAllocation, String planName,
       String reservationIdName) throws Exception {
-    SafeTransaction trx = new SafeTransaction();
+    SafeTransaction trx = zkManager.createTransaction(zkAcl, fencingNodePath);
     addOrUpdateReservationState(reservationAllocation, planName,
         reservationIdName, trx, false);
     trx.commit();
@@ -1147,22 +1156,6 @@
   }
 
   /**
-   * Utility function to ensure that the configured base znode exists.
-   * This recursively creates the znode as well as all of its parents.
-   */
-  private void createRootDirRecursively(String path) throws Exception {
-    String pathParts[] = path.split("/");
-    Preconditions.checkArgument(pathParts.length >= 1 && pathParts[0].isEmpty(),
-        "Invalid path: %s", path);
-    StringBuilder sb = new StringBuilder();
-
-    for (int i = 1; i < pathParts.length; i++) {
-      sb.append("/").append(pathParts[i]);
-      create(sb.toString());
-    }
-  }
-
-  /**
    * Get alternate path for app id if path according to configured split index
    * does not exist. We look for path based on all possible split indices.
    * @param appId
@@ -1208,7 +1201,8 @@
         getNodePath(rootNode, nodeName.substring(0, splitIdx));
     if (createParentIfNotExists && !exists(rootNodePath)) {
       try {
-        safeCreate(rootNodePath, null, zkAcl, CreateMode.PERSISTENT);
+        zkManager.safeCreate(rootNodePath, null, zkAcl, CreateMode.PERSISTENT,
+            zkAcl, fencingNodePath);
       } catch (KeeperException.NodeExistsException e) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Unable to create app parent node " + rootNodePath +
@@ -1265,76 +1259,6 @@
     zkManager.delete(path);
   }
 
-  private void safeCreate(String path, byte[] data, List<ACL> acl,
-      CreateMode mode) throws Exception {
-    if (!exists(path)) {
-      SafeTransaction transaction = new SafeTransaction();
-      transaction.create(path, data, acl, mode);
-      transaction.commit();
-    }
-  }
-
-  /**
-   * Deletes the path. Checks for existence of path as well.
-   * @param path Path to be deleted.
-   * @throws Exception if any problem occurs while performing deletion.
-   */
-  private void safeDelete(final String path) throws Exception {
-    if (exists(path)) {
-      SafeTransaction transaction = new SafeTransaction();
-      transaction.delete(path);
-      transaction.commit();
-    }
-  }
-
-  private void safeSetData(String path, byte[] data, int version)
-      throws Exception {
-    SafeTransaction transaction = new SafeTransaction();
-    transaction.setData(path, data, version);
-    transaction.commit();
-  }
-
-  /**
-   * Use curator transactions to ensure zk-operations are performed in an all
-   * or nothing fashion. This is equivalent to using ZooKeeper#multi.
-   *
-   * TODO (YARN-3774): Curator 3.0 introduces CuratorOp similar to Op. We ll
-   * have to rewrite this inner class when we adopt that.
-   */
-  private class SafeTransaction {
-    private CuratorTransactionFinal transactionFinal;
-
-    SafeTransaction() throws Exception {
-      CuratorFramework curatorFramework = zkManager.getCurator();
-      CuratorTransaction transaction = curatorFramework.inTransaction();
-      transactionFinal = transaction.create()
-          .withMode(CreateMode.PERSISTENT).withACL(zkAcl)
-          .forPath(fencingNodePath, new byte[0]).and();
-    }
-
-    public void commit() throws Exception {
-      transactionFinal = transactionFinal.delete()
-          .forPath(fencingNodePath).and();
-      transactionFinal.commit();
-    }
-
-    public void create(String path, byte[] data, List<ACL> acl, CreateMode mode)
-        throws Exception {
-      transactionFinal = transactionFinal.create()
-          .withMode(mode).withACL(acl).forPath(path, data).and();
-    }
-
-    public void delete(String path) throws Exception {
-      transactionFinal = transactionFinal.delete().forPath(path).and();
-    }
-
-    public void setData(String path, byte[] data, int version)
-        throws Exception {
-      transactionFinal = transactionFinal.setData()
-          .withVersion(version).forPath(path, data).and();
-    }
-  }
-
   /**
    * Helper class that periodically attempts creating a znode to ensure that
    * this RM continues to be the Active.
@@ -1349,7 +1273,7 @@
       try {
         while (!isFencedState()) {
           // Create and delete fencing node
-          new SafeTransaction().commit();
+          zkManager.createTransaction(zkAcl, fencingNodePath).commit();
           Thread.sleep(zkSessionTimeout);
         }
       } catch (InterruptedException ie) {
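
The SafeTransaction inner class removed above is refactored into ZKCuratorManager rather than dropped: zkManager.createTransaction(zkAcl, fencingNodePath) returns the same fenced transaction. A minimal sketch of the pattern, reconstructed from the deleted code (the Curator calls are real; the variable wiring is illustrative):

    // Acquire the fence by creating the fencing node inside the transaction.
    CuratorFramework curator = zkManager.getCurator();
    CuratorTransactionFinal txn = curator.inTransaction()
        .create().withMode(CreateMode.PERSISTENT).withACL(zkAcl)
        .forPath(fencingNodePath, new byte[0]).and();
    // Queue the actual state-store operation(s).
    txn = txn.setData().withVersion(-1).forPath(path, data).and();
    // Release the fence and commit: all ops succeed or none do,
    // equivalent to ZooKeeper#multi.
    txn = txn.delete().forPath(fencingNodePath).and();
    txn.commit();
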
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 43fd1fb..93c41b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -23,6 +23,8 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -30,6 +32,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -39,6 +42,7 @@
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
@@ -180,28 +184,27 @@
   String getTrackingUrl();
 
   /**
-   * The collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
+   * The timeline collector information for the application. It should be used
+   * only if the timeline service v.2 is enabled.
    *
-   * @return the address for the application's collector, or null if the
-   * timeline service v.2 is not enabled.
+   * @return the data for the application's collector, including the collector
+   * address, RM ID, version and collector token. Returns null if the timeline
+   * service v.2 is not enabled.
    */
-  String getCollectorAddr();
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  AppCollectorData getCollectorData();
 
   /**
-   * Set collector address for the application. It should be used only if the
-   * timeline service v.2 is enabled.
+   * The timeline collector information to be sent to the AM. It should be
+   * used only if the timeline service v.2 is enabled.
    *
-   * @param collectorAddr the address of collector
+   * @return the collector info, including the collector address and collector
+   * token. Returns null if the timeline service v.2 is not enabled.
    */
-  void setCollectorAddr(String collectorAddr);
-
-  /**
-   * Remove collector address when application is finished or killed. It should
-   * be used only if the timeline service v.2 is enabled.
-   */
-  void removeCollectorAddr();
-
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  CollectorInfo getCollectorInfo();
+
   /**
    * The original tracking url for the application master.
    * @return the original tracking url for the application master.
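
Both new getters return null when the timeline service v.2 is disabled, so callers must guard for it. A hypothetical caller sketch (the AllocateResponse#setCollectorInfo carrier is an assumption, not part of this hunk):

    CollectorInfo info = rmApp.getCollectorInfo();
    if (info != null) {  // null when timeline service v.2 is disabled
      // Hypothetical AM-facing carrier for the collector address and token.
      allocateResponse.setCollectorInfo(info);
    }
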
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
index 04d2db5..514efd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
@@ -30,9 +30,6 @@
   // Source: Scheduler
   APP_ACCEPTED,
 
-  // TODO add source later
-  COLLECTOR_UPDATE,
-
   // Source: RMAppAttempt
   ATTEMPT_REGISTERED,
   ATTEMPT_UNREGISTERED,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 03be793..13b0792 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -72,6 +73,7 @@
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
@@ -165,7 +167,8 @@
   private long storedFinishTime = 0;
   private int firstAttemptIdInStateStore = 1;
   private int nextAttemptId = 1;
-  private volatile String collectorAddr;
+  private AppCollectorData collectorData;
+  private CollectorInfo collectorInfo;
   // This field isn't protected by readlock now.
   private volatile RMAppAttempt currentAttempt;
   private String queue;
@@ -529,7 +532,7 @@
    */
   public void startTimelineCollector() {
     AppLevelTimelineCollector collector =
-        new AppLevelTimelineCollector(applicationId);
+        new AppLevelTimelineCollector(applicationId, user);
     rmContext.getRMTimelineCollectorManager().putIfAbsent(
         applicationId, collector);
   }
@@ -611,18 +614,22 @@
   }
 
   @Override
-  public String getCollectorAddr() {
-    return this.collectorAddr;
+  public AppCollectorData getCollectorData() {
+    return this.collectorData;
   }
 
-  @Override
-  public void setCollectorAddr(String collectorAddress) {
-    this.collectorAddr = collectorAddress;
+  public void setCollectorData(AppCollectorData incomingData) {
+    this.collectorData = incomingData;
+    this.collectorInfo = CollectorInfo.newInstance(
+        incomingData.getCollectorAddr(), incomingData.getCollectorToken());
   }
 
-  @Override
-  public void removeCollectorAddr() {
-    this.collectorAddr = null;
+  public CollectorInfo getCollectorInfo() {
+    return this.collectorInfo;
+  }
+
+  public void removeCollectorData() {
+    this.collectorData = null;
   }
 
   @Override
@@ -1086,8 +1093,6 @@
                   app.submissionContext.getCancelTokensWhenComplete(),
                   app.getUser(),
                   BuilderUtils.parseTokensConf(app.submissionContext));
-          // set the memory free
-          app.submissionContext.getAMContainerSpec().setTokensConf(null);
         } catch (Exception e) {
           String msg = "Failed to fetch user credentials from application:"
               + e.getMessage();
@@ -1140,8 +1145,6 @@
           app.submissionContext, false, app.applicationPriority));
       // send the ATS create Event
       app.sendATSCreateEvent();
-      // Set the memory free after submission context is persisted
-      app.submissionContext.getAMContainerSpec().setTokensConf(null);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 7d453bd..65412df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -38,7 +38,6 @@
 
 import javax.crypto.SecretKey;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -110,6 +109,7 @@
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BoundedAppender;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -184,7 +184,10 @@
       new ExpiredTransition();
   private static final AttemptFailedTransition FAILED_TRANSITION =
       new AttemptFailedTransition();
-
+  private static final AMRegisteredTransition REGISTERED_TRANSITION =
+      new AMRegisteredTransition();
+  private static final AMLaunchedTransition LAUNCHED_TRANSITION =
+      new AMLaunchedTransition();
   private RMAppAttemptEvent eventCausingFinalSaving;
   private RMAppAttemptState targetedFinalState;
   private RMAppAttemptState recoveredFinalState;
@@ -314,7 +317,7 @@
 
        // Transitions from ALLOCATED State
       .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.LAUNCHED,
-          RMAppAttemptEventType.LAUNCHED, new AMLaunchedTransition())
+          RMAppAttemptEventType.LAUNCHED, LAUNCHED_TRANSITION)
       .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.FINAL_SAVING,
           RMAppAttemptEventType.LAUNCH_FAILED,
           new FinalSavingTransition(new LaunchFailedTransition(),
@@ -328,6 +331,8 @@
           RMAppAttemptEventType.FAIL,
           new FinalSavingTransition(FAILED_TRANSITION,
               RMAppAttemptState.FAILED))
+      .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.RUNNING,
+          RMAppAttemptEventType.REGISTERED, REGISTERED_TRANSITION)
       .addTransition(RMAppAttemptState.ALLOCATED, RMAppAttemptState.FINAL_SAVING,
           RMAppAttemptEventType.CONTAINER_FINISHED,
           new FinalSavingTransition(
@@ -335,7 +340,7 @@
 
        // Transitions from LAUNCHED State
       .addTransition(RMAppAttemptState.LAUNCHED, RMAppAttemptState.RUNNING,
-          RMAppAttemptEventType.REGISTERED, new AMRegisteredTransition())
+          RMAppAttemptEventType.REGISTERED, REGISTERED_TRANSITION)
       .addTransition(RMAppAttemptState.LAUNCHED,
           EnumSet.of(RMAppAttemptState.LAUNCHED, RMAppAttemptState.FINAL_SAVING),
           RMAppAttemptEventType.CONTAINER_FINISHED,
@@ -357,6 +362,8 @@
               RMAppAttemptState.FAILED))
 
        // Transitions from RUNNING State
+      .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
+          RMAppAttemptEventType.LAUNCHED)
       .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.FINAL_SAVING,
           RMAppAttemptEventType.UNREGISTERED, new AMUnregisteredTransition())
       .addTransition(RMAppAttemptState.RUNNING, RMAppAttemptState.RUNNING,
@@ -421,6 +428,7 @@
           RMAppAttemptState.FAILED,
           RMAppAttemptState.FAILED,
           EnumSet.of(
+              RMAppAttemptEventType.LAUNCHED,
               RMAppAttemptEventType.EXPIRE,
               RMAppAttemptEventType.KILL,
               RMAppAttemptEventType.FAIL,
@@ -438,6 +446,7 @@
           new FinalTransition(RMAppAttemptState.FINISHED))
       .addTransition(RMAppAttemptState.FINISHING, RMAppAttemptState.FINISHING,
           EnumSet.of(
+              RMAppAttemptEventType.LAUNCHED,
               RMAppAttemptEventType.UNREGISTERED,
               RMAppAttemptEventType.STATUS_UPDATE,
               RMAppAttemptEventType.CONTAINER_ALLOCATED,
@@ -451,6 +460,7 @@
           RMAppAttemptState.FINISHED,
           RMAppAttemptState.FINISHED,
           EnumSet.of(
+              RMAppAttemptEventType.LAUNCHED,
               RMAppAttemptEventType.EXPIRE,
               RMAppAttemptEventType.UNREGISTERED,
               RMAppAttemptEventType.CONTAINER_ALLOCATED,
@@ -1291,7 +1301,7 @@
          * 2) OR AMLivelinessMonitor expires this attempt (when am doesn't
          * heart beat back).  
          */
-        (new AMLaunchedTransition()).transition(appAttempt, event);
+        LAUNCHED_TRANSITION.transition(appAttempt, event);
         return RMAppAttemptState.LAUNCHED;
       }
     }
@@ -1316,7 +1326,7 @@
     // AFTER the initial saving on app-attempt-start
     // These fields can be visible from outside only after they are saved in
     // StateStore
-    BoundedAppender diags = new BoundedAppender(diagnostics.limit);
+    BoundedAppender diags = new BoundedAppender(diagnostics.getLimit());
 
     // don't leave the tracking URL pointing to a non-existent AM
     if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
@@ -1516,7 +1526,8 @@
     @Override
     public void transition(RMAppAttemptImpl appAttempt,
                             RMAppAttemptEvent event) {
-      if (event.getType() == RMAppAttemptEventType.LAUNCHED) {
+      if (event.getType() == RMAppAttemptEventType.LAUNCHED
+          || event.getType() == RMAppAttemptEventType.REGISTERED) {
         appAttempt.launchAMEndTime = System.currentTimeMillis();
         long delay = appAttempt.launchAMEndTime -
             appAttempt.launchAMStartTime;
@@ -1651,6 +1662,10 @@
     @Override
     public void transition(RMAppAttemptImpl appAttempt,
         RMAppAttemptEvent event) {
+      if (!RMAppAttemptState.LAUNCHED.equals(appAttempt.getState())) {
+        // registered received before launch
+        LAUNCHED_TRANSITION.transition(appAttempt, event);
+      }
       long delay = System.currentTimeMillis() - appAttempt.launchAMEndTime;
       ClusterMetrics.getMetrics().addAMRegisterDelay(delay);
       RMAppAttemptRegistrationEvent registrationEvent
@@ -2280,114 +2295,4 @@
     return Collections.EMPTY_SET;
   }
 
-  /**
-   * A {@link CharSequence} appender that considers its {@link #limit} as upper
-   * bound.
-   * <p>
-   * When {@link #limit} would be reached on append, past messages will be
-   * truncated from head, and a header telling the user about truncation will be
-   * prepended, with ellipses in between header and messages.
-   * <p>
-   * Note that header and ellipses are not counted against {@link #limit}.
-   * <p>
-   * An example:
-   *
-   * <pre>
-   * {@code
-   *   // At the beginning it's an empty string
-   *   final Appendable shortAppender = new BoundedAppender(80);
-   *   // The whole message fits into limit
-   *   shortAppender.append(
-   *       "message1 this is a very long message but fitting into limit\n");
-   *   // The first message is truncated, the second not
-   *   shortAppender.append("message2 this is shorter than the previous one\n");
-   *   // The first message is deleted, the second truncated, the third
-   *   // preserved
-   *   shortAppender.append("message3 this is even shorter message, maybe.\n");
-   *   // The first two are deleted, the third one truncated, the last preserved
-   *   shortAppender.append("message4 the shortest one, yet the greatest :)");
-   *   // Current contents are like this:
-   *   // Diagnostic messages truncated, showing last 80 chars out of 199:
-   *   // ...s is even shorter message, maybe.
-   *   // message4 the shortest one, yet the greatest :)
-   * }
-   * </pre>
-   * <p>
-   * Note that <tt>null</tt> values are {@link #append(CharSequence) append}ed
-   * just like in {@link StringBuilder#append(CharSequence) original
-   * implementation}.
-   * <p>
-   * Note that this class is not thread safe.
-   */
-  @VisibleForTesting
-  static class BoundedAppender {
-    @VisibleForTesting
-    static final String TRUNCATED_MESSAGES_TEMPLATE =
-        "Diagnostic messages truncated, showing last "
-            + "%d chars out of %d:%n...%s";
-
-    private final int limit;
-    private final StringBuilder messages = new StringBuilder();
-    private int totalCharacterCount = 0;
-
-    BoundedAppender(final int limit) {
-      Preconditions.checkArgument(limit > 0, "limit should be positive");
-
-      this.limit = limit;
-    }
-
-    /**
-     * Append a {@link CharSequence} considering {@link #limit}, truncating
-     * from the head of {@code csq} or {@link #messages} when necessary.
-     *
-     * @param csq the {@link CharSequence} to append
-     * @return this
-     */
-    BoundedAppender append(final CharSequence csq) {
-      appendAndCount(csq);
-      checkAndCut();
-
-      return this;
-    }
-
-    private void appendAndCount(final CharSequence csq) {
-      final int before = messages.length();
-      messages.append(csq);
-      final int after = messages.length();
-      totalCharacterCount += after - before;
-    }
-
-    private void checkAndCut() {
-      if (messages.length() > limit) {
-        final int newStart = messages.length() - limit;
-        messages.delete(0, newStart);
-      }
-    }
-
-    /**
-     * Get current length of messages considering truncates
-     * without header and ellipses.
-     *
-     * @return current length
-     */
-    int length() {
-      return messages.length();
-    }
-
-    /**
-     * Get a string representation of the actual contents, displaying also a
-     * header and ellipses when there was a truncate.
-     *
-     * @return String representation of the {@link #messages}
-     */
-    @Override
-    public String toString() {
-      if (messages.length() < totalCharacterCount) {
-        return String.format(TRUNCATED_MESSAGES_TEMPLATE, messages.length(),
-            totalCharacterCount, messages.toString());
-      }
-
-      return messages.toString();
-    }
-  }
 }
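
The BoundedAppender inner class removed above is relocated, not deleted: the new import of org.apache.hadoop.yarn.util.BoundedAppender and the switch from diagnostics.limit to diagnostics.getLimit() point to a standalone utility in hadoop-yarn-common. A short usage sketch based on the example in the removed javadoc, assuming the relocated class keeps the same API:

    import org.apache.hadoop.yarn.util.BoundedAppender;

    public class BoundedAppenderDemo {
      public static void main(String[] args) {
        // Keeps at most 80 chars; older content is truncated from the head.
        BoundedAppender diags = new BoundedAppender(80);
        diags.append(
            "message1 this is a very long message but fitting into limit\n");
        diags.append("message4 the shortest one, yet the greatest :)");
        // Once the total appended length exceeds the limit, toString()
        // prepends a "Diagnostic messages truncated ..." header.
        System.out.println(diags);
      }
    }
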
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index e089050..0655609 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -125,14 +126,16 @@
     long vcoreSeconds = finishedVcoreSeconds.get();
 
     // Only add in the running containers if this is the active attempt.
-    RMAppAttempt currentAttempt = rmContext.getRMApps()
-                   .get(attemptId.getApplicationId()).getCurrentAppAttempt();
-    if (currentAttempt.getAppAttemptId().equals(attemptId)) {
-      ApplicationResourceUsageReport appResUsageReport = rmContext
-            .getScheduler().getAppResourceUsageReport(attemptId);
-      if (appResUsageReport != null) {
-        memorySeconds += appResUsageReport.getMemorySeconds();
-        vcoreSeconds += appResUsageReport.getVcoreSeconds();
+    RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId());
+    if (null != rmApp) {
+      RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt();
+      if (currentAttempt.getAppAttemptId().equals(attemptId)) {
+        ApplicationResourceUsageReport appResUsageReport = rmContext
+                .getScheduler().getAppResourceUsageReport(attemptId);
+        if (appResUsageReport != null) {
+          memorySeconds += appResUsageReport.getMemorySeconds();
+          vcoreSeconds += appResUsageReport.getVcoreSeconds();
+        }
       }
     }
     return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 1e9463a..f49db7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -51,8 +51,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode
-    .RMNodeDecreaseContainerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeUpdateContainerEvent;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
@@ -284,7 +283,6 @@
   @Override
   public RMContainerState getState() {
     this.readLock.lock();
-
     try {
       return this.stateMachine.getCurrentState();
     } finally {
@@ -598,7 +596,7 @@
       RMContainerUpdatesAcquiredEvent acquiredEvent =
           (RMContainerUpdatesAcquiredEvent) event;
       if (acquiredEvent.isIncreasedContainer()) {
-        // If container is increased but not acquired by AM, we will start
+        // If the container is increased but not yet started by the AM, we start
         // containerAllocationExpirer for this container in this transition. 
         container.containerAllocationExpirer.register(
             new AllocationExpirationInfo(event.getContainerId(), true));
@@ -641,7 +639,7 @@
         container.lastConfirmedResource = rmContainerResource;
         container.containerAllocationExpirer.unregister(
             new AllocationExpirationInfo(event.getContainerId()));
-        container.eventHandler.handle(new RMNodeDecreaseContainerEvent(
+        container.eventHandler.handle(new RMNodeUpdateContainerEvent(
             container.nodeId,
             Collections.singletonList(container.getContainer())));
       } else if (Resources.fitsIn(nmContainerResource, rmContainerResource)) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index 86f8679..ab15c95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -144,7 +144,7 @@
    * applications to clean up for this node.
    * @param response the {@link NodeHeartbeatResponse} to update
    */
-  public void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response);
+  void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response);
 
   public NodeHeartbeatResponse getLastNodeHeartBeatResponse();
 
@@ -169,9 +169,9 @@
   public Set<String> getNodeLabels();
   
   /**
-   * Update containers to be decreased
+   * Update the heartbeat response with the containers to be updated.
    */
-  public void updateNodeHeartbeatResponseForContainersDecreasing(
+  void updateNodeHeartbeatResponseForUpdatedContainers(
       NodeHeartbeatResponse response);
   
   public List<Container> pullNewlyIncreasedContainers();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
index b28fef3..a3b2ed7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java
@@ -42,7 +42,7 @@
   // Source: Container
   CONTAINER_ALLOCATED,
   CLEANUP_CONTAINER,
-  DECREASE_CONTAINER,
+  UPDATE_CONTAINER,
 
   // Source: ClientRMService
   SIGNAL_CONTAINER,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 1f121f8..1bdaa98 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -171,7 +171,7 @@
   private final List<ApplicationId> runningApplications =
       new ArrayList<ApplicationId>();
   
-  private final Map<ContainerId, Container> toBeDecreasedContainers =
+  private final Map<ContainerId, Container> toBeUpdatedContainers =
       new HashMap<>();
   
   private final Map<ContainerId, Container> nmReportedIncreasedContainers =
@@ -228,8 +228,8 @@
       .addTransition(NodeState.RUNNING, NodeState.RUNNING,
           RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenRunningTransition())
       .addTransition(NodeState.RUNNING, NodeState.RUNNING,
-          RMNodeEventType.DECREASE_CONTAINER,
-          new DecreaseContainersTransition())
+          RMNodeEventType.UPDATE_CONTAINER,
+          new UpdateContainersTransition())
       .addTransition(NodeState.RUNNING, NodeState.RUNNING,
           RMNodeEventType.SIGNAL_CONTAINER, new SignalContainerTransition())
       .addTransition(NodeState.RUNNING, NodeState.SHUTDOWN,
@@ -614,18 +614,18 @@
   };
   
   @VisibleForTesting
-  public Collection<Container> getToBeDecreasedContainers() {
-    return toBeDecreasedContainers.values(); 
+  public Collection<Container> getToBeUpdatedContainers() {
+    return toBeUpdatedContainers.values();
   }
   
   @Override
-  public void updateNodeHeartbeatResponseForContainersDecreasing(
+  public void updateNodeHeartbeatResponseForUpdatedContainers(
       NodeHeartbeatResponse response) {
     this.writeLock.lock();
     
     try {
-      response.addAllContainersToDecrease(toBeDecreasedContainers.values());
-      toBeDecreasedContainers.clear();
+      response.addAllContainersToUpdate(toBeUpdatedContainers.values());
+      toBeUpdatedContainers.clear();
     } finally {
       this.writeLock.unlock();
     }
@@ -1031,16 +1031,19 @@
           RMNodeFinishedContainersPulledByAMEvent) event).getContainers());
     }
   }
-  
-  public static class DecreaseContainersTransition
+
+  /**
+   * Transition that records the containers to be updated on this node.
+   */
+  public static class UpdateContainersTransition
       implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
  
     @Override
     public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
-      RMNodeDecreaseContainerEvent de = (RMNodeDecreaseContainerEvent) event;
+      RMNodeUpdateContainerEvent de = (RMNodeUpdateContainerEvent) event;
 
-      for (Container c : de.getToBeDecreasedContainers()) {
-        rmNode.toBeDecreasedContainers.put(c.getId(), c);
+      for (Container c : de.getToBeUpdatedContainers()) {
+        rmNode.toBeUpdatedContainers.put(c.getId(), c);
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java
similarity index 66%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java
index 62925ad..73af563 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java
@@ -23,17 +23,22 @@
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.NodeId;
 
-public class RMNodeDecreaseContainerEvent extends RMNodeEvent {
-  final List<Container> toBeDecreasedContainers;
+/**
+ * This event is used to notify a node of updates to the containers
+ * running on it.
+ */
+public class RMNodeUpdateContainerEvent extends RMNodeEvent {
+  private List<Container> toBeUpdatedContainers;
 
-  public RMNodeDecreaseContainerEvent(NodeId nodeId,
-      List<Container> toBeDecreasedContainers) {
-    super(nodeId, RMNodeEventType.DECREASE_CONTAINER);
-    
-    this.toBeDecreasedContainers = toBeDecreasedContainers;
+  public RMNodeUpdateContainerEvent(NodeId nodeId,
+      List<Container> toBeUpdatedContainers) {
+    super(nodeId, RMNodeEventType.UPDATE_CONTAINER);
+
+    this.toBeUpdatedContainers = toBeUpdatedContainers;
   }
-  
-  public List<Container> getToBeDecreasedContainers() {
-    return toBeDecreasedContainers;
+
+  public List<Container> getToBeUpdatedContainers() {
+    return toBeUpdatedContainers;
   }
 }
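
Usage mirrors the call sites updated elsewhere in this patch (RMContainerImpl and SchedulerApplicationAttempt): the event carries the containers whose allocations changed, and UpdateContainersTransition stashes them on the node for the next heartbeat response. A sketch assembled from those call sites:

    // Posted from the RM side whenever a container update must reach the NM.
    eventHandler.handle(new RMNodeUpdateContainerEvent(
        container.getNodeId(),
        Collections.singletonList(container.getContainer())));
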
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 79caab0..2c27017 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -67,7 +67,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -89,6 +88,7 @@
 
 
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -150,6 +150,10 @@
    */
   protected final ReentrantReadWriteLock.WriteLock writeLock;
 
+  // If set to true, then ALL container updates will be automatically sent to
+  // the NM in the next heartbeat.
+  private boolean autoUpdateContainers = false;
+
   /**
    * Construct the service.
    *
@@ -178,6 +182,9 @@
         configuredMaximumAllocationWaitTime);
     maxClusterLevelAppPriority = getMaxPriorityFromConf(conf);
     createReleaseCache();
+    autoUpdateContainers =
+        conf.getBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS,
+            YarnConfiguration.DEFAULT_RM_AUTO_UPDATE_CONTAINERS);
     super.serviceInit(conf);
   }
 
@@ -235,6 +242,10 @@
     return nodeTracker.getNodes(nodeFilter);
   }
 
+  public boolean shouldContainersBeAutoUpdated() {
+    return this.autoUpdateContainers;
+  }
+
   @Override
   public Resource getClusterResource() {
     return nodeTracker.getClusterCapacity();
@@ -1262,4 +1273,14 @@
   public List<NodeId> getNodeIds(String resourceName) {
     return nodeTracker.getNodeIdsByResourceName(resourceName);
   }
+
+  /**
+   * To be used to release a container via a Scheduler Event rather than
+   * in the same thread.
+   * @param container Container.
+   */
+  public void asyncContainerRelease(RMContainer container) {
+    this.rmContext.getDispatcher().getEventHandler()
+        .handle(new ReleaseContainerEvent(container));
+  }
 }
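
The new autoUpdateContainers switch is read once in serviceInit. A minimal sketch of enabling it programmatically (the constant is from this patch; its underlying property string is an assumption, so check YarnConfiguration for the authoritative key):

    // Equivalent to setting the property in yarn-site.xml.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, true);
    // After serviceInit(conf), shouldContainersBeAutoUpdated() returns true
    // and every container update is pushed via the next NM heartbeat.
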
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 397d507..f9a7219 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -74,9 +74,9 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerUpdatesAcquiredEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode
-    .RMNodeDecreaseContainerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeUpdateContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
 
@@ -663,20 +663,38 @@
           + " an updated container " + container.getId(), e);
       return null;
     }
-    
-    if (updateType == null ||
-        ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateType ||
-        ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateType) {
+
+    if (updateType == null) {
+      // This is a newly allocated container
       rmContainer.handle(new RMContainerEvent(
           rmContainer.getContainerId(), RMContainerEventType.ACQUIRED));
     } else {
-      rmContainer.handle(new RMContainerUpdatesAcquiredEvent(
-          rmContainer.getContainerId(),
-          ContainerUpdateType.INCREASE_RESOURCE == updateType));
-      if (ContainerUpdateType.DECREASE_RESOURCE == updateType) {
+      // Resource increase is handled as follows:
+      // If the AM does not use the updated token to increase the container
+      // for a configured period of time, the RM will automatically roll back
+      // the update by performing a container decrease. This rollback (which
+      // is essentially another resource decrease update) is communicated to
+      // the NM via the heartbeat response. If the autoUpdate flag is set,
+      // the AM does not need to do anything - same code path as a decrease.
+      //
+      // Resource decrease is always automatic: the AM never has to do
+      // anything. It is always handled via the NM heartbeat response.
+      //
+      // ExecutionType updates (both promotion and demotion) are either
+      // always automatic (if the flag is set) or the AM has to explicitly
+      // call updateContainer() on the NM. There is no expiry.
+      boolean autoUpdate =
+          ContainerUpdateType.DECREASE_RESOURCE == updateType ||
+              ((AbstractYarnScheduler)rmContext.getScheduler())
+                  .shouldContainersBeAutoUpdated();
+      if (autoUpdate) {
         this.rmContext.getDispatcher().getEventHandler().handle(
-            new RMNodeDecreaseContainerEvent(rmContainer.getNodeId(),
+            new RMNodeUpdateContainerEvent(rmContainer.getNodeId(),
                 Collections.singletonList(rmContainer.getContainer())));
+      } else {
+        rmContainer.handle(new RMContainerUpdatesAcquiredEvent(
+            rmContainer.getContainerId(),
+            ContainerUpdateType.INCREASE_RESOURCE == updateType));
       }
     }
     return container;
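
The comment above compresses three policies into one branch. A sketch restating the decision (hypothetical helper, same logic as the hunk):

    // Auto-update applies to every decrease, and to all update types when
    // the scheduler-wide autoUpdateContainers flag is on.
    static boolean isAutoUpdated(ContainerUpdateType type,
        AbstractYarnScheduler scheduler) {
      return ContainerUpdateType.DECREASE_RESOURCE == type
          || scheduler.shouldContainersBeAutoUpdated();
    }
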
@@ -849,10 +867,13 @@
         // Mark container for release (set RRs to null, so RM does not think
         // it is a recoverable container)
         ((RMContainerImpl) c).setResourceRequests(null);
-        ((AbstractYarnScheduler) rmContext.getScheduler()).completedContainer(c,
-            SchedulerUtils.createAbnormalContainerStatus(c.getContainerId(),
-                SchedulerUtils.UPDATED_CONTAINER),
-            RMContainerEventType.KILL);
+
+        // Release this container asynchronously so as to prevent
+        // 'LeafQueue::completedContainer()' from trying to acquire locks
+        // on the app and queue, which the scheduler thread can contend
+        // for in the reverse order.
+        ((AbstractYarnScheduler)rmContext.getScheduler())
+            .asyncContainerRelease(c);
         tempIter.remove();
       }
       return updatedContainers;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index e4ca003..fde84c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -124,6 +124,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
@@ -1491,6 +1493,16 @@
       }
     }
     break;
+    case RELEASE_CONTAINER:
+    {
+      RMContainer container = ((ReleaseContainerEvent) event).getContainer();
+      completedContainer(container,
+          SchedulerUtils.createAbnormalContainerStatus(
+            container.getContainerId(),
+            SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
+    }
+    break;
     case KILL_RESERVED_CONTAINER:
     {
       ContainerPreemptEvent killReservedContainerEvent =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
index fb67270..ed0ee1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
@@ -203,6 +203,10 @@
     LOG.debug("Node update event from: " + rmNode.getNodeID());
     OpportunisticContainersStatus opportunisticContainersStatus =
         rmNode.getOpportunisticContainersStatus();
+    if (opportunisticContainersStatus == null) {
+      opportunisticContainersStatus =
+          OpportunisticContainersStatus.newInstance();
+    }
     int estimatedQueueWaitTime =
         opportunisticContainersStatus.getEstimatedQueueWaitTime();
     int waitQueueLength = opportunisticContainersStatus.getWaitQueueLength();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ReleaseContainerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ReleaseContainerEvent.java
new file mode 100644
index 0000000..4f31684
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/ReleaseContainerEvent.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.event;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+
+/**
+ * Event used to release a container.
+ */
+public class ReleaseContainerEvent extends SchedulerEvent {
+
+  private final RMContainer container;
+
+  /**
+   * Create Event.
+   * @param rmContainer RMContainer.
+   */
+  public ReleaseContainerEvent(RMContainer rmContainer) {
+    super(SchedulerEventType.RELEASE_CONTAINER);
+    this.container = rmContainer;
+  }
+
+  /**
+   * Get RMContainer.
+   * @return RMContainer.
+   */
+  public RMContainer getContainer() {
+    return container;
+  }
+}
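
All three schedulers (Capacity, Fair, Fifo) handle the new event type identically, which is what makes asyncContainerRelease() safe: the release is replayed on the scheduler's own dispatcher thread instead of the caller's. The shared handler shape, as added in the corresponding hunks of this patch:

    case RELEASE_CONTAINER:
      RMContainer container = ((ReleaseContainerEvent) event).getContainer();
      completedContainer(container,
          SchedulerUtils.createAbnormalContainerStatus(
              container.getContainerId(),
              SchedulerUtils.RELEASED_CONTAINER),
          RMContainerEventType.RELEASED);
      break;
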
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
index 35b7c14..229e0bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
@@ -38,6 +38,9 @@
   // Source: ContainerAllocationExpirer
   CONTAINER_EXPIRED,
 
+  // Source: SchedulerApplicationAttempt::pullNewlyUpdatedContainers.
+  RELEASE_CONTAINER,
+
   /* Source: SchedulingEditPolicy */
   KILL_RESERVED_CONTAINER,
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 5dfef73..fe57d1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -82,8 +82,9 @@
 
   // Preemption related variables
   private final Object preemptionVariablesLock = new Object();
-  private final Resource preemptedResources = Resources.clone(Resources.none());
-  private final Set<RMContainer> containersToPreempt = new HashSet<>();
+  private final Set<RMContainer> containersToBePreempted = new HashSet<>();
+  private final Resource resourcesToBePreempted =
+      Resources.clone(Resources.none());
 
   private Resource fairshareStarvation = Resources.none();
   private long lastTimeAtFairShare;
@@ -565,16 +566,17 @@
 
   void trackContainerForPreemption(RMContainer container) {
     synchronized (preemptionVariablesLock) {
-      if (containersToPreempt.add(container)) {
-        Resources.addTo(preemptedResources, container.getAllocatedResource());
+      if (containersToBePreempted.add(container)) {
+        Resources.addTo(resourcesToBePreempted,
+            container.getAllocatedResource());
       }
     }
   }
 
   private void untrackContainerForPreemption(RMContainer container) {
     synchronized (preemptionVariablesLock) {
-      if (containersToPreempt.remove(container)) {
-        Resources.subtractFrom(preemptedResources,
+      if (containersToBePreempted.remove(container)) {
+        Resources.subtractFrom(resourcesToBePreempted,
             container.getAllocatedResource());
       }
     }
@@ -583,7 +585,7 @@
   Set<ContainerId> getPreemptionContainerIds() {
     synchronized (preemptionVariablesLock) {
       Set<ContainerId> preemptionContainerIds = new HashSet<>();
-      for (RMContainer container : containersToPreempt) {
+      for (RMContainer container : containersToBePreempted) {
         preemptionContainerIds.add(container.getContainerId());
       }
       return preemptionContainerIds;
@@ -604,7 +606,7 @@
     }
 
     synchronized (preemptionVariablesLock) {
-      if (containersToPreempt.contains(container)) {
+      if (containersToBePreempted.contains(container)) {
         // The container is already under consideration for preemption
         return false;
       }
@@ -1271,9 +1273,9 @@
     // Subtract copies the object, so that we have a snapshot,
     // in case usage changes, while the caller is using the value
     synchronized (preemptionVariablesLock) {
-      return containersToPreempt.isEmpty()
+      return containersToBePreempted.isEmpty()
           ? getCurrentConsumption()
-          : Resources.subtract(getCurrentConsumption(), preemptedResources);
+          : Resources.subtract(getCurrentConsumption(), resourcesToBePreempted);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 0f417c3..c521250 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -83,6 +83,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -1195,6 +1197,17 @@
           appAttemptRemovedEvent.getFinalAttemptState(),
           appAttemptRemovedEvent.getKeepContainersAcrossAppAttempts());
       break;
+    case RELEASE_CONTAINER:
+      if (!(event instanceof ReleaseContainerEvent)) {
+        throw new RuntimeException("Unexpected event type: " + event);
+      }
+      RMContainer container = ((ReleaseContainerEvent) event).getContainer();
+      completedContainer(container,
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(),
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
+      break;
     case CONTAINER_EXPIRED:
       if (!(event instanceof ContainerExpiredSchedulerEvent)) {
         throw new RuntimeException("Unexpected event type: " + event);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 92a88b9..94c7e16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -65,7 +65,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedContainerChangeRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
@@ -80,6 +79,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@@ -820,6 +821,18 @@
           RMContainerEventType.EXPIRE);
     }
     break;
+    case RELEASE_CONTAINER: {
+      if (!(event instanceof ReleaseContainerEvent)) {
+        throw new RuntimeException("Unexpected event type: " + event);
+      }
+      RMContainer container = ((ReleaseContainerEvent) event).getContainer();
+      completedContainer(container,
+          SchedulerUtils.createAbnormalContainerStatus(
+              container.getContainerId(),
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
+    }
+    break;
     default:
       LOG.error("Invalid eventtype " + event.getType() + ". Ignoring!");
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index f3ab5b0..93e1c97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -228,9 +228,10 @@
           resourcesUsed = userInfo.getResourceUsageInfo()
               .getPartitionResourceUsageInfo(nodeLabel).getUsed();
         }
-        ResourceInfo amUsed = (resourceUsages.getAmUsed() == null)
-            ? new ResourceInfo(Resources.none())
-            : resourceUsages.getAmUsed();
+        ResourceInfo amUsed = userInfo.getAMResourcesUsed();
+        if (amUsed == null) {
+          amUsed = new ResourceInfo(Resources.none());
+        }
         String highlightIfAsking =
             userInfo.getIsActive() ? ACTIVE_USER : null;
         tbody.tr().$style(highlightIfAsking).td(userInfo.getUsername())
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 6ce47de..7ffe106 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -568,7 +568,7 @@
 
     List<ApplicationReport> appReports = null;
     try {
-      appReports = rm.getClientRMService().getApplications(request, false)
+      appReports = rm.getClientRMService().getApplications(request)
           .getApplicationList();
     } catch (YarnException e) {
       LOG.error("Unable to retrieve apps from ClientRMService", e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index f11939a..9fb8fb5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 
 @XmlRootElement(name = "app")
@@ -71,9 +72,9 @@
   // these are ok for any user to see
   protected String id;
   protected String user;
-  protected String name;
+  private String name;
   protected String queue;
-  protected YarnApplicationState state;
+  private YarnApplicationState state;
   protected FinalApplicationStatus finalStatus;
   protected float progress;
   protected String trackingUI;
@@ -91,21 +92,21 @@
   protected String amContainerLogs;
   protected String amHostHttpAddress;
   private String amRPCAddress;
-  protected long allocatedMB;
-  protected long allocatedVCores;
-  protected long reservedMB;
-  protected long reservedVCores;
-  protected int runningContainers;
-  protected long memorySeconds;
-  protected long vcoreSeconds;
+  private long allocatedMB;
+  private long allocatedVCores;
+  private long reservedMB;
+  private long reservedVCores;
+  private int runningContainers;
+  private long memorySeconds;
+  private long vcoreSeconds;
   protected float queueUsagePercentage;
   protected float clusterUsagePercentage;
 
   // preemption info fields
-  protected long preemptedResourceMB;
-  protected long preemptedResourceVCores;
-  protected int numNonAMContainerPreempted;
-  protected int numAMContainerPreempted;
+  private long preemptedResourceMB;
+  private long preemptedResourceVCores;
+  private int numNonAMContainerPreempted;
+  private int numAMContainerPreempted;
   private long preemptedMemorySeconds;
   private long preemptedVcoreSeconds;
 
@@ -142,12 +143,11 @@
           || YarnApplicationState.NEW_SAVING == this.state
           || YarnApplicationState.SUBMITTED == this.state
           || YarnApplicationState.ACCEPTED == this.state;
-      this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app
-          .getFinishTime() == 0 ? "ApplicationMaster" : "History");
+      this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED"
+          : (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
       if (!trackingUrlIsNotReady) {
         this.trackingUrl =
-            WebAppUtils.getURLWithScheme(schemePrefix,
-                trackingUrl);
+            WebAppUtils.getURLWithScheme(schemePrefix, trackingUrl);
         this.trackingUrlPretty = this.trackingUrl;
       } else {
         this.trackingUrlPretty = "UNASSIGNED";
@@ -162,15 +162,15 @@
       this.priority = 0;
 
       if (app.getApplicationPriority() != null) {
-        this.priority = app.getApplicationPriority()
-            .getPriority();
+        this.priority = app.getApplicationPriority().getPriority();
       }
       this.progress = app.getProgress() * 100;
       this.diagnostics = app.getDiagnostics().toString();
       if (diagnostics == null || diagnostics.isEmpty()) {
         this.diagnostics = "";
       }
-      if (app.getApplicationTags() != null && !app.getApplicationTags().isEmpty()) {
+      if (app.getApplicationTags() != null
+          && !app.getApplicationTags().isEmpty()) {
         this.applicationTags = Joiner.on(',').join(app.getApplicationTags());
       }
       this.finalStatus = app.getFinalApplicationStatus();
@@ -178,8 +178,8 @@
       if (hasAccess) {
         this.startedTime = app.getStartTime();
         this.finishedTime = app.getFinishTime();
-        this.elapsedTime = Times.elapsed(app.getStartTime(),
-            app.getFinishTime());
+        this.elapsedTime =
+            Times.elapsed(app.getStartTime(), app.getFinishTime());
         this.logAggregationStatus = app.getLogAggregationStatusForAppReport();
         RMAppAttempt attempt = app.getCurrentAppAttempt();
         if (attempt != null) {
@@ -194,8 +194,8 @@
 
           this.amRPCAddress = getAmRPCAddressFromRMAppAttempt(attempt);
 
-          ApplicationResourceUsageReport resourceReport = attempt
-              .getApplicationResourceUsageReport();
+          ApplicationResourceUsageReport resourceReport =
+              attempt.getApplicationResourceUsageReport();
           if (resourceReport != null) {
             Resource usedResources = resourceReport.getUsedResources();
             Resource reservedResources = resourceReport.getReservedResources();
@@ -208,10 +208,11 @@
             clusterUsagePercentage = resourceReport.getClusterUsagePercentage();
           }
 
-          /* When the deSelects parameter contains "resourceRequests",
-             it skips returning massive ResourceRequest objects and vice versa.
-             Default behavior is no skipping. (YARN-6280)
-          */
+          /*
+           * When the deSelects parameter contains "resourceRequests", it skips
+           * returning massive ResourceRequest objects and vice versa. Default
+           * behavior is no skipping. (YARN-6280)
+           */
           if (!deSelects.contains(DeSelectType.RESOURCE_REQUESTS)) {
             List<ResourceRequest> resourceRequestsRaw = rm.getRMContext()
                 .getScheduler().getPendingResourceRequestsForAttempt(
@@ -228,12 +229,9 @@
 
       // copy preemption info fields
       RMAppMetrics appMetrics = app.getRMAppMetrics();
-      numAMContainerPreempted =
-          appMetrics.getNumAMContainersPreempted();
-      preemptedResourceMB =
-          appMetrics.getResourcePreempted().getMemorySize();
-      numNonAMContainerPreempted =
-          appMetrics.getNumNonAMContainersPreempted();
+      numAMContainerPreempted = appMetrics.getNumAMContainersPreempted();
+      preemptedResourceMB = appMetrics.getResourcePreempted().getMemorySize();
+      numNonAMContainerPreempted = appMetrics.getNumNonAMContainersPreempted();
       preemptedResourceVCores =
           appMetrics.getResourcePreempted().getVirtualCores();
       memorySeconds = appMetrics.getMemorySeconds();
@@ -242,8 +240,7 @@
       preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds();
       ApplicationSubmissionContext appSubmissionContext =
           app.getApplicationSubmissionContext();
-      unmanagedApplication =
-          appSubmissionContext.getUnmanagedAM();
+      unmanagedApplication = appSubmissionContext.getUnmanagedAM();
       appNodeLabelExpression =
           app.getApplicationSubmissionContext().getNodeLabelExpression();
       amNodeLabelExpression = (unmanagedApplication) ? null
@@ -286,6 +283,7 @@
           timeouts.add(timeout);
         }
       }
+
     }
   }
 
@@ -396,19 +394,19 @@
   public String getApplicationTags() {
     return this.applicationTags;
   }
-  
+
   public int getRunningContainers() {
     return this.runningContainers;
   }
-  
+
   public long getAllocatedMB() {
     return this.allocatedMB;
   }
-  
+
   public long getAllocatedVCores() {
     return this.allocatedVCores;
   }
-  
+
   public long getReservedMB() {
     return this.reservedMB;
   }
@@ -417,22 +415,6 @@
     return this.reservedVCores;
   }
 
-  public long getPreemptedMB() {
-    return preemptedResourceMB;
-  }
-
-  public long getPreemptedVCores() {
-    return preemptedResourceVCores;
-  }
-
-  public int getNumNonAMContainersPreempted() {
-    return numNonAMContainerPreempted;
-  }
-  
-  public int getNumAMContainersPreempted() {
-    return numAMContainerPreempted;
-  }
- 
   public long getMemorySeconds() {
     return memorySeconds;
   }
@@ -448,10 +430,15 @@
   public long getPreemptedVcoreSeconds() {
     return preemptedVcoreSeconds;
   }
+
   public List<ResourceRequestInfo> getResourceRequests() {
     return this.resourceRequests;
   }
 
+  public void setResourceRequests(List<ResourceRequestInfo> resourceRequests) {
+    this.resourceRequests = resourceRequests;
+  }
+
   public LogAggregationStatus getLogAggregationStatus() {
     return this.logAggregationStatus;
   }
@@ -475,4 +462,89 @@
   public ResourcesInfo getResourceInfo() {
     return resourceInfo;
   }
+
+  public long getPreemptedResourceMB() {
+    return preemptedResourceMB;
+  }
+
+  public void setPreemptedResourceMB(long preemptedResourceMB) {
+    this.preemptedResourceMB = preemptedResourceMB;
+  }
+
+  public long getPreemptedResourceVCores() {
+    return preemptedResourceVCores;
+  }
+
+  public void setPreemptedResourceVCores(long preemptedResourceVCores) {
+    this.preemptedResourceVCores = preemptedResourceVCores;
+  }
+
+  public int getNumNonAMContainerPreempted() {
+    return numNonAMContainerPreempted;
+  }
+
+  public void setNumNonAMContainerPreempted(int numNonAMContainerPreempted) {
+    this.numNonAMContainerPreempted = numNonAMContainerPreempted;
+  }
+
+  public int getNumAMContainerPreempted() {
+    return numAMContainerPreempted;
+  }
+
+  public void setNumAMContainerPreempted(int numAMContainerPreempted) {
+    this.numAMContainerPreempted = numAMContainerPreempted;
+  }
+
+  public void setPreemptedMemorySeconds(long preemptedMemorySeconds) {
+    this.preemptedMemorySeconds = preemptedMemorySeconds;
+  }
+
+  public void setPreemptedVcoreSeconds(long preemptedVcoreSeconds) {
+    this.preemptedVcoreSeconds = preemptedVcoreSeconds;
+  }
+
+  public void setAllocatedMB(long allocatedMB) {
+    this.allocatedMB = allocatedMB;
+  }
+
+  public void setAllocatedVCores(long allocatedVCores) {
+    this.allocatedVCores = allocatedVCores;
+  }
+
+  public void setReservedMB(long reservedMB) {
+    this.reservedMB = reservedMB;
+  }
+
+  public void setReservedVCores(long reservedVCores) {
+    this.reservedVCores = reservedVCores;
+  }
+
+  public void setRunningContainers(int runningContainers) {
+    this.runningContainers = runningContainers;
+  }
+
+  public void setMemorySeconds(long memorySeconds) {
+    this.memorySeconds = memorySeconds;
+  }
+
+  public void setVcoreSeconds(long vcoreSeconds) {
+    this.vcoreSeconds = vcoreSeconds;
+  }
+
+  public void setAppId(String appId) {
+    this.id = appId;
+  }
+
+  @VisibleForTesting
+  public void setAMHostHttpAddress(String amHost) {
+    this.amHostHttpAddress = amHost;
+  }
+
+  public void setState(YarnApplicationState state) {
+    this.state = state;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java
index 84f68f1..39837b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java
@@ -40,4 +40,8 @@
     return app;
   }
 
+  public void addAll(ArrayList<AppInfo> appsInfo) {
+    app.addAll(appsInfo);
+  }
+
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index a4607c2..913513c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -48,8 +48,6 @@
   @XmlTransient
   private float fractionMemFairShare;
   @XmlTransient
-  private float fractionMemMinShare;
-  @XmlTransient
   private float fractionMemMaxShare;
   
   private ResourceInfo minResources;
@@ -63,7 +61,6 @@
   private ResourceInfo clusterResources;
   private ResourceInfo reservedResources;
 
-  private long pendingContainers;
   private long allocatedContainers;
   private long reservedContainers;
 
@@ -99,7 +96,6 @@
     steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
     fairResources = new ResourceInfo(queue.getFairShare());
     minResources = new ResourceInfo(queue.getMinShare());
-    maxResources = new ResourceInfo(queue.getMaxShare());
     maxResources = new ResourceInfo(
         Resources.componentwiseMin(queue.getMaxShare(),
             scheduler.getClusterResource()));
@@ -109,12 +105,10 @@
         (float)steadyFairResources.getMemorySize() / clusterResources.getMemorySize();
     fractionMemFairShare = (float) fairResources.getMemorySize()
         / clusterResources.getMemorySize();
-    fractionMemMinShare = (float)minResources.getMemorySize() / clusterResources.getMemorySize();
     fractionMemMaxShare = (float)maxResources.getMemorySize() / clusterResources.getMemorySize();
     
     maxApps = queue.getMaxRunningApps();
 
-    pendingContainers = queue.getMetrics().getPendingContainers();
     allocatedContainers = queue.getMetrics().getAllocatedContainers();
     reservedContainers = queue.getMetrics().getReservedContainers();
 
@@ -127,10 +121,6 @@
     childQueues = getChildQueues(queue, scheduler);
   }
 
-  public long getPendingContainers() {
-    return pendingContainers;
-  }
-
   public long getAllocatedContainers() {
     return allocatedContainers;
   }
@@ -235,14 +225,6 @@
   }
 
   /**
-   * Returns the queue's min share in as a fraction of the entire
-   * cluster capacity.
-   */
-  public float getMinShareMemoryFraction() {
-    return fractionMemMinShare;
-  }
-  
-  /**
    * Returns the memory used by this queue as a fraction of the entire 
    * cluster capacity.
    */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 2451c1e..a698ecf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -126,6 +126,18 @@
     }
   }
 
+  public boolean setApplicationLastResponseId(int newLastResponseId) {
+    ApplicationMasterService applicationMasterService =
+        (ApplicationMasterService) amRMProtocol;
+    responseId = newLastResponseId;
+    return applicationMasterService.setAttemptLastResponseId(attemptId,
+        newLastResponseId);
+  }
+
+  public int getResponseId() {
+    return responseId;
+  }
+
   public void addRequests(String[] hosts, int memory, int priority,
       int containers) throws Exception {
     addRequests(hosts, memory, priority, containers, 0L);
@@ -272,19 +284,22 @@
 
   public AllocateResponse doAllocateAs(UserGroupInformation ugi,
       final AllocateRequest req) throws Exception {
-    req.setResponseId(++responseId);
+    req.setResponseId(responseId);
     try {
-      return ugi.doAs(new PrivilegedExceptionAction<AllocateResponse>() {
-        @Override
-        public AllocateResponse run() throws Exception {
-          return amRMProtocol.allocate(req);
-        }
-      });
+      AllocateResponse response =
+          ugi.doAs(new PrivilegedExceptionAction<AllocateResponse>() {
+            @Override
+            public AllocateResponse run() throws Exception {
+              return amRMProtocol.allocate(req);
+            }
+          });
+      responseId = response.getResponseId();
+      return response;
     } catch (UndeclaredThrowableException e) {
       throw (Exception) e.getCause();
     }
   }
-  
+
   public AllocateResponse doHeartbeat() throws Exception {
     return allocate(null, null);
   }
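
doAllocateAs() no longer pre-increments responseId: the AM echoes the last id it saw and adopts whatever the RM hands back. A self-contained sketch of that handshake; nextResponseId() is only a stand-in for the RM side, which this hunk does not show, but its wrap-around from Integer.MAX_VALUE to 0 matches the overflow test later in this diff:

public class ResponseIdHandshakeSketch {
  private int responseId = 0; // last responseId received from the RM

  /** Stand-in for the RM-side increment; wraps Integer.MAX_VALUE to 0. */
  static int nextResponseId(int current) {
    return current == Integer.MAX_VALUE ? 0 : current + 1;
  }

  int allocateOnce() {
    int sent = responseId;             // echoed as-is, no local ++
    int fromRm = nextResponseId(sent); // stand-in for the allocate round trip
    responseId = fromRm;               // adopt whatever the RM returned
    return fromRm;
  }

  public static void main(String[] args) {
    ResponseIdHandshakeSketch am = new ResponseIdHandshakeSketch();
    am.responseId = Integer.MAX_VALUE;
    System.out.println(am.allocateOnce()); // 0
    System.out.println(am.allocateOnce()); // 1
  }
}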
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
index 2d76127..4a8ff00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
@@ -24,6 +24,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -39,6 +40,7 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
@@ -60,6 +62,8 @@
   private String version;
   private Map<ContainerId, ContainerStatus> containerStats =
       new HashMap<ContainerId, ContainerStatus>();
+  private Map<ApplicationId, AppCollectorData> registeringCollectors
+      = new ConcurrentHashMap<>();
 
   public MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) {
     // scale vcores based on the requested memory
@@ -117,6 +121,15 @@
         true, ++responseId);
   }
 
+  public void addRegisteringCollector(ApplicationId appId,
+      AppCollectorData data) {
+    this.registeringCollectors.put(appId, data);
+  }
+
+  public Map<ApplicationId, AppCollectorData> getRegisteringCollectors() {
+    return this.registeringCollectors;
+  }
+
   public RegisterNodeManagerResponse registerNode() throws Exception {
     return registerNode(null, null);
   }
@@ -229,6 +242,9 @@
     req.setNodeStatus(status);
     req.setLastKnownContainerTokenMasterKey(this.currentContainerTokenMasterKey);
     req.setLastKnownNMTokenMasterKey(this.currentNMTokenMasterKey);
+
+    req.setRegisteringCollectors(this.registeringCollectors);
+
     NodeHeartbeatResponse heartbeatResponse =
         resourceTracker.nodeHeartbeat(req);
     
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 91170d1..611c7f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -242,7 +242,7 @@
     }
 
     @Override
-    public void updateNodeHeartbeatResponseForContainersDecreasing(
+    public void updateNodeHeartbeatResponseForUpdatedContainers(
         NodeHeartbeatResponse response) {
       
     }
@@ -263,7 +263,7 @@
     }
 
     public OpportunisticContainersStatus getOpportunisticContainersStatus() {
-      return null;
+      return OpportunisticContainersStatus.newInstance();
     }
 
     @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index e967807..1235774 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -513,6 +513,17 @@
         false, false, null, 0, null, true, priority);
   }
 
+  public RMApp submitApp(int masterMemory, Priority priority,
+      Credentials cred, ByteBuffer tokensConf) throws Exception {
+    Resource resource = Resource.newInstance(masterMemory, 0);
+    return submitApp(resource, "", UserGroupInformation.getCurrentUser()
+        .getShortUserName(), null, false, null,
+        super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+            YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), cred, null, true,
+        false, false, null, 0, null, true, priority, null, null,
+        tokensConf);
+  }
+
   public RMApp submitApp(int masterMemory, boolean unmanaged)
       throws Exception {
     return submitApp(masterMemory, "", UserGroupInformation.getCurrentUser()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 85a36e7..e684f3c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -272,7 +272,42 @@
     Assert.assertEquals(MockRM.getClusterTimeStamp(), tokenId.getRMIdentifier());
     rm.stop();
   }
-  
+
+  @Test(timeout = 3000000)
+  public void testAllocateResponseIdOverflow() throws Exception {
+    MockRM rm = new MockRM(conf);
+    try {
+      rm.start();
+
+      // Register node1
+      MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
+
+      // Submit an application
+      RMApp app1 = rm.submitApp(2048);
+
+      // kick the scheduling
+      nm1.nodeHeartbeat(true);
+      RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+      MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+      am1.registerAppAttempt();
+
+      // Set the last responseId to Integer.MAX_VALUE
+      Assert.assertTrue(am1.setApplicationLastResponseId(Integer.MAX_VALUE));
+
+      // Both allocates should succeed
+      am1.schedule(); // send allocate with responseId = Integer.MAX_VALUE
+      Assert.assertEquals(0, am1.getResponseId());
+
+      am1.schedule(); // send allocate with responseId = 0
+      Assert.assertEquals(1, am1.getResponseId());
+
+    } finally {
+      if (rm != null) {
+        rm.stop();
+      }
+    }
+  }
+
   @Test(timeout=600000)
   public void testInvalidContainerReleaseRequest() throws Exception {
     MockRM rm = new MockRM(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 9ce02bc..ea733a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -994,6 +994,18 @@
     Assert.assertEquals(appId2,
         getAllApplicationsResponse.getApplicationList()
             .get(0).getApplicationId());
+
+    // Test query with uppercase appType also works
+    appTypes = new HashSet<String>();
+    appTypes.add("MATCHTYPE");
+    getAllAppsRequest = GetApplicationsRequest.newInstance(appTypes);
+    getAllApplicationsResponse =
+        rmService.getApplications(getAllAppsRequest);
+    Assert.assertEquals(1,
+        getAllApplicationsResponse.getApplicationList().size());
+    Assert.assertEquals(appId2,
+        getAllApplicationsResponse.getApplicationList()
+            .get(0).getApplicationId());
   }
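
The assertions just above query with an uppercase appType ("MATCHTYPE") and still match, implying the RM normalizes application types before comparing. A minimal, self-contained sketch of that normalization under the assumption that matching is done on lower-cased values; the RM-side filtering code is not part of this hunk, so plain collections stand in for GetApplicationsRequest:

import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

public class AppTypeFilterSketch {
  static Set<String> toLower(Set<String> types) {
    Set<String> out = new HashSet<>();
    for (String t : types) {
      out.add(t.toLowerCase(Locale.ENGLISH));
    }
    return out;
  }

  public static void main(String[] args) {
    Set<String> query = new HashSet<>();
    query.add("MATCHTYPE");                  // uppercase query from the client
    Set<String> normalized = toLower(query); // normalize before matching
    System.out.println(normalized.contains("matchtype")); // true
  }
}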
 
   @Test
@@ -1087,7 +1099,7 @@
     assertEquals("Incorrect number of applications in queue", 2,
         rmService.getApplications(request).getApplicationList().size());
     assertEquals("Incorrect number of applications in queue", 2,
-        rmService.getApplications(request, false).getApplicationList().size());
+        rmService.getApplications(request).getApplicationList().size());
 
     queueSet.add(queues[1]);
     assertEquals("Incorrect number of applications in queue", 3,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index 6819395..9b9eb3c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -43,11 +43,15 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocolPB;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
@@ -106,6 +110,7 @@
   private static final int GB = 1024;
 
   private MockRM rm;
+  private DrainDispatcher dispatcher;
 
   @Before
   public void createAndStartRM() {
@@ -118,7 +123,31 @@
         YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
     conf.setInt(
         YarnConfiguration.NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS, 100);
-    rm = new MockRM(conf);
+    startRM(conf);
+  }
+
+  public void createAndStartRMWithAutoUpdateContainer() {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    YarnConfiguration conf = new YarnConfiguration(csConf);
+    conf.setBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, true);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS, 100);
+    startRM(conf);
+  }
+
+  private void startRM(final YarnConfiguration conf) {
+    dispatcher = new DrainDispatcher();
+    rm = new MockRM(conf) {
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
     rm.start();
   }
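
startRM() threads a DrainDispatcher into MockRM through the createDispatcher() override, so tests can block until the asynchronous scheduler event queue empties (see the waitForEventThreadToWait() calls later in this file) instead of relying purely on sleeps. The pattern in isolation, using only names visible in this patch:

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class DrainDispatcherPattern {
  public static void main(String[] args) {
    DrainDispatcher dispatcher = new DrainDispatcher();
    MockRM rm = new MockRM(new YarnConfiguration()) {
      @Override
      protected Dispatcher createDispatcher() {
        return dispatcher; // every RM event now flows through this dispatcher
      }
    };
    rm.start();
    // ... drive the scheduler ...
    dispatcher.waitForEventThreadToWait(); // block until the queue drains
    rm.stop();
  }
}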
 
@@ -163,17 +192,6 @@
     nm3.nodeHeartbeat(true);
     nm4.nodeHeartbeat(true);
 
-    ((RMNodeImpl) rmNode1)
-        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
-    ((RMNodeImpl) rmNode2)
-        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
-    ((RMNodeImpl) rmNode3)
-        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
-    ((RMNodeImpl) rmNode4)
-        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
-
-    OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
     // Send add and update node events to AM Service.
     amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
     amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
@@ -229,6 +247,9 @@
     allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
     Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
 
+    // Wait for scheduler to process all events
+    dispatcher.waitForEventThreadToWait();
+    Thread.sleep(1000);
     // Verify Metrics After OPP allocation (Nothing should change again)
     verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
 
@@ -302,6 +323,8 @@
     Assert.assertEquals(uc.getId(), container.getId());
     Assert.assertEquals(uc.getVersion(), container.getVersion() + 2);
 
+    // Wait for scheduler to finish processing events
+    dispatcher.waitForEventThreadToWait();
     // Verify Metrics After OPP allocation :
     // Everything should have reverted to what it was
     verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
@@ -548,6 +571,159 @@
     verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
   }
 
+  @Test(timeout = 600000)
+  public void testContainerAutoUpdateContainer() throws Exception {
+    rm.stop();
+    createAndStartRMWithAutoUpdateContainer();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nm1.registerNode();
+
+    OpportunisticContainerAllocatorAMService amservice =
+        (OpportunisticContainerAllocatorAMService) rm
+            .getApplicationMasterService();
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    ApplicationAttemptId attemptId =
+        app1.getCurrentAppAttempt().getAppAttemptId();
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+    ResourceScheduler scheduler = rm.getResourceScheduler();
+    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
+
+    nm1.nodeHeartbeat(true);
+
+    ((RMNodeImpl) rmNode1)
+        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
+
+    OpportunisticContainerContext ctxt =
+        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+            .getOpportunisticContainerContext();
+    // Send add and update node events to AM Service.
+    amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
+    amservice.handle(new NodeUpdateSchedulerEvent(rmNode1));
+
+    nm1.nodeHeartbeat(true);
+    Thread.sleep(1000);
+
+    AllocateResponse allocateResponse = am1.allocate(Arrays.asList(
+        ResourceRequest.newInstance(Priority.newInstance(1), "*",
+            Resources.createResource(1 * GB), 2, true, null,
+            ExecutionTypeRequest
+                .newInstance(ExecutionType.OPPORTUNISTIC, true))), null);
+    List<Container> allocatedContainers =
+        allocateResponse.getAllocatedContainers();
+    Assert.assertEquals(2, allocatedContainers.size());
+    Container container = allocatedContainers.get(0);
+    // Start Container in NM
+    nm1.nodeHeartbeat(Arrays.asList(ContainerStatus
+        .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC,
+            ContainerState.RUNNING, "", 0)), true);
+    Thread.sleep(200);
+
+    // Verify that the container is actually running from the RM's view.
+    RMContainer rmContainer = ((CapacityScheduler) scheduler)
+        .getApplicationAttempt(container.getId().getApplicationAttemptId())
+        .getRMContainer(container.getId());
+    Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
+
+    // Send promotion request. With auto-update enabled, the RM applies the
+    // update and returns it on a subsequent allocate call.
+    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(
+        UpdateContainerRequest.newInstance(0, container.getId(),
+            ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null,
+            ExecutionType.GUARANTEED)));
+
+    nm1.nodeHeartbeat(Arrays.asList(ContainerStatus
+        .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC,
+            ContainerState.RUNNING, "", 0)), true);
+    Thread.sleep(200);
+    // Get the update response on next allocate
+    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
+    // Check the update response from YARNRM
+    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
+    UpdatedContainer uc = allocateResponse.getUpdatedContainers().get(0);
+    Assert.assertEquals(container.getId(), uc.getContainer().getId());
+    Assert.assertEquals(ExecutionType.GUARANTEED,
+        uc.getContainer().getExecutionType());
+    // Check that the container is updated in NM through NM heartbeat response
+    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(1, response.getContainersToUpdate().size());
+    Container containersFromNM = response.getContainersToUpdate().get(0);
+    Assert.assertEquals(container.getId(), containersFromNM.getId());
+    Assert.assertEquals(ExecutionType.GUARANTEED,
+        containersFromNM.getExecutionType());
+
+    // Increase resources
+    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(
+        UpdateContainerRequest.newInstance(1, container.getId(),
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(2 * GB, 1), null)));
+    response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus
+        .newInstance(container.getId(), ExecutionType.GUARANTEED,
+            ContainerState.RUNNING, "", 0)), true);
+
+    Thread.sleep(200);
+    if (allocateResponse.getUpdatedContainers().size() == 0) {
+      allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
+    }
+    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
+    uc = allocateResponse.getUpdatedContainers().get(0);
+    Assert.assertEquals(container.getId(), uc.getContainer().getId());
+    Assert.assertEquals(Resource.newInstance(2 * GB, 1),
+        uc.getContainer().getResource());
+    Thread.sleep(1000);
+
+    // Check that the container resources are increased in
+    // NM through NM heartbeat response
+    if (response.getContainersToUpdate().size() == 0) {
+      response = nm1.nodeHeartbeat(true);
+    }
+    Assert.assertEquals(1, response.getContainersToUpdate().size());
+    Assert.assertEquals(Resource.newInstance(2 * GB, 1),
+        response.getContainersToUpdate().get(0).getResource());
+
+    // Decrease resources
+    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(
+        UpdateContainerRequest.newInstance(2, container.getId(),
+            ContainerUpdateType.DECREASE_RESOURCE,
+            Resources.createResource(1 * GB, 1), null)));
+    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
+    Thread.sleep(1000);
+
+    // Check that the container resources are decreased
+    // in NM through NM heartbeat response
+    response = nm1.nodeHeartbeat(true);
+    Assert.assertEquals(1, response.getContainersToUpdate().size());
+    Assert.assertEquals(Resource.newInstance(1 * GB, 1),
+        response.getContainersToUpdate().get(0).getResource());
+
+    nm1.nodeHeartbeat(true);
+    // DEMOTE the container
+    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(
+        UpdateContainerRequest.newInstance(3, container.getId(),
+            ContainerUpdateType.DEMOTE_EXECUTION_TYPE, null,
+            ExecutionType.OPPORTUNISTIC)));
+
+    response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus
+        .newInstance(container.getId(), ExecutionType.GUARANTEED,
+            ContainerState.RUNNING, "", 0)), true);
+    Thread.sleep(200);
+    if (allocateResponse.getUpdatedContainers().size() == 0) {
+      // Get the update response on next allocate
+      allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
+    }
+    // Check the update response from YARNRM
+    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
+    uc = allocateResponse.getUpdatedContainers().get(0);
+    Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
+        uc.getContainer().getExecutionType());
+    // Check that the container is updated in NM through NM heartbeat response
+    if (response.getContainersToUpdate().size() == 0) {
+      response = nm1.nodeHeartbeat(true);
+    }
+    Assert.assertEquals(1, response.getContainersToUpdate().size());
+    Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
+        response.getContainersToUpdate().get(0).getExecutionType());
+  }
+
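
The four update requests in this test carry monotonically increasing version numbers (0 through 3): each UpdateContainerRequest names the container version the AM last observed, and each applied update bumps it, so a stale version would not match. A self-contained sketch of that discipline, with plain ints standing in for the YARN records; the exact increment the RM applies is not pinned down by this patch, so the sketch assumes +1 per applied update:

public class ContainerVersionSketch {
  private int version = 0; // current container version on the RM side

  /** Bumps the version only if the caller saw the latest one. */
  boolean tryUpdate(int versionSeenByAm) {
    if (versionSeenByAm != version) {
      return false; // stale request: the AM must re-read the current version
    }
    version++;
    return true;
  }

  public static void main(String[] args) {
    ContainerVersionSketch c = new ContainerVersionSketch();
    System.out.println(c.tryUpdate(0)); // true  - promote
    System.out.println(c.tryUpdate(0)); // false - stale, version is now 1
    System.out.println(c.tryUpdate(1)); // true  - increase resources
  }
}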
   private void verifyMetrics(QueueMetrics metrics, long availableMB,
       int availableVirtualCores, long allocatedMB,
       int allocatedVirtualCores, int allocatedContainers) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java
new file mode 100644
index 0000000..fa0d318
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHATimelineCollectors.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test if the new active RM could recover collector status on a state
+ * transition.
+ */
+public class TestRMHATimelineCollectors extends RMHATestBase {
+  public static final Log LOG = LogFactory
+      .getLog(TestRMHATimelineCollectors.class);
+
+  @Before
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    confForRM1.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    confForRM2.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    confForRM1.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+        FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+    confForRM1.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    confForRM2.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    confForRM2.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+        FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+  }
+
+  @Test
+  public void testRebuildCollectorDataOnFailover() throws Exception {
+    startRMs();
+    MockNM nm1
+        = new MockNM("127.0.0.1:1234", 15120, rm2.getResourceTrackerService());
+    MockNM nm2
+        = new MockNM("127.0.0.1:5678", 15121, rm2.getResourceTrackerService());
+    RMApp app1 = rm1.submitApp(1024);
+    String collectorAddr1 = "1.2.3.4:5";
+    AppCollectorData data1 = AppCollectorData.newInstance(
+        app1.getApplicationId(), collectorAddr1);
+    nm1.addRegisteringCollector(app1.getApplicationId(), data1);
+
+    String collectorAddr2 = "5.4.3.2:1";
+    RMApp app2 = rm1.submitApp(1024);
+    AppCollectorData data2 = AppCollectorData.newInstance(
+        app2.getApplicationId(), collectorAddr2, rm1.getStartTime(), 1);
+    nm1.addRegisteringCollector(app2.getApplicationId(), data2);
+
+    explicitFailover();
+
+    List<ApplicationId> runningApps = new ArrayList<>();
+    runningApps.add(app1.getApplicationId());
+    runningApps.add(app2.getApplicationId());
+    nm1.registerNode(runningApps);
+    nm2.registerNode(runningApps);
+
+    String collectorAddr12 = "1.2.3.4:56";
+    AppCollectorData data12 = AppCollectorData.newInstance(
+        app1.getApplicationId(), collectorAddr12, rm1.getStartTime(), 0);
+    nm2.addRegisteringCollector(app1.getApplicationId(), data12);
+
+    String collectorAddr22 = "5.4.3.2:10";
+    AppCollectorData data22 = AppCollectorData.newInstance(
+        app2.getApplicationId(), collectorAddr22, rm1.getStartTime(), 2);
+    nm2.addRegisteringCollector(app2.getApplicationId(), data22);
+
+    Map<ApplicationId, AppCollectorData> results1
+        = nm1.nodeHeartbeat(true).getAppCollectors();
+    assertEquals(collectorAddr1,
+        results1.get(app1.getApplicationId()).getCollectorAddr());
+    assertEquals(collectorAddr2,
+        results1.get(app2.getApplicationId()).getCollectorAddr());
+
+    Map<ApplicationId, AppCollectorData> results2
+        = nm2.nodeHeartbeat(true).getAppCollectors();
+    // The addr of app1 should be collectorAddr1, since its entry is still
+    // registering (no timestamp).
+    assertEquals(collectorAddr1,
+        results2.get(app1.getApplicationId()).getCollectorAddr());
+    // addr of app2 should be collectorAddr22 since its version number is
+    // greater.
+    assertEquals(collectorAddr22,
+        results2.get(app2.getApplicationId()).getCollectorAddr());
+
+    // Now nm1 should get updated collector list
+    nm1.getRegisteringCollectors().clear();
+    Map<ApplicationId, AppCollectorData> results12
+        = nm1.nodeHeartbeat(true).getAppCollectors();
+    assertEquals(collectorAddr1,
+        results12.get(app1.getApplicationId()).getCollectorAddr());
+    assertEquals(collectorAddr22,
+        results12.get(app2.getApplicationId()).getCollectorAddr());
+
+  }
+}
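
The final assertions rely on an ordering rule for collector entries: between two stamped AppCollectorData entries for the same app, the one with the lexicographically larger (RM start time, version) pair wins, which is why collectorAddr22 (version 2) displaces collectorAddr2 (version 1). A self-contained sketch of just that comparison; the handling of unstamped, still-registering entries (the app1 cases) is more involved and not reproduced here:

public class CollectorStampSketch {
  /** True if stamp A is strictly newer than stamp B. */
  static boolean newer(long rmStartA, long versionA,
                       long rmStartB, long versionB) {
    return rmStartA > rmStartB
        || (rmStartA == rmStartB && versionA > versionB);
  }

  public static void main(String[] args) {
    long rmStart = 1_500_000_000_000L; // stand-in for rm1.getStartTime()
    // data22 (version 2) vs data2 (version 1): data22 wins.
    System.out.println(newer(rmStart, 2, rmStart, 1)); // true
  }
}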
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 3130ad1..5cbcdbc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
@@ -67,6 +68,7 @@
 import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -107,6 +109,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
@@ -2504,6 +2507,22 @@
 
   @Test(timeout = 60000)
   public void testRMRestartAfterNodeLabelDisabled() throws Exception {
+    if (getSchedulerType() != SchedulerType.CAPACITY) {
+      return;
+    }
+
+    // Initialize the FS node label store root dir to a random tmp dir
+    File nodeLabelFsStoreDir = new File("target",
+        this.getClass().getSimpleName()
+            + "-testRMRestartAfterNodeLabelDisabled");
+    if (nodeLabelFsStoreDir.exists()) {
+      FileUtils.deleteDirectory(nodeLabelFsStoreDir);
+    }
+    nodeLabelFsStoreDir.deleteOnExit();
+    String nodeLabelFsStoreDirURI = nodeLabelFsStoreDir.toURI().toString();
+    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+        nodeLabelFsStoreDirURI);
+
     conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
 
     MockRM rm1 = new MockRM(
@@ -2567,4 +2586,114 @@
       rm2.stop();
     }
   }
+
+  @Test(timeout = 20000)
+  public void testRMRestartAfterPriorityChangesInAllocatedResponse()
+      throws Exception {
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+
+    // Set Max Application Priority as 10
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY,
+        10);
+    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
+        false);
+
+    // Start RM
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm = new TestSecurityMockRM(conf);
+    rm.start();
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm.getRMStateStore();
+
+    // Register node1
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * 1024);
+
+    // Submit an application
+    Priority appPriority1 = Priority.newInstance(5);
+    RMApp app1 = rm.submitApp(2048, appPriority1,
+        getCreds(), getTokensConf());
+
+    nm1.nodeHeartbeat(true);
+    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+    am1.registerAppAttempt();
+
+    AllocateRequestPBImpl allocateRequest = new AllocateRequestPBImpl();
+    List<ContainerId> release = new ArrayList<ContainerId>();
+    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
+    allocateRequest.setReleaseList(release);
+    allocateRequest.setAskList(ask);
+
+    AllocateResponse response1 = am1.allocate(allocateRequest);
+    Assert.assertEquals(appPriority1, response1.getApplicationPriority());
+
+    // Change the priority of App1 to 8
+    Priority appPriority2 = Priority.newInstance(8);
+    UserGroupInformation ugi = UserGroupInformation
+        .createRemoteUser(app1.getUser());
+    rm.getRMAppManager().updateApplicationPriority(ugi,
+        app1.getApplicationId(), appPriority2);
+
+    AllocateResponse response2 = am1.allocate(allocateRequest);
+    Assert.assertEquals(appPriority2, response2.getApplicationPriority());
+
+    /*
+     * Ensure tokensConf has been retained even after the UPDATE_APP event in
+     * RMStateStore, which gets triggered because of the change in priority.
+     */
+    Map<ApplicationId, ApplicationStateData> rmAppState =
+        memStore.getState().getApplicationState();
+    ApplicationStateData appState =
+        rmAppState.get(app1.getApplicationId());
+    Assert.assertEquals(getTokensConf(),
+        appState.getApplicationSubmissionContext().
+        getAMContainerSpec().getTokensConf());
+
+    MockRM rm2 = new TestSecurityMockRM(conf, memStore);
+    rm2.start();
+
+    AllocateResponse response3 = am1.allocate(allocateRequest);
+    Assert.assertEquals(appPriority2, response3.getApplicationPriority());
+
+    /*
+     * Ensure tokensConf has been retained even after RECOVER event in
+     * RMStateStore, which gets triggered as part of RM START.
+     */
+    Map<ApplicationId, ApplicationStateData> rmAppStateNew =
+        memStore.getState().getApplicationState();
+    ApplicationStateData appStateNew =
+        rmAppStateNew.get(app1.getApplicationId());
+    Assert.assertEquals(getTokensConf(),
+        appStateNew.getApplicationSubmissionContext().
+        getAMContainerSpec().getTokensConf());
+
+    rm.stop();
+    rm2.stop();
+  }
+
+  private Credentials getCreds() throws IOException {
+    Credentials ts = new Credentials();
+    DataOutputBuffer dob = new DataOutputBuffer();
+    ts.writeTokenStorageToStream(dob);
+    return ts;
+  }
+
+  private ByteBuffer getTokensConf() throws IOException {
+    DataOutputBuffer dob = new DataOutputBuffer();
+    Configuration appConf = new Configuration(false);
+    appConf.clear();
+    appConf.set("dfs.nameservices", "mycluster1,mycluster2");
+    appConf.set("dfs.namenode.rpc-address.mycluster2.nn1",
+        "123.0.0.1");
+    appConf.set("dfs.namenode.rpc-address.mycluster3.nn2",
+        "123.0.0.2");
+    appConf.write(dob);
+    ByteBuffer tokenConf =
+        ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+    return tokenConf;
+  }
 }
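
getTokensConf() above serializes a Configuration into a ByteBuffer through the Writable interface; the inverse path, which the consuming side would need to materialize the stored tokensConf, is symmetric. A hedged sketch of that reverse trip (this direction is not part of the patch; DataInputByteBuffer is the stock org.apache.hadoop.io class):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;

public class TokensConfRoundTrip {
  static Configuration readTokensConf(ByteBuffer tokensConf)
      throws IOException {
    DataInputByteBuffer dib = new DataInputByteBuffer();
    dib.reset(tokensConf.duplicate()); // don't disturb the caller's position
    Configuration conf = new Configuration(false);
    conf.readFields(dib);              // inverse of Configuration#write
    return conf;
  }
}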
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 1e63fdf..5ed3278 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
@@ -70,6 +71,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -1011,13 +1013,23 @@
     RMNodeImpl node2 =
         (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm2.getNodeId());
 
-    RMApp app1 = rm.submitApp(1024);
+    RMAppImpl app1 = (RMAppImpl) rm.submitApp(1024);
     String collectorAddr1 = "1.2.3.4:5";
-    app1.setCollectorAddr(collectorAddr1);
+    app1.setCollectorData(AppCollectorData.newInstance(
+        app1.getApplicationId(), collectorAddr1));
 
     String collectorAddr2 = "5.4.3.2:1";
-    RMApp app2 = rm.submitApp(1024);
-    app2.setCollectorAddr(collectorAddr2);
+    RMAppImpl app2 = (RMAppImpl) rm.submitApp(1024);
+    app2.setCollectorData(AppCollectorData.newInstance(
+        app2.getApplicationId(), collectorAddr2));
+
+    String collectorAddr3 = "5.4.3.2:2";
+    app2.setCollectorData(AppCollectorData.newInstance(
+        app2.getApplicationId(), collectorAddr3, 0, 1));
+
+    String collectorAddr4 = "5.4.3.2:3";
+    app2.setCollectorData(AppCollectorData.newInstance(
+        app2.getApplicationId(), collectorAddr4, 1, 0));
 
     // Create a running container for app1 running on nm1
     ContainerId runningContainerId1 = BuilderUtils.newContainerId(
@@ -1055,14 +1067,18 @@
     Assert.assertEquals(app2.getApplicationId(), node2.getRunningApps().get(0));
 
     nodeHeartbeat1 = nm1.nodeHeartbeat(true);
-    Map<ApplicationId, String> map1 = nodeHeartbeat1.getAppCollectorsMap();
+    Map<ApplicationId, AppCollectorData> map1
+        = nodeHeartbeat1.getAppCollectors();
     Assert.assertEquals(1, map1.size());
-    Assert.assertEquals(collectorAddr1, map1.get(app1.getApplicationId()));
+    Assert.assertEquals(collectorAddr1,
+        map1.get(app1.getApplicationId()).getCollectorAddr());
 
     nodeHeartbeat2 = nm2.nodeHeartbeat(true);
-    Map<ApplicationId, String> map2 = nodeHeartbeat2.getAppCollectorsMap();
+    Map<ApplicationId, AppCollectorData> map2
+        = nodeHeartbeat2.getAppCollectors();
     Assert.assertEquals(1, map2.size());
-    Assert.assertEquals(collectorAddr2, map2.get(app2.getApplicationId()));
+    Assert.assertEquals(collectorAddr4,
+        map2.get(app2.getApplicationId()).getCollectorAddr());
   }
 
   private void checkRebootedNMCount(MockRM rm2, int count)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
index 2a4e49d..78271c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -80,6 +81,8 @@
     conf.set("hadoop.security.authentication", "kerberos");
     conf.set("hadoop.security.auth_to_local", kerberosRule);
     UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.getLoginUser()
+        .setAuthenticationMethod(AuthenticationMethod.KERBEROS);
   }
 
   @AfterClass
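The two-line fix above (repeated in TestRMDelegationTokens further down) marks the login user as Kerberos-authenticated so that delegation-token RPCs pass UserGroupInformation's authentication-method check. A condensed sketch of the setup pattern, assuming a plain JUnit 4 test class:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.junit.BeforeClass;

public class KerberosSecuredTestBase {
  @BeforeClass
  public static void setupSecurity() throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    // Without this, the cached login user still reports SIMPLE and
    // get/renew/cancelDelegationToken calls are rejected.
    UserGroupInformation.getLoginUser()
        .setAuthenticationMethod(AuthenticationMethod.KERBEROS);
  }
}
```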
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 5246eb7..f826631 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
@@ -97,15 +99,7 @@
       throw new UnsupportedOperationException("Not supported yet.");
     }
     @Override
-    public String getCollectorAddr() {
-      throw new UnsupportedOperationException("Not supported yet.");
-    }
-    @Override
-    public void setCollectorAddr(String collectorAddr) {
-      throw new UnsupportedOperationException("Not supported yet.");
-    }
-    @Override
-    public void removeCollectorAddr() {
+    public AppCollectorData getCollectorData() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
     @Override
@@ -246,6 +240,11 @@
     public boolean isAppInCompletedStates() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public CollectorInfo getCollectorInfo() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
   }
 
   public static RMApp newApplication(int i) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index ec09945..c6bfcc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -69,6 +69,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -216,7 +217,8 @@
             + FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_EXTENSION;
     File appFile = new File(outputDirApp, timelineServiceFileName);
     Assert.assertTrue(appFile.exists());
-    verifyEntity(appFile, 3, ApplicationMetricsConstants.CREATED_EVENT_TYPE, 8);
+    verifyEntity(
+        appFile, 3, ApplicationMetricsConstants.CREATED_EVENT_TYPE, 8, 0);
   }
 
   @Test(timeout = 10000)
@@ -251,7 +253,7 @@
     File appFile = new File(outputDirApp, timelineServiceFileName);
     Assert.assertTrue(appFile.exists());
     verifyEntity(appFile, 2, AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE,
-        0);
+        0, TimelineServiceHelper.invertLong(appAttemptId.getAttemptId()));
   }
 
   @Test(timeout = 10000)
@@ -283,7 +285,7 @@
     File appFile = new File(outputDirApp, timelineServiceFileName);
     Assert.assertTrue(appFile.exists());
     verifyEntity(appFile, 2,
-        ContainerMetricsConstants.CREATED_IN_RM_EVENT_TYPE, 0);
+        ContainerMetricsConstants.CREATED_IN_RM_EVENT_TYPE, 0, 0);
   }
 
   private RMApp createAppAndRegister(ApplicationId appId) {
@@ -297,7 +299,8 @@
   }
 
   private static void verifyEntity(File entityFile, long expectedEvents,
-      String eventForCreatedTime, long expectedMetrics) throws IOException {
+      String eventForCreatedTime, long expectedMetrics, long idPrefix)
+      throws IOException {
     BufferedReader reader = null;
     String strLine;
     long count = 0;
@@ -309,6 +312,7 @@
           TimelineEntity entity = FileSystemTimelineReaderImpl.
               getTimelineRecordFromJSON(strLine.trim(), TimelineEntity.class);
           metricsCount = entity.getMetrics().size();
+          assertEquals(idPrefix, entity.getIdPrefix());
           for (TimelineEvent event : entity.getEvents()) {
             if (event.getId().equals(eventForCreatedTime)) {
               assertTrue(entity.getCreatedTime() > 0);
@@ -394,6 +398,7 @@
     when(appAttempt.getTrackingUrl()).thenReturn("test tracking url");
     when(appAttempt.getOriginalTrackingUrl()).thenReturn(
         "test original tracking url");
+    when(appAttempt.getStartTime()).thenReturn(200L);
     return appAttempt;
   }
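The new idPrefix assertion compares against TimelineServiceHelper.invertLong of the attempt id. Assuming the helper is the usual Long.MAX_VALUE inversion, the point is that larger attempt ids map to smaller prefixes, so the newest attempt sorts first under ascending row-key order; an illustrative sketch:

```java
// Illustrative sketch; the real helper lives in
// org.apache.hadoop.yarn.util.TimelineServiceHelper.
public final class InvertLongSketch {
  static long invertLong(long key) {
    // Assumption: the helper computes Long.MAX_VALUE - key.
    return Long.MAX_VALUE - key;
  }

  public static void main(String[] args) {
    long prefix1 = invertLong(1); // attempt 1
    long prefix2 = invertLong(2); // attempt 2
    // prefix2 < prefix1, so attempt 2 sorts before attempt 1.
    System.out.println(prefix2 < prefix1); // true
  }
}
```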
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index 9290ff8..39a7f99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
@@ -305,17 +307,8 @@
     throw new UnsupportedOperationException("Not supported yet.");
   }
 
-  public String getCollectorAddr() {
-    throw new UnsupportedOperationException("Not supported yet.");
-  }
-
   @Override
-  public void removeCollectorAddr() {
-    throw new UnsupportedOperationException("Not supported yet.");
-  }
-
-  @Override
-  public void setCollectorAddr(String collectorAddr) {
+  public AppCollectorData getCollectorData() {
     throw new UnsupportedOperationException("Not supported yet.");
   }
 
@@ -333,4 +326,9 @@
   public boolean isAppInCompletedStates() {
     return false;
   }
+
+  @Override
+  public CollectorInfo getCollectorInfo() {
+    throw new UnsupportedOperationException("Not supported yet.");
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
index 19b5dd9..295b59f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.util.BoundedAppender;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -86,7 +87,7 @@
     appAttempt.appendDiagnostics(beyondLimit);
 
     final String truncated = String.format(
-        RMAppAttemptImpl.BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 1024,
+        BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 1024,
         1025, beyondLimit.substring(1));
 
     assertEquals("messages beyond limit should be truncated", truncated,
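BoundedAppender has moved from an inner class of RMAppAttemptImpl to the shared org.apache.hadoop.yarn.util package, which is the only change this hunk reflects. For intuition, a minimal sketch of the bounded-diagnostics idea the test exercises; the field names and details here are illustrative, not the actual implementation:

```java
/**
 * Keep at most 'limit' characters of appended diagnostics, discarding
 * the oldest ones so the tail is retained (the assertion above, which
 * expects beyondLimit.substring(1), relies on exactly this behavior).
 */
public class BoundedAppenderSketch {
  private final int limit;
  private final StringBuilder messages = new StringBuilder();

  public BoundedAppenderSketch(int limit) {
    this.limit = limit;
  }

  public BoundedAppenderSketch append(CharSequence csq) {
    messages.append(csq);
    if (messages.length() > limit) {
      // Drop the oldest characters; only the newest 'limit' remain.
      messages.delete(0, messages.length() - limit);
    }
    return this;
  }

  @Override
  public String toString() {
    return messages.toString();
  }
}
```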
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 7702ab1..f6406ff3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -526,12 +526,9 @@
     verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
   }
 
-  /**
-   * {@link RMAppAttemptState#LAUNCHED}
-   */
-  private void testAppAttemptLaunchedState(Container container) {
-    assertEquals(RMAppAttemptState.LAUNCHED, 
-        applicationAttempt.getAppAttemptState());
+  private void testAppAttemptLaunchedState(Container container,
+                                                RMAppAttemptState state) {
+    assertEquals(state, applicationAttempt.getAppAttemptState());
     assertEquals(container, applicationAttempt.getMasterContainer());
     if (UserGroupInformation.isSecurityEnabled()) {
       // ClientTokenMasterKey has been registered in SecretManager, it's able to
@@ -686,13 +683,18 @@
   }
   
   private void launchApplicationAttempt(Container container) {
+    launchApplicationAttempt(container, RMAppAttemptState.LAUNCHED);
+  }
+
+  private void launchApplicationAttempt(Container container,
+                                        RMAppAttemptState state) {
     applicationAttempt.handle(
-        new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), 
+        new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),
             RMAppAttemptEventType.LAUNCHED));
 
-    testAppAttemptLaunchedState(container);    
+    testAppAttemptLaunchedState(container, state);
   }
-  
+
   private void runApplicationAttempt(Container container,
       String host, 
       int rpcPort, 
@@ -723,7 +725,7 @@
     when(submissionContext.getUnmanagedAM()).thenReturn(true);
     // submit AM and check it goes to LAUNCHED state
     scheduleApplicationAttempt();
-    testAppAttemptLaunchedState(null);
+    testAppAttemptLaunchedState(null, RMAppAttemptState.LAUNCHED);
     verify(amLivelinessMonitor, times(1)).register(
         applicationAttempt.getAppAttemptId());
 
@@ -930,7 +932,15 @@
         applicationAttempt.createApplicationAttemptState());
     testAppAttemptFailedState(amContainer, diagnostics);
   }
-  
+
+  @Test(timeout = 10000)
+  public void testAllocatedToRunning() {
+    Container amContainer = allocateApplicationAttempt();
+    // Register attempt event arrives before launched attempt event
+    runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false);
+    launchApplicationAttempt(amContainer, RMAppAttemptState.RUNNING);
+  }
+
   @Test(timeout = 10000)
   public void testCreateAppAttemptReport() {
     RMAppAttemptState[] attemptStates = RMAppAttemptState.values();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
index b4b05ed..541539d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
@@ -37,6 +37,8 @@
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -51,8 +53,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler
-    .SchedulerApplicationAttempt;
+
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica
@@ -60,11 +61,9 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestContainerResizing {
@@ -166,11 +165,17 @@
      * Application has a container running, try to decrease the container and
      * check queue's usage and container resource will be updated.
      */
+    final DrainDispatcher dispatcher = new DrainDispatcher();
     MockRM rm1 = new MockRM() {
       @Override
       public RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
       }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
     };
     rm1.start();
     MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
@@ -197,6 +202,10 @@
                 Resources.createResource(1 * GB), null)));
 
     verifyContainerDecreased(response, containerId1, 1 * GB);
+
+    // Wait for scheduler to finish processing kill events.
+    dispatcher.waitForEventThreadToWait();
+
     checkUsedResource(rm1, "default", 1 * GB, null);
     Assert.assertEquals(1 * GB,
         app.getAppAttemptResourceUsage().getUsed().getMemorySize());
@@ -205,7 +214,7 @@
     RMNodeImpl rmNode =
         (RMNodeImpl) rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
     Collection<Container> decreasedContainers =
-        rmNode.getToBeDecreasedContainers();
+        rmNode.getToBeUpdatedContainers();
     boolean rmNodeReceivedDecreaseContainer = false;
     for (Container c : decreasedContainers) {
       if (c.getId().equals(containerId1)
@@ -510,11 +519,17 @@
      * the increase request reserved, it decreases the reserved container,
      * container should be decreased and reservation will be cancelled
      */
+    final DrainDispatcher dispatcher = new DrainDispatcher();
     MockRM rm1 = new MockRM() {
       @Override
       public RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
       }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
     };
     rm1.start();
     MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
@@ -589,7 +604,8 @@
                 Resources.createResource(1 * GB), null)));
     // Trigger a node heartbeat.
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
-    
+
+    dispatcher.waitForEventThreadToWait();
     /* Check statuses after reservation satisfied */
     // Increase request should be unreserved
     Assert.assertTrue(app.getReservedContainers().isEmpty());
@@ -620,11 +636,17 @@
      * So increase container request will be reserved. When app releases
      * container2, reserved part should be released as well.
      */
+    final DrainDispatcher dispatcher = new DrainDispatcher();
     MockRM rm1 = new MockRM() {
       @Override
       public RMNodeLabelsManager createNodeLabelManager() {
         return mgr;
       }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
     };
     rm1.start();
     MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
@@ -690,6 +712,10 @@
 
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     am1.allocate(null, null);
+
+    // Wait for scheduler to process all events.
+    dispatcher.waitForEventThreadToWait();
+
     /* Check statuses after reservation satisfied */
     // Increase request should be unreserved
     Assert.assertTrue(app.getReservedContainers().isEmpty());
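All three tests in this file now inject a DrainDispatcher, because assertions on queue usage and container sizes can otherwise race the RM's asynchronous event handling. The recurring pattern, condensed from the hunks above:

```java
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class DrainDispatcherPattern {
  public void deterministicAssertions() throws Exception {
    // Inject a DrainDispatcher so the test can block until the RM's
    // async event queue is fully processed before asserting state.
    final DrainDispatcher dispatcher = new DrainDispatcher();
    MockRM rm = new MockRM() {
      @Override
      protected Dispatcher createDispatcher() {
        return dispatcher;
      }
    };
    rm.start();
    // ... drive allocations / container updates here ...
    dispatcher.waitForEventThreadToWait(); // queue drained, thread idle
    // Assertions on queue usage and container sizes are now reliable.
    rm.stop();
  }
}
```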
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
index 184e854..d2e28be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
@@ -28,6 +28,8 @@
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -155,7 +157,13 @@
      */
     // Set the allocation expiration to 5 seconds
     conf.setLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 5000);
-    MockRM rm1 = new MockRM(conf);
+    final DrainDispatcher disp = new DrainDispatcher();
+    MockRM rm1 = new MockRM(conf) {
+      @Override
+      protected Dispatcher createDispatcher() {
+        return disp;
+      }
+    };
     rm1.start();
     MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 20 * GB);
     RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
@@ -204,6 +212,7 @@
     Assert.assertEquals(
         1 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
             .getAllocatedResource().getMemorySize());
+    disp.waitForEventThreadToWait();
     // Verify total resource usage is 2G
     checkUsedResource(rm1, "default", 2 * GB, null);
     Assert.assertEquals(2 * GB,
@@ -319,7 +328,7 @@
     verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB);
     // Verify NM receives the decrease message (3G)
     List<Container> containersToDecrease =
-        nm1.nodeHeartbeat(true).getContainersToDecrease();
+        nm1.nodeHeartbeat(true).getContainersToUpdate();
     Assert.assertEquals(1, containersToDecrease.size());
     Assert.assertEquals(
         3 * GB, containersToDecrease.get(0).getResource().getMemorySize());
@@ -420,7 +429,7 @@
     nm1.containerIncreaseStatus(getContainer(
         rm1, containerId4, Resources.createResource(6 * GB)));
     // Wait for containerId3 token to expire,
-    Thread.sleep(10000);
+    Thread.sleep(12000);
 
     am1.allocate(null, null);
 
@@ -435,14 +444,22 @@
             .getAllocatedResource().getMemorySize());
     // Verify NM receives 2 decrease message
     List<Container> containersToDecrease =
-        nm1.nodeHeartbeat(true).getContainersToDecrease();
-    Assert.assertEquals(2, containersToDecrease.size());
+        nm1.nodeHeartbeat(true).getContainersToUpdate();
+    // NOTE: Can be more than 2 depending on which event arrives first.
+    // What matters is the final size of each container.
+    Assert.assertTrue(containersToDecrease.size() >= 2);
+
     // Sort the list so container sizes are checked in ascending order
     Collections.sort(containersToDecrease);
+    int i = 0;
+    if (containersToDecrease.size() > 2) {
+      Assert.assertEquals(
+          2 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
+    }
     Assert.assertEquals(
-        3 * GB, containersToDecrease.get(0).getResource().getMemorySize());
+        3 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
     Assert.assertEquals(
-        4 * GB, containersToDecrease.get(1).getResource().getMemorySize());
+        4 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
     rm1.stop();
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 9fb9d42..c708b92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1262,9 +1262,27 @@
     Assert.assertFalse(Renewer.cancelled);
 
     finishAMAndWaitForComplete(app3, rm, nm1, am3, dttr);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return !renewer.getAllTokens().containsKey(token1);
+      }
+    }, 10, 5000);
     Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
     Assert.assertTrue(dttr.referringAppIds.isEmpty());
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return dttr.isTimerCancelled();
+      }
+    }, 10, 5000);
     Assert.assertTrue(dttr.isTimerCancelled());
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return Renewer.cancelled;
+      }
+    }, 10, 5000);
     Assert.assertTrue(Renewer.cancelled);
 
     // make sure the token also has been removed from appTokens
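Since the anonymous Supplier form is used three times in a row, the same waits can be written as lambdas, assuming the branch builds at the Java 8 language level (per BUILDING.txt) and that Guava's Supplier is usable as a functional interface:

```java
// The three polling waits above, rewritten as lambdas; renewer, token1,
// dttr and Renewer.cancelled come from the surrounding test.
GenericTestUtils.waitFor(
    () -> !renewer.getAllTokens().containsKey(token1), 10, 5000);
GenericTestUtils.waitFor(() -> dttr.isTimerCancelled(), 10, 5000);
GenericTestUtils.waitFor(() -> Renewer.cancelled, 10, 5000);
```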
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 06c642a..640293c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.ExitUtil;
@@ -74,6 +75,9 @@
     Configuration conf = new Configuration(testConf);
     conf.set("hadoop.security.authentication", "kerberos");
     UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.getLoginUser()
+        .setAuthenticationMethod(AuthenticationMethod.KERBEROS);
+
     MemoryRMStateStore memStore = new MockRMMemoryStateStore();
     memStore.init(conf);
     RMState rmState = memStore.getState();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 4f7ab54..f93a3fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -22,7 +22,6 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -647,8 +646,8 @@
     when(mockAppsResponse.getApplicationList())
       .thenReturn(Arrays.asList(new ApplicationReport[] { mockReport }));
     ClientRMService mockClientSvc = mock(ClientRMService.class);
-    when(mockClientSvc.getApplications(isA(GetApplicationsRequest.class),
-        anyBoolean())).thenReturn(mockAppsResponse);
+    when(mockClientSvc.getApplications(isA(GetApplicationsRequest.class)))
+        .thenReturn(mockAppsResponse);
     ResourceManager mockRM = mock(ResourceManager.class);
     RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null,
         null, null, null, null, null);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
new file mode 100644
index 0000000..6d75471
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.router;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * This class is for maintaining the various Router Federation Interceptor
+ * activity statistics and publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Metrics for Router Federation Interceptor", context = "fedr")
+public final class RouterMetrics {
+
+  private static final MetricsInfo RECORD_INFO =
+      info("RouterMetrics", "Router Federation Interceptor");
+  private static AtomicBoolean isInitialized = new AtomicBoolean(false);
+
+  // Metrics for operation failed
+  @Metric("# of applications failed to be submitted")
+  private MutableGaugeInt numAppsFailedSubmitted;
+  @Metric("# of applications failed to be created")
+  private MutableGaugeInt numAppsFailedCreated;
+  @Metric("# of applications failed to be killed")
+  private MutableGaugeInt numAppsFailedKilled;
+  @Metric("# of application reports failed to be retrieved")
+  private MutableGaugeInt numAppsFailedRetrieved;
+  @Metric("# of multiple applications reports failed to be retrieved")
+  private MutableGaugeInt numMultipleAppsFailedRetrieved;
+
+  // Aggregate metrics are shared, and don't have to be looked up per call
+  @Metric("Total number of successful Submitted apps and latency(ms)")
+  private MutableRate totalSucceededAppsSubmitted;
+  @Metric("Total number of successful Killed apps and latency(ms)")
+  private MutableRate totalSucceededAppsKilled;
+  @Metric("Total number of successful Created apps and latency(ms)")
+  private MutableRate totalSucceededAppsCreated;
+  @Metric("Total number of successful Retrieved app reports and latency(ms)")
+  private MutableRate totalSucceededAppsRetrieved;
+  @Metric("Total number of successful Retrieved multiple apps reports and "
+      + "latency(ms)")
+  private MutableRate totalSucceededMultipleAppsRetrieved;
+
+  /**
+   * Provide quantile counters for all latencies.
+   */
+  private MutableQuantiles submitApplicationLatency;
+  private MutableQuantiles getNewApplicationLatency;
+  private MutableQuantiles killApplicationLatency;
+  private MutableQuantiles getApplicationReportLatency;
+  private MutableQuantiles getApplicationsReportLatency;
+
+  private static volatile RouterMetrics INSTANCE = null;
+  private static MetricsRegistry registry;
+
+  private RouterMetrics() {
+    registry = new MetricsRegistry(RECORD_INFO);
+    registry.tag(RECORD_INFO, "Router");
+    getNewApplicationLatency = registry.newQuantiles("getNewApplicationLatency",
+        "latency of get new application", "ops", "latency", 10);
+    submitApplicationLatency = registry.newQuantiles("submitApplicationLatency",
+        "latency of submit application", "ops", "latency", 10);
+    killApplicationLatency = registry.newQuantiles("killApplicationLatency",
+        "latency of kill application", "ops", "latency", 10);
+    getApplicationReportLatency =
+        registry.newQuantiles("getApplicationReportLatency",
+            "latency of get application report", "ops", "latency", 10);
+    getApplicationsReportLatency =
+        registry.newQuantiles("getApplicationsReportLatency",
+            "latency of get applications report", "ops", "latency", 10);
+  }
+
+  public static RouterMetrics getMetrics() {
+    if (!isInitialized.get()) {
+      synchronized (RouterMetrics.class) {
+        if (INSTANCE == null) {
+          INSTANCE = DefaultMetricsSystem.instance().register("RouterMetrics",
+              "Metrics for the Yarn Router", new RouterMetrics());
+          isInitialized.set(true);
+        }
+      }
+    }
+    return INSTANCE;
+  }
+
+  @VisibleForTesting
+  static synchronized void destroy() {
+    isInitialized.set(false);
+    INSTANCE = null;
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededAppsCreated() {
+    return totalSucceededAppsCreated.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededAppsSubmitted() {
+    return totalSucceededAppsSubmitted.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededAppsKilled() {
+    return totalSucceededAppsKilled.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededAppsRetrieved() {
+    return totalSucceededAppsRetrieved.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededMultipleAppsRetrieved() {
+    return totalSucceededMultipleAppsRetrieved.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededAppsCreated() {
+    return totalSucceededAppsCreated.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededAppsSubmitted() {
+    return totalSucceededAppsSubmitted.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededAppsKilled() {
+    return totalSucceededAppsKilled.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededGetAppReport() {
+    return totalSucceededAppsRetrieved.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededMultipleGetAppReport() {
+    return totalSucceededMultipleAppsRetrieved.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public int getAppsFailedCreated() {
+    return numAppsFailedCreated.value();
+  }
+
+  @VisibleForTesting
+  public int getAppsFailedSubmitted() {
+    return numAppsFailedSubmitted.value();
+  }
+
+  @VisibleForTesting
+  public int getAppsFailedKilled() {
+    return numAppsFailedKilled.value();
+  }
+
+  @VisibleForTesting
+  public int getAppsFailedRetrieved() {
+    return numAppsFailedRetrieved.value();
+  }
+
+  @VisibleForTesting
+  public int getMultipleAppsFailedRetrieved() {
+    return numMultipleAppsFailedRetrieved.value();
+  }
+
+  public void succeededAppsCreated(long duration) {
+    totalSucceededAppsCreated.add(duration);
+    getNewApplicationLatency.add(duration);
+  }
+
+  public void succeededAppsSubmitted(long duration) {
+    totalSucceededAppsSubmitted.add(duration);
+    submitApplicationLatency.add(duration);
+  }
+
+  public void succeededAppsKilled(long duration) {
+    totalSucceededAppsKilled.add(duration);
+    killApplicationLatency.add(duration);
+  }
+
+  public void succeededAppsRetrieved(long duration) {
+    totalSucceededAppsRetrieved.add(duration);
+    getApplicationReportLatency.add(duration);
+  }
+
+  public void succeededMultipleAppsRetrieved(long duration) {
+    totalSucceededMultipleAppsRetrieved.add(duration);
+    getApplicationsReportLatency.add(duration);
+  }
+
+  public void incrAppsFailedCreated() {
+    numAppsFailedCreated.incr();
+  }
+
+  public void incrAppsFailedSubmitted() {
+    numAppsFailedSubmitted.incr();
+  }
+
+  public void incrAppsFailedKilled() {
+    numAppsFailedKilled.incr();
+  }
+
+  public void incrAppsFailedRetrieved() {
+    numAppsFailedRetrieved.incr();
+  }
+
+  public void incrMultipleAppsFailedRetrieved() {
+    numMultipleAppsFailedRetrieved.incr();
+  }
+
+}
\ No newline at end of file
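The interceptors below consume this class in a fixed pattern: take a MonotonicClock timestamp on entry, record the elapsed time on success, and bump the matching failure gauge on every error path. A condensed sketch of that contract (the forwardToSubCluster helper is a hypothetical stand-in for the real RM call):

```java
import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.router.RouterMetrics;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.MonotonicClock;

public class RouterMetricsUsage {
  private final RouterMetrics routerMetrics = RouterMetrics.getMetrics();
  // MonotonicClock is immune to wall-clock adjustments, so the durations
  // fed into the MutableRate and MutableQuantiles stay meaningful.
  private final Clock clock = new MonotonicClock();

  public void submitApplication() throws YarnException, IOException {
    long startTime = clock.getTime();
    try {
      forwardToSubCluster(); // placeholder for the real RM call
      routerMetrics.succeededAppsSubmitted(clock.getTime() - startTime);
    } catch (YarnException | IOException e) {
      routerMetrics.incrAppsFailedSubmitted();
      throw e;
    }
  }

  private void forwardToSubCluster() throws YarnException, IOException {
    // Hypothetical stand-in for ApplicationClientProtocol.submitApplication.
  }
}
```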
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
index 7268ebd..3a36eec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
@@ -98,7 +98,10 @@
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.router.RouterMetrics;
 import org.apache.hadoop.yarn.server.router.RouterServerUtil;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -130,6 +133,8 @@
   private FederationStateStoreFacade federationFacade;
   private Random rand;
   private RouterPolicyFacade policyFacade;
+  private RouterMetrics routerMetrics;
+  private final Clock clock = new MonotonicClock();
 
   @Override
   public void init(String userName) {
@@ -153,7 +158,7 @@
 
     clientRMProxies =
         new ConcurrentHashMap<SubClusterId, ApplicationClientProtocol>();
-
+    routerMetrics = RouterMetrics.getMetrics();
   }
 
   @Override
@@ -220,6 +225,9 @@
   @Override
   public GetNewApplicationResponse getNewApplication(
       GetNewApplicationRequest request) throws YarnException, IOException {
+
+    long startTime = clock.getTime();
+
     Map<SubClusterId, SubClusterInfo> subClustersActive =
         federationFacade.getSubClusters(true);
 
@@ -238,6 +246,9 @@
       }
 
       if (response != null) {
+
+        long stopTime = clock.getTime();
+        routerMetrics.succeededAppsCreated(stopTime - startTime);
         return response;
       } else {
         // Empty response from the ResourceManager.
@@ -247,6 +258,7 @@
 
     }
 
+    routerMetrics.incrAppsFailedCreated();
     String errMsg = "Fail to create a new application.";
     LOG.error(errMsg);
     throw new YarnException(errMsg);
@@ -320,9 +332,13 @@
   @Override
   public SubmitApplicationResponse submitApplication(
       SubmitApplicationRequest request) throws YarnException, IOException {
+
+    long startTime = clock.getTime();
+
     if (request == null || request.getApplicationSubmissionContext() == null
         || request.getApplicationSubmissionContext()
             .getApplicationId() == null) {
+      routerMetrics.incrAppsFailedSubmitted();
       RouterServerUtil
+          .logAndThrowException("Missing submitApplication request or "
+              + "applicationSubmissionContext information.", null);
@@ -350,6 +366,7 @@
           subClusterId =
               federationFacade.addApplicationHomeSubCluster(appHomeSubCluster);
         } catch (YarnException e) {
+          routerMetrics.incrAppsFailedSubmitted();
           String message = "Unable to insert the ApplicationId " + applicationId
               + " into the FederationStateStore";
           RouterServerUtil.logAndThrowException(message, e);
@@ -368,6 +385,7 @@
             LOG.info("Application " + applicationId
                 + " already submitted on SubCluster " + subClusterId);
           } else {
+            routerMetrics.incrAppsFailedSubmitted();
             RouterServerUtil.logAndThrowException(message, e);
           }
         }
@@ -388,6 +406,8 @@
         LOG.info("Application "
             + request.getApplicationSubmissionContext().getApplicationName()
             + " with appId " + applicationId + " submitted on " + subClusterId);
+        long stopTime = clock.getTime();
+        routerMetrics.succeededAppsSubmitted(stopTime - startTime);
         return response;
       } else {
         // Empty response from the ResourceManager.
@@ -396,6 +416,7 @@
       }
     }
 
+    routerMetrics.incrAppsFailedSubmitted();
     String errMsg = "Application "
         + request.getApplicationSubmissionContext().getApplicationName()
         + " with appId " + applicationId + " failed to be submitted.";
@@ -423,7 +444,10 @@
   public KillApplicationResponse forceKillApplication(
       KillApplicationRequest request) throws YarnException, IOException {
 
+    long startTime = clock.getTime();
+
     if (request == null || request.getApplicationId() == null) {
+      routerMetrics.incrAppsFailedKilled();
       RouterServerUtil.logAndThrowException(
           "Missing forceKillApplication request or ApplicationId.", null);
     }
@@ -434,6 +458,7 @@
       subClusterId = federationFacade
           .getApplicationHomeSubCluster(request.getApplicationId());
     } catch (YarnException e) {
+      routerMetrics.incrAppsFailedKilled();
       RouterServerUtil.logAndThrowException("Application " + applicationId
           + " does not exist in FederationStateStore", e);
     }
@@ -447,6 +472,7 @@
           + subClusterId);
       response = clientRMProxy.forceKillApplication(request);
     } catch (Exception e) {
+      routerMetrics.incrAppsFailedKilled();
       LOG.error("Unable to kill the application report for "
           + request.getApplicationId() + "to SubCluster "
           + subClusterId.getId(), e);
@@ -458,6 +484,8 @@
           + applicationId + " to SubCluster " + subClusterId.getId());
     }
 
+    long stopTime = clock.getTime();
+    routerMetrics.succeededAppsKilled(stopTime - startTime);
     return response;
   }
 
@@ -481,7 +509,10 @@
   public GetApplicationReportResponse getApplicationReport(
       GetApplicationReportRequest request) throws YarnException, IOException {
 
+    long startTime = clock.getTime();
+
     if (request == null || request.getApplicationId() == null) {
+      routerMetrics.incrAppsFailedRetrieved();
       RouterServerUtil.logAndThrowException(
           "Missing getApplicationReport request or applicationId information.",
           null);
@@ -493,6 +524,7 @@
       subClusterId = federationFacade
           .getApplicationHomeSubCluster(request.getApplicationId());
     } catch (YarnException e) {
+      routerMetrics.incrAppsFailedRetrieved();
       RouterServerUtil
           .logAndThrowException("Application " + request.getApplicationId()
               + " does not exist in FederationStateStore", e);
@@ -505,6 +537,7 @@
     try {
       response = clientRMProxy.getApplicationReport(request);
     } catch (Exception e) {
+      routerMetrics.incrAppsFailedRetrieved();
       LOG.error("Unable to get the application report for "
          + request.getApplicationId() + " to SubCluster "
           + subClusterId.getId(), e);
@@ -517,6 +550,8 @@
           + subClusterId.getId());
     }
 
+    long stopTime = clock.getTime();
+    routerMetrics.succeededAppsRetrieved(stopTime - startTime);
     return response;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 8ecc19da..3a91e35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -18,7 +18,24 @@
 
 package org.apache.hadoop.yarn.server.router.webapp;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -36,20 +53,42 @@
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.apache.hadoop.yarn.server.router.RouterMetrics;
 import org.apache.hadoop.yarn.server.router.RouterServerUtil;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-import java.io.IOException;
-import java.util.*;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Extends the {@code AbstractRESTRequestInterceptor} class and provides an
@@ -66,9 +105,17 @@
   private FederationStateStoreFacade federationFacade;
   private Random rand;
   private RouterPolicyFacade policyFacade;
+  private RouterMetrics routerMetrics;
+  private final Clock clock = new MonotonicClock();
+  private boolean returnPartialReport;
 
   private Map<SubClusterId, DefaultRequestInterceptorREST> interceptors;
 
+  /**
+   * Thread pool used for asynchronous operations.
+   */
+  private ExecutorService threadpool;
+
   @Override
   public void init(String user) {
     federationFacade = FederationStateStoreFacade.getInstance();
@@ -88,6 +135,12 @@
             YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY);
 
     interceptors = new HashMap<SubClusterId, DefaultRequestInterceptorREST>();
+    routerMetrics = RouterMetrics.getMetrics();
+    threadpool = Executors.newCachedThreadPool();
+
+    returnPartialReport =
+        conf.getBoolean(YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED,
+            YarnConfiguration.DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED);
   }
 
   private SubClusterId getRandomActiveSubCluster(
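The new threadpool field, together with the ExecutorCompletionService imports and the returnPartialReport flag read in init(), prepares this interceptor to query all subcluster RMs in parallel. A hedged sketch of that fan-out under those assumptions; the class and method names here are illustrative, not the interceptor's actual API:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SubClusterFanOut<R> {
  private final ExecutorService threadpool = Executors.newCachedThreadPool();

  /** Run one callable per subcluster; optionally tolerate failures. */
  public List<R> queryAll(List<Callable<R>> calls,
      boolean returnPartialReport) throws Exception {
    ExecutorCompletionService<R> ecs =
        new ExecutorCompletionService<>(threadpool);
    for (Callable<R> call : calls) {
      ecs.submit(call);
    }
    List<R> results = new ArrayList<>();
    for (int i = 0; i < calls.size(); i++) {
      try {
        results.add(ecs.take().get());
      } catch (Exception e) {
        // With partial results enabled, one failing subcluster does
        // not fail the whole response; it is simply left out.
        if (!returnPartialReport) {
          throw e;
        }
      }
    }
    return results;
  }
}
```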
@@ -191,10 +244,14 @@
   @Override
   public Response createNewApplication(HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
+
+    long startTime = clock.getTime();
+
     Map<SubClusterId, SubClusterInfo> subClustersActive;
     try {
       subClustersActive = federationFacade.getSubClusters(true);
     } catch (YarnException e) {
+      routerMetrics.incrAppsFailedCreated();
       return Response.status(Status.INTERNAL_SERVER_ERROR)
           .entity(e.getLocalizedMessage()).build();
     }
@@ -207,6 +264,7 @@
       try {
         subClusterId = getRandomActiveSubCluster(subClustersActive, blacklist);
       } catch (YarnException e) {
+        routerMetrics.incrAppsFailedCreated();
         return Response.status(Status.SERVICE_UNAVAILABLE)
             .entity(e.getLocalizedMessage()).build();
       }
@@ -226,6 +284,10 @@
       }
 
       if (response != null && response.getStatus() == 200) {
+
+        long stopTime = clock.getTime();
+        routerMetrics.succeededAppsCreated(stopTime - startTime);
+
         return response;
       } else {
         // Empty response from the ResourceManager.
@@ -236,6 +298,7 @@
 
     String errMsg = "Fail to create a new application.";
     LOG.error(errMsg);
+    routerMetrics.incrAppsFailedCreated();
     return Response.status(Status.INTERNAL_SERVER_ERROR).entity(errMsg).build();
   }
 
@@ -308,7 +371,11 @@
   public Response submitApplication(ApplicationSubmissionContextInfo newApp,
       HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
+
+    long startTime = clock.getTime();
+
     if (newApp == null || newApp.getApplicationId() == null) {
+      routerMetrics.incrAppsFailedSubmitted();
       String errMsg = "Missing ApplicationSubmissionContextInfo or "
           + "applicationSubmissionContex information.";
       return Response.status(Status.BAD_REQUEST).entity(errMsg).build();
@@ -318,6 +385,7 @@
     try {
       applicationId = ApplicationId.fromString(newApp.getApplicationId());
     } catch (IllegalArgumentException e) {
+      routerMetrics.incrAppsFailedSubmitted();
       return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage())
           .build();
     }
@@ -333,6 +401,7 @@
       try {
         subClusterId = policyFacade.getHomeSubcluster(context, blacklist);
       } catch (YarnException e) {
+        routerMetrics.incrAppsFailedSubmitted();
         return Response.status(Status.SERVICE_UNAVAILABLE)
             .entity(e.getLocalizedMessage()).build();
       }
@@ -349,6 +418,7 @@
           subClusterId =
               federationFacade.addApplicationHomeSubCluster(appHomeSubCluster);
         } catch (YarnException e) {
+          routerMetrics.incrAppsFailedSubmitted();
           String errMsg = "Unable to insert the ApplicationId " + applicationId
               + " into the FederationStateStore";
           return Response.status(Status.SERVICE_UNAVAILABLE)
@@ -367,6 +437,7 @@
             subClusterIdInStateStore =
                 federationFacade.getApplicationHomeSubCluster(applicationId);
           } catch (YarnException e1) {
+            routerMetrics.incrAppsFailedSubmitted();
             return Response.status(Status.SERVICE_UNAVAILABLE)
                 .entity(e1.getLocalizedMessage()).build();
           }
@@ -374,6 +445,7 @@
             LOG.info("Application " + applicationId
                 + " already submitted on SubCluster " + subClusterId);
           } else {
+            routerMetrics.incrAppsFailedSubmitted();
             return Response.status(Status.SERVICE_UNAVAILABLE).entity(errMsg)
                 .build();
           }
@@ -384,6 +456,7 @@
       try {
         subClusterInfo = federationFacade.getSubCluster(subClusterId);
       } catch (YarnException e) {
+        routerMetrics.incrAppsFailedSubmitted();
         return Response.status(Status.SERVICE_UNAVAILABLE)
             .entity(e.getLocalizedMessage()).build();
       }
@@ -401,6 +474,10 @@
       if (response != null && response.getStatus() == 202) {
         LOG.info("Application " + context.getApplicationName() + " with appId "
             + applicationId + " submitted on " + subClusterId);
+
+        long stopTime = clock.getTime();
+        routerMetrics.succeededAppsSubmitted(stopTime - startTime);
+
         return response;
       } else {
         // Empty response from the ResourceManager.
@@ -409,6 +486,7 @@
       }
     }
 
+    routerMetrics.incrAppsFailedSubmitted();
     String errMsg = "Application " + newApp.getApplicationName()
         + " with appId " + applicationId + " failed to be submitted.";
     LOG.error(errMsg);
@@ -435,10 +513,13 @@
   public AppInfo getApp(HttpServletRequest hsr, String appId,
       Set<String> unselectedFields) {
 
+    long startTime = clock.getTime();
+
     ApplicationId applicationId = null;
     try {
       applicationId = ApplicationId.fromString(appId);
     } catch (IllegalArgumentException e) {
+      routerMetrics.incrAppsFailedRetrieved();
       return null;
     }
 
@@ -448,16 +529,23 @@
       subClusterId =
           federationFacade.getApplicationHomeSubCluster(applicationId);
       if (subClusterId == null) {
+        routerMetrics.incrAppsFailedRetrieved();
         return null;
       }
       subClusterInfo = federationFacade.getSubCluster(subClusterId);
     } catch (YarnException e) {
+      routerMetrics.incrAppsFailedRetrieved();
       return null;
     }
 
-    return getOrCreateInterceptorForSubCluster(subClusterId,
+    AppInfo response = getOrCreateInterceptorForSubCluster(subClusterId,
         subClusterInfo.getRMWebServiceAddress()).getApp(hsr, appId,
             unselectedFields);
+
+    long stopTime = clock.getTime();
+    routerMetrics.succeededAppsRetrieved(stopTime - startTime);
+
+    return response;
   }
 
   /**
@@ -481,23 +569,130 @@
       String appId) throws AuthorizationException, YarnException,
       InterruptedException, IOException {
 
+    long startTime = clock.getTime();
+
     ApplicationId applicationId = null;
     try {
       applicationId = ApplicationId.fromString(appId);
     } catch (IllegalArgumentException e) {
+      routerMetrics.incrAppsFailedKilled();
       return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage())
           .build();
     }
 
-    SubClusterId subClusterId =
-        federationFacade.getApplicationHomeSubCluster(applicationId);
+    SubClusterInfo subClusterInfo = null;
+    SubClusterId subClusterId = null;
+    try {
+      subClusterId =
+          federationFacade.getApplicationHomeSubCluster(applicationId);
+      subClusterInfo = federationFacade.getSubCluster(subClusterId);
+    } catch (YarnException e) {
+      routerMetrics.incrAppsFailedKilled();
+      return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage())
+          .build();
+    }
 
-    SubClusterInfo subClusterInfo =
-        federationFacade.getSubCluster(subClusterId);
-
-    return getOrCreateInterceptorForSubCluster(subClusterId,
+    Response response = getOrCreateInterceptorForSubCluster(subClusterId,
         subClusterInfo.getRMWebServiceAddress()).updateAppState(targetState,
             hsr, appId);
+
+    long stopTime = clock.getTime();
+    routerMetrics.succeededAppsKilled(stopTime - startTime);
+
+    return response;
+  }
+
+  /**
+   * The Yarn Router will forward the request to all the Yarn RMs in parallel,
+   * and afterwards it will group all the ApplicationReports by ApplicationId.
+   * <p>
+   * Possible failures:
+   * <p>
+   * Client: identical behavior to {@code RMWebServices}.
+   * <p>
+   * Router: the Client will time out and resubmit the request.
+   * <p>
+   * ResourceManager: the Router calls each Yarn RM in parallel, using one
+   * thread per Yarn RM. In case a Yarn RM fails, only that single call will
+   * time out. The Router will still merge the ApplicationReports it received
+   * and provide a partial list to the client.
+   * <p>
+   * State Store: the Router will time out and retry depending on the
+   * FederationFacade settings, if the failure happened before the select
+   * operation.
+   */
+  @Override
+  public AppsInfo getApps(HttpServletRequest hsr, String stateQuery,
+      Set<String> statesQuery, String finalStatusQuery, String userQuery,
+      String queueQuery, String count, String startedBegin, String startedEnd,
+      String finishBegin, String finishEnd, Set<String> applicationTypes,
+      Set<String> applicationTags, Set<String> unselectedFields) {
+    AppsInfo apps = new AppsInfo();
+    long startTime = clock.getTime();
+
+    Map<SubClusterId, SubClusterInfo> subClustersActive = null;
+    try {
+      subClustersActive = federationFacade.getSubClusters(true);
+    } catch (YarnException e) {
+      routerMetrics.incrMultipleAppsFailedRetrieved();
+      return null;
+    }
+
+    // Send the requests in parallel
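+    // by submitting one task per subcluster to a completion service, so the
+    // responses can be consumed as soon as each subcluster replies.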
+
+    ExecutorCompletionService<AppsInfo> compSvc =
+        new ExecutorCompletionService<AppsInfo>(this.threadpool);
+
+    for (final SubClusterInfo info : subClustersActive.values()) {
+      compSvc.submit(new Callable<AppsInfo>() {
+        @Override
+        public AppsInfo call() {
+          DefaultRequestInterceptorREST interceptor =
+              getOrCreateInterceptorForSubCluster(info.getSubClusterId(),
+                  info.getClientRMServiceAddress());
+          AppsInfo rmApps = interceptor.getApps(hsr, stateQuery, statesQuery,
+              finalStatusQuery, userQuery, queueQuery, count, startedBegin,
+              startedEnd, finishBegin, finishEnd, applicationTypes,
+              applicationTags, unselectedFields);
+
+          if (rmApps == null) {
+            routerMetrics.incrMultipleAppsFailedRetrieved();
+            LOG.error("Subcluster " + info.getSubClusterId()
+                + " failed to return appReport.");
+            return null;
+          }
+          return rmApps;
+        }
+      });
+    }
+
+    // Collect all the responses in parallel
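+    // (in completion order, not submission order)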
+
+    for (int i = 0; i < subClustersActive.values().size(); i++) {
+      try {
+        Future<AppsInfo> future = compSvc.take();
+        AppsInfo appsResponse = future.get();
+
+        // Record the success latency only for subclusters that actually
+        // returned a report; null responses were already counted as
+        // failures inside the callable.
+        if (appsResponse != null) {
+          long stopTime = clock.getTime();
+          routerMetrics.succeededMultipleAppsRetrieved(stopTime - startTime);
+          apps.addAll(appsResponse.getApps());
+        }
+      } catch (Throwable e) {
+        routerMetrics.incrMultipleAppsFailedRetrieved();
+        LOG.warn("Failed to get application report", e);
+      }
+    }
+
+    if (apps.getApps().isEmpty()) {
+      return null;
+    }
+
+    // Merge all the application reports received from the available Yarn RMs
+
+    return RouterWebServiceUtil.mergeAppsInfo(apps.getApps(),
+        returnPartialReport);
   }
 
   @Override
@@ -554,15 +749,6 @@
   }
 
   @Override
-  public AppsInfo getApps(HttpServletRequest hsr, String stateQuery,
-      Set<String> statesQuery, String finalStatusQuery, String userQuery,
-      String queueQuery, String count, String startedBegin, String startedEnd,
-      String finishBegin, String finishEnd, Set<String> applicationTypes,
-      Set<String> applicationTags, Set<String> unselectedFields) {
-    throw new NotImplementedException();
-  }
-
-  @Override
   public AppState getAppState(HttpServletRequest hsr, String appId)
       throws AuthorizationException {
     throw new NotImplementedException();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
index 18618ee..e633b6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
@@ -20,6 +20,8 @@
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -33,7 +35,11 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
@@ -55,6 +61,8 @@
   private static final Log LOG =
       LogFactory.getLog(RouterWebServiceUtil.class.getName());
 
+  private static final String PARTIAL_REPORT = "Partial Report ";
+
   /** Disable constructor. */
   private RouterWebServiceUtil() {
   }
@@ -224,4 +232,103 @@
 
   }
 
-}
\ No newline at end of file
+  /**
+   * Merges a list of AppInfo, grouping by ApplicationId. Our current policy
+   * is to merge the application reports from the reachable SubClusters.
+   * Via a configuration parameter, we decide whether to return applications
+   * for which the primary AM is missing or to omit them.
+   *
+   * @param appsInfo a list of AppInfo to merge
+   * @param returnPartialResult whether the merged AppsInfo should contain
+   *          partial results or not
+   * @return the merged AppsInfo
+   */
+  public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo,
+      boolean returnPartialResult) {
+    AppsInfo allApps = new AppsInfo();
+
+    Map<String, AppInfo> federationAM = new HashMap<String, AppInfo>();
+    Map<String, AppInfo> federationUAMSum = new HashMap<String, AppInfo>();
+    for (AppInfo a : appsInfo) {
+      // Check if this AppInfo is an AM
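+      // (only the report from the AM's home subcluster is expected to
+      // carry the AM host address)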
+      if (a.getAMHostHttpAddress() != null) {
+        // Insert in the list of AM
+        federationAM.put(a.getAppId(), a);
+        // Check if there are any UAM found before
+        if (federationUAMSum.containsKey(a.getAppId())) {
+          // Merge the current AM with the found UAM
+          mergeAMWithUAM(a, federationUAMSum.get(a.getAppId()));
+          // Remove the sum of the UAMs
+          federationUAMSum.remove(a.getAppId());
+        }
+      } else {
+        // This AppInfo is a UAM
+        if (federationAM.containsKey(a.getAppId())) {
+          // Merge the current UAM with its own AM
+          mergeAMWithUAM(federationAM.get(a.getAppId()), a);
+        } else if (federationUAMSum.containsKey(a.getAppId())) {
+          // Merge the current UAM with its own UAM and update the list of UAM
+          federationUAMSum.put(a.getAppId(),
+              mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a));
+        } else {
+          // Insert in the list of UAM
+          federationUAMSum.put(a.getAppId(), a);
+        }
+      }
+    }
+
+    // Check whether the remaining UAMs depend on federation or not
+    for (AppInfo a : federationUAMSum.values()) {
+      if (returnPartialResult || (a.getName() != null
+          && !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME)
+              || a.getName().startsWith(PARTIAL_REPORT)))) {
+        federationAM.put(a.getAppId(), a);
+      }
+    }
+
+    allApps.addAll(new ArrayList<AppInfo>(federationAM.values()));
+    return allApps;
+  }
+
+  private static AppInfo mergeUAMWithUAM(AppInfo uam1, AppInfo uam2) {
+    AppInfo partialReport = new AppInfo();
+    partialReport.setAppId(uam1.getAppId());
+    partialReport.setName(PARTIAL_REPORT + uam1.getAppId());
+    // We pick the state of the first UAM
+    partialReport.setState(uam1.getState());
+    // Merge the newly partial AM with UAM1 and then with UAM2
+    mergeAMWithUAM(partialReport, uam1);
+    mergeAMWithUAM(partialReport, uam2);
+    return partialReport;
+  }
+
+  private static void mergeAMWithUAM(AppInfo am, AppInfo uam) {
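+    // Preemption counters are historical values, so they are aggregated
+    // regardless of the application state.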
+    am.setPreemptedResourceMB(
+        am.getPreemptedResourceMB() + uam.getPreemptedResourceMB());
+    am.setPreemptedResourceVCores(
+        am.getPreemptedResourceVCores() + uam.getPreemptedResourceVCores());
+    am.setNumNonAMContainerPreempted(am.getNumNonAMContainerPreempted()
+        + uam.getNumNonAMContainerPreempted());
+    am.setNumAMContainerPreempted(
+        am.getNumAMContainerPreempted() + uam.getNumAMContainerPreempted());
+    am.setPreemptedMemorySeconds(
+        am.getPreemptedMemorySeconds() + uam.getPreemptedMemorySeconds());
+    am.setPreemptedVcoreSeconds(
+        am.getPreemptedVcoreSeconds() + uam.getPreemptedVcoreSeconds());
+
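+    // Runtime values (allocated and reserved resources, running containers)
+    // are only meaningful while both reports are in the RUNNING state.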
+    if (am.getState() == YarnApplicationState.RUNNING
+        && uam.getState() == am.getState()) {
+
+      am.getResourceRequests().addAll(uam.getResourceRequests());
+
+      am.setAllocatedMB(am.getAllocatedMB() + uam.getAllocatedMB());
+      am.setAllocatedVCores(am.getAllocatedVCores() + uam.getAllocatedVCores());
+      am.setReservedMB(am.getReservedMB() + uam.getReservedMB());
+      am.setReservedVCores(am.getReservedVCores() + uam.getReservedVCores());
+      am.setRunningContainers(
+          am.getRunningContainers() + uam.getRunningContainers());
+      am.setMemorySeconds(am.getMemorySeconds() + uam.getMemorySeconds());
+      am.setVcoreSeconds(am.getVcoreSeconds() + uam.getVcoreSeconds());
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
new file mode 100644
index 0000000..4c18ace
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.router;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class validates the correctness of Router Federation Interceptor
+ * Metrics.
+ */
+public class TestRouterMetrics {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterMetrics.class);
+
+  // All the operations in the bad subcluster fail.
+  private MockBadSubCluster badSubCluster = new MockBadSubCluster();
+  // All the operations in the good subcluster succeed.
+  private MockGoodSubCluster goodSubCluster = new MockGoodSubCluster();
+
+  private static RouterMetrics metrics = RouterMetrics.getMetrics();
+
+  @BeforeClass
+  public static void init() {
+
+    LOG.info("Test: aggregate metrics are initialized correctly");
+
+    Assert.assertEquals(0, metrics.getNumSucceededAppsCreated());
+    Assert.assertEquals(0, metrics.getNumSucceededAppsSubmitted());
+    Assert.assertEquals(0, metrics.getNumSucceededAppsKilled());
+    Assert.assertEquals(0, metrics.getNumSucceededAppsRetrieved());
+
+    Assert.assertEquals(0, metrics.getAppsFailedCreated());
+    Assert.assertEquals(0, metrics.getAppsFailedSubmitted());
+    Assert.assertEquals(0, metrics.getAppsFailedKilled());
+    Assert.assertEquals(0, metrics.getAppsFailedRetrieved());
+
+    LOG.info("Test: aggregate metrics are updated correctly");
+  }
+
+  /**
+   * This test validates the correctness of the metric: Created Apps
+   * successfully.
+   */
+  @Test
+  public void testSucceededAppsCreated() {
+
+    long totalGoodBefore = metrics.getNumSucceededAppsCreated();
+
+    goodSubCluster.getNewApplication(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededAppsCreated());
+    Assert.assertEquals(100, metrics.getLatencySucceededAppsCreated(), 0);
+
+    goodSubCluster.getNewApplication(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededAppsCreated());
+    Assert.assertEquals(150, metrics.getLatencySucceededAppsCreated(), 0);
+  }
+
+  /**
+   * This test validates the correctness of the metric: Failed to create Apps.
+   */
+  @Test
+  public void testAppsFailedCreated() {
+
+    long totalBadBefore = metrics.getAppsFailedCreated();
+
+    badSubCluster.getNewApplication();
+
+    Assert.assertEquals(totalBadBefore + 1, metrics.getAppsFailedCreated());
+  }
+
+  /**
+   * This test validates the correctness of the metric: Submitted Apps
+   * successfully.
+   */
+  @Test
+  public void testSucceededAppsSubmitted() {
+
+    long totalGoodBefore = metrics.getNumSucceededAppsSubmitted();
+
+    goodSubCluster.submitApplication(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededAppsSubmitted());
+    Assert.assertEquals(100, metrics.getLatencySucceededAppsSubmitted(), 0);
+
+    goodSubCluster.submitApplication(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededAppsSubmitted());
+    Assert.assertEquals(150, metrics.getLatencySucceededAppsSubmitted(), 0);
+  }
+
+  /**
+   * This test validates the correctness of the metric: Failed to submit Apps.
+   */
+  @Test
+  public void testAppsFailedSubmitted() {
+
+    long totalBadBefore = metrics.getAppsFailedSubmitted();
+
+    badSubCluster.submitApplication();
+
+    Assert.assertEquals(totalBadBefore + 1, metrics.getAppsFailedSubmitted());
+  }
+
+  /**
+   * This test validates the correctness of the metric: Killed Apps
+   * successfully.
+   */
+  @Test
+  public void testSucceededAppsKilled() {
+
+    long totalGoodBefore = metrics.getNumSucceededAppsKilled();
+
+    goodSubCluster.forceKillApplication(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededAppsKilled());
+    Assert.assertEquals(100, metrics.getLatencySucceededAppsKilled(), 0);
+
+    goodSubCluster.forceKillApplication(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededAppsKilled());
+    Assert.assertEquals(150, metrics.getLatencySucceededAppsKilled(), 0);
+  }
+
+  /**
+   * This test validates the correctness of the metric: Failed to kill Apps.
+   */
+  @Test
+  public void testAppsFailedKilled() {
+
+    long totalBadBefore = metrics.getAppsFailedKilled();
+
+    badSubCluster.forceKillApplication();
+
+    Assert.assertEquals(totalBadBefore + 1, metrics.getAppsFailedKilled());
+  }
+
+  /**
+   * This test validates the correctness of the metric: Retrieved Apps
+   * successfully.
+   */
+  @Test
+  public void testSucceededAppsReport() {
+
+    long totalGoodBefore = metrics.getNumSucceededAppsRetrieved();
+
+    goodSubCluster.getApplicationReport(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededAppsRetrieved());
+    Assert.assertEquals(100, metrics.getLatencySucceededGetAppReport(), 0);
+
+    goodSubCluster.getApplicationReport(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededAppsRetrieved());
+    Assert.assertEquals(150, metrics.getLatencySucceededGetAppReport(), 0);
+  }
+
+  /**
+   * This test validates the correctness of the metric: Failed to retrieve Apps.
+   */
+  @Test
+  public void testAppsReportFailed() {
+
+    long totalBadBefore = metrics.getAppsFailedRetrieved();
+
+    badSubCluster.getApplicationReport();
+
+    Assert.assertEquals(totalBadBefore + 1, metrics.getAppsFailedRetrieved());
+  }
+
+  /**
+   * This test validates the correctness of the metric: Retrieved Multiple Apps
+   * successfully.
+   */
+  @Test
+  public void testSucceededMultipleAppsReport() {
+
+    long totalGoodBefore = metrics.getNumSucceededMultipleAppsRetrieved();
+
+    goodSubCluster.getApplicationsReport(100);
+
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededMultipleAppsRetrieved());
+    Assert.assertEquals(100, metrics.getLatencySucceededMultipleGetAppReport(),
+        0);
+
+    goodSubCluster.getApplicationsReport(200);
+
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededMultipleAppsRetrieved());
+    Assert.assertEquals(150, metrics.getLatencySucceededMultipleGetAppReport(),
+        0);
+  }
+
+  /**
+   * This test validates the correctness of the metric: Failed to retrieve
+   * Multiple Apps.
+   */
+  @Test
+  public void testMultipleAppsReportFailed() {
+
+    long totalBadBefore = metrics.getMultipleAppsFailedRetrieved();
+
+    badSubCluster.getApplicationsReport();
+
+    Assert.assertEquals(totalBadBefore + 1,
+        metrics.getMultipleAppsFailedRetrieved());
+  }
+
+  // Records failures for all calls
+  private class MockBadSubCluster {
+    public void getNewApplication() {
+      LOG.info("Mocked: failed getNewApplication call");
+      metrics.incrAppsFailedCreated();
+    }
+
+    public void submitApplication() {
+      LOG.info("Mocked: failed submitApplication call");
+      metrics.incrAppsFailedSubmitted();
+    }
+
+    public void forceKillApplication() {
+      LOG.info("Mocked: failed forceKillApplication call");
+      metrics.incrAppsFailedKilled();
+    }
+
+    public void getApplicationReport() {
+      LOG.info("Mocked: failed getApplicationReport call");
+      metrics.incrAppsFailedRetrieved();
+    }
+
+    public void getApplicationsReport() {
+      LOG.info("Mocked: failed getApplicationsReport call");
+      metrics.incrMultipleAppsFailedRetrieved();
+    }
+  }
+
+  // Records successes for all calls
+  private class MockGoodSubCluster {
+    public void getNewApplication(long duration) {
+      LOG.info("Mocked: successful getNewApplication call with duration {}",
+          duration);
+      metrics.succeededAppsCreated(duration);
+    }
+
+    public void submitApplication(long duration) {
+      LOG.info("Mocked: successful submitApplication call with duration {}",
+          duration);
+      metrics.succeededAppsSubmitted(duration);
+    }
+
+    public void forceKillApplication(long duration) {
+      LOG.info("Mocked: successful forceKillApplication call with duration {}",
+          duration);
+      metrics.succeededAppsKilled(duration);
+    }
+
+    public void getApplicationReport(long duration) {
+      LOG.info("Mocked: successful getApplicationReport call with duration {}",
+          duration);
+      metrics.succeededAppsRetrieved(duration);
+    }
+
+    public void getApplicationsReport(long duration) {
+      LOG.info("Mocked: successful getApplicationsReport call with duration {}",
+          duration);
+      metrics.succeededMultipleAppsRetrieved(duration);
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
index 91e601e..93527e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
@@ -18,26 +18,32 @@
 
 package org.apache.hadoop.yarn.server.router.webapp;
 
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * This class mocks the RESTRequestInterceptor.
  */
@@ -101,6 +107,27 @@
   }
 
   @Override
+  public AppsInfo getApps(HttpServletRequest hsr, String stateQuery,
+      Set<String> statesQuery, String finalStatusQuery, String userQuery,
+      String queueQuery, String count, String startedBegin, String startedEnd,
+      String finishBegin, String finishEnd, Set<String> applicationTypes,
+      Set<String> applicationTags, Set<String> unselectedFields) {
+    if (!isRunning) {
+      throw new RuntimeException("RM is stopped");
+    }
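+    // Each mock subcluster returns a single AM report whose ApplicationId
+    // embeds the subcluster id.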
+    AppsInfo appsInfo = new AppsInfo();
+    AppInfo appInfo = new AppInfo();
+
+    appInfo.setAppId(
+        ApplicationId.newInstance(Integer.valueOf(getSubClusterId().getId()),
+            applicationCounter.incrementAndGet()).toString());
+    appInfo.setAMHostHttpAddress("http://i_am_the_AM:1234");
+
+    appsInfo.add(appInfo);
+    return appsInfo;
+  }
+
+  @Override
   public Response updateAppState(AppState targetState, HttpServletRequest hsr,
       String appId) throws AuthorizationException, YarnException,
       InterruptedException, IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
index d918149..2ee62af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication;
 import org.junit.Assert;
 import org.junit.Test;
@@ -276,13 +277,11 @@
     ApplicationId appId =
         ApplicationId.newInstance(System.currentTimeMillis(), 1);
     AppState appState = new AppState("KILLED");
-    try {
-      interceptor.updateAppState(appState, null, appId.toString());
-      Assert.fail();
-    } catch (YarnException e) {
-      Assert.assertTrue(
-          e.getMessage().equals("Application " + appId + " does not exist"));
-    }
+
+    Response response =
+        interceptor.updateAppState(appState, null, appId.toString());
+    Assert.assertEquals(BAD_REQUEST, response.getStatus());
+
   }
 
   /**
@@ -376,4 +375,20 @@
     Assert.assertNull(response);
   }
 
+  /**
+   * This test validates the correctness of GetApplicationsReport in case each
+   * subcluster provided one application.
+   */
+  @Test
+  public void testGetApplicationsReport()
+      throws YarnException, IOException, InterruptedException {
+
+    AppsInfo responseGet = interceptor.getApps(null, null, null, null, null,
+        null, null, null, null, null, null, null, null, null);
+
+    Assert.assertNotNull(responseGet);
+    Assert.assertEquals(NUM_SUBCLUSTER, responseGet.getApps().size());
+    // The merge operations are tested in TestRouterWebServiceUtil
+  }
+
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java
index 48bc1a8..38b1027 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication;
 import org.apache.hadoop.yarn.server.router.clientrm.PassThroughClientRequestInterceptor;
 import org.apache.hadoop.yarn.server.router.clientrm.TestableFederationClientInterceptor;
@@ -271,4 +272,48 @@
             .getApplicationHomeSubCluster().getHomeSubCluster());
   }
 
+  /**
+   * This test validates the correctness of GetApps in case the cluster is
+   * composed of only 1 bad SubCluster.
+   */
+  @Test
+  public void testGetAppsOneBadSC()
+      throws YarnException, IOException, InterruptedException {
+
+    setupCluster(Arrays.asList(bad2));
+
+    AppsInfo response = interceptor.getApps(null, null, null, null, null, null,
+        null, null, null, null, null, null, null, null);
+    Assert.assertNull(response);
+  }
+
+  /**
+   * This test validates the correctness of GetApps in case the cluster is
+   * composed of only 2 bad SubClusters.
+   */
+  @Test
+  public void testGetAppsTwoBadSCs()
+      throws YarnException, IOException, InterruptedException {
+    setupCluster(Arrays.asList(bad1, bad2));
+
+    AppsInfo response = interceptor.getApps(null, null, null, null, null, null,
+        null, null, null, null, null, null, null, null);
+    Assert.assertNull(response);
+  }
+
+  /**
+   * This test validates the correctness of GetApps in case the cluster is
+   * composed of 1 bad SubCluster and 1 good one.
+   */
+  @Test
+  public void testGetAppsOneBadOneGood()
+      throws YarnException, IOException, InterruptedException {
+    setupCluster(Arrays.asList(good, bad2));
+
+    AppsInfo response = interceptor.getApps(null, null, null, null, null, null,
+        null, null, null, null, null, null, null, null);
+    Assert.assertNotNull(response);
+    Assert.assertEquals(1, response.getApps().size());
+  }
+
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
new file mode 100644
index 0000000..810432a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServiceUtil.java
@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router.webapp;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo;
+import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class to validate RouterWebServiceUtil methods.
+ */
+public class TestRouterWebServiceUtil {
+
+  private static final ApplicationId APPID1 = ApplicationId.newInstance(1, 1);
+  private static final ApplicationId APPID2 = ApplicationId.newInstance(2, 1);
+  private static final ApplicationId APPID3 = ApplicationId.newInstance(3, 1);
+  private static final ApplicationId APPID4 = ApplicationId.newInstance(4, 1);
+
+  /**
+   * This test validates the correctness of RouterWebServiceUtil#mergeAppsInfo
+   * in case we want to merge 4 AMs. The expected result would be the same 4
+   * AMs.
+   */
+  @Test
+  public void testMerge4DifferentApps() {
+
+    AppsInfo apps = new AppsInfo();
+    int value = 1000;
+
+    AppInfo app1 = new AppInfo();
+    app1.setAppId(APPID1.toString());
+    app1.setAMHostHttpAddress("http://i_am_the_AM1:1234");
+    app1.setState(YarnApplicationState.FINISHED);
+    app1.setNumAMContainerPreempted(value);
+    apps.add(app1);
+
+    AppInfo app2 = new AppInfo();
+    app2.setAppId(APPID2.toString());
+    app2.setAMHostHttpAddress("http://i_am_the_AM2:1234");
+    app2.setState(YarnApplicationState.ACCEPTED);
+    app2.setAllocatedVCores(2 * value);
+
+    apps.add(app2);
+
+    AppInfo app3 = new AppInfo();
+    app3.setAppId(APPID3.toString());
+    app3.setAMHostHttpAddress("http://i_am_the_AM3:1234");
+    app3.setState(YarnApplicationState.RUNNING);
+    app3.setReservedMB(3 * value);
+    apps.add(app3);
+
+    AppInfo app4 = new AppInfo();
+    app4.setAppId(APPID4.toString());
+    app4.setAMHostHttpAddress("http://i_am_the_AM4:1234");
+    app4.setState(YarnApplicationState.NEW);
+    app4.setAllocatedMB(4 * value);
+    apps.add(app4);
+
+    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
+    Assert.assertNotNull(result);
+    Assert.assertEquals(4, result.getApps().size());
+
+    List<String> appIds = new ArrayList<String>();
+    AppInfo appInfo1 = null, appInfo2 = null, appInfo3 = null, appInfo4 = null;
+    for (AppInfo app : result.getApps()) {
+      appIds.add(app.getAppId());
+      if (app.getAppId().equals(APPID1.toString())) {
+        appInfo1 = app;
+      }
+      if (app.getAppId().equals(APPID2.toString())) {
+        appInfo2 = app;
+      }
+      if (app.getAppId().equals(APPID3.toString())) {
+        appInfo3 = app;
+      }
+      if (app.getAppId().equals(APPID4.toString())) {
+        appInfo4 = app;
+      }
+    }
+
+    Assert.assertTrue(appIds.contains(APPID1.toString()));
+    Assert.assertTrue(appIds.contains(APPID2.toString()));
+    Assert.assertTrue(appIds.contains(APPID3.toString()));
+    Assert.assertTrue(appIds.contains(APPID4.toString()));
+
+    // Check preservations APP1
+    Assert.assertEquals(app1.getState(), appInfo1.getState());
+    Assert.assertEquals(app1.getNumAMContainerPreempted(),
+        appInfo1.getNumAMContainerPreempted());
+
+    // Check preservations APP2
+    Assert.assertEquals(app2.getState(), appInfo2.getState());
+    Assert.assertEquals(app2.getAllocatedVCores(),
+        appInfo2.getAllocatedVCores());
+
+    // Check preservations APP3
+    Assert.assertEquals(app3.getState(), appInfo3.getState());
+    Assert.assertEquals(app3.getReservedMB(), appInfo3.getReservedMB());
+
+    // Check preservations APP4
+    Assert.assertEquals(app4.getState(), appInfo4.getState());
+    Assert.assertEquals(app4.getAllocatedMB(), appInfo4.getAllocatedMB());
+  }
+
+  /**
+   * This test validates the correctness of RouterWebServiceUtil#mergeAppsInfo
+   * in case we want to merge 2 UAMs and their own AM. The status of the AM is
+   * FINISHED, so we check the correctness of the merging of the historical
+   * values. The expected result would be 1 report with the merged information.
+   */
+  @Test
+  public void testMergeAppsFinished() {
+
+    AppsInfo apps = new AppsInfo();
+
+    String amHost = "http://i_am_the_AM1:1234";
+    AppInfo am = new AppInfo();
+    am.setAppId(APPID1.toString());
+    am.setAMHostHttpAddress(amHost);
+    am.setState(YarnApplicationState.FINISHED);
+
+    int value = 1000;
+    setAppInfoFinished(am, value);
+
+    apps.add(am);
+
+    AppInfo uam1 = new AppInfo();
+    uam1.setAppId(APPID1.toString());
+    apps.add(uam1);
+
+    setAppInfoFinished(uam1, value);
+
+    AppInfo uam2 = new AppInfo();
+    uam2.setAppId(APPID1.toString());
+    apps.add(uam2);
+
+    setAppInfoFinished(uam2, value);
+
+    // in this case the result does not change if we enable partial result
+    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
+    Assert.assertNotNull(result);
+    Assert.assertEquals(1, result.getApps().size());
+
+    AppInfo app = result.getApps().get(0);
+
+    Assert.assertEquals(APPID1.toString(), app.getAppId());
+    Assert.assertEquals(amHost, app.getAMHostHttpAddress());
+    Assert.assertEquals(value * 3, app.getPreemptedResourceMB());
+    Assert.assertEquals(value * 3, app.getPreemptedResourceVCores());
+    Assert.assertEquals(value * 3, app.getNumNonAMContainerPreempted());
+    Assert.assertEquals(value * 3, app.getNumAMContainerPreempted());
+    Assert.assertEquals(value * 3, app.getPreemptedMemorySeconds());
+    Assert.assertEquals(value * 3, app.getPreemptedVcoreSeconds());
+  }
+
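+  // Sets the historical (post-completion) metrics that mergeAppsInfo sums.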
+  private void setAppInfoFinished(AppInfo am, int value) {
+    am.setPreemptedResourceMB(value);
+    am.setPreemptedResourceVCores(value);
+    am.setNumNonAMContainerPreempted(value);
+    am.setNumAMContainerPreempted(value);
+    am.setPreemptedMemorySeconds(value);
+    am.setPreemptedVcoreSeconds(value);
+  }
+
+  /**
+   * This test validates the correctness of RouterWebServiceUtil#mergeAppsInfo
+   * in case we want to merge 2 UAMs and their own AM. The status of the AM is
+   * RUNNING, so we check the correctness of the merging of the runtime values.
+   * The expected result would be 1 report with the merged information.
+   */
+  @Test
+  public void testMergeAppsRunning() {
+
+    AppsInfo apps = new AppsInfo();
+
+    String amHost = "http://i_am_the_AM2:1234";
+    AppInfo am = new AppInfo();
+    am.setAppId(APPID2.toString());
+    am.setAMHostHttpAddress(amHost);
+    am.setState(YarnApplicationState.RUNNING);
+
+    int value = 1000;
+    setAppInfoRunning(am, value);
+
+    apps.add(am);
+
+    AppInfo uam1 = new AppInfo();
+    uam1.setAppId(APPID2.toString());
+    uam1.setState(YarnApplicationState.RUNNING);
+    apps.add(uam1);
+
+    setAppInfoRunning(uam1, value);
+
+    AppInfo uam2 = new AppInfo();
+    uam2.setAppId(APPID2.toString());
+    uam2.setState(YarnApplicationState.RUNNING);
+    apps.add(uam2);
+
+    setAppInfoRunning(uam2, value);
+
+    // in this case the result does not change if we enable partial result
+    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
+    Assert.assertNotNull(result);
+    Assert.assertEquals(1, result.getApps().size());
+
+    AppInfo app = result.getApps().get(0);
+
+    Assert.assertEquals(APPID2.toString(), app.getAppId());
+    Assert.assertEquals(amHost, app.getAMHostHttpAddress());
+    Assert.assertEquals(value * 3, app.getAllocatedMB());
+    Assert.assertEquals(value * 3, app.getAllocatedVCores());
+    Assert.assertEquals(value * 3, app.getReservedMB());
+    Assert.assertEquals(value * 3, app.getReservedVCores());
+    Assert.assertEquals(value * 3, app.getRunningContainers());
+    Assert.assertEquals(value * 3, app.getMemorySeconds());
+    Assert.assertEquals(value * 3, app.getVcoreSeconds());
+    Assert.assertEquals(3, app.getResourceRequests().size());
+  }
+
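+  // Sets the runtime metrics that mergeAppsInfo sums for RUNNING reports.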
+  private void setAppInfoRunning(AppInfo am, int value) {
+    am.getResourceRequests().add(new ResourceRequestInfo());
+    am.setAllocatedMB(value);
+    am.setAllocatedVCores(value);
+    am.setReservedMB(value);
+    am.setReservedVCores(value);
+    am.setRunningContainers(value);
+    am.setMemorySeconds(value);
+    am.setVcoreSeconds(value);
+  }
+
+  /**
+   * This test validates the correctness of RouterWebServiceUtil#mergeAppsInfo
+   * in case we want to merge 2 UAMs without their own AM. The expected result
+   * would be an empty report or a partial report of the 2 UAMs depending on the
+   * selected policy.
+   */
+  @Test
+  public void testMerge2UAM() {
+
+    AppsInfo apps = new AppsInfo();
+
+    AppInfo app1 = new AppInfo();
+    app1.setAppId(APPID1.toString());
+    app1.setName(UnmanagedApplicationManager.APP_NAME);
+    app1.setState(YarnApplicationState.RUNNING);
+    apps.add(app1);
+
+    AppInfo app2 = new AppInfo();
+    app2.setAppId(APPID1.toString());
+    app2.setName(UnmanagedApplicationManager.APP_NAME);
+    app2.setState(YarnApplicationState.RUNNING);
+    apps.add(app2);
+
+    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
+    Assert.assertNotNull(result);
+    Assert.assertEquals(0, result.getApps().size());
+
+    // By enabling partial result, the expected result would be a partial report
+    // of the 2 UAMs
+    AppsInfo result2 = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), true);
+    Assert.assertNotNull(result2);
+    Assert.assertEquals(1, result2.getApps().size());
+    Assert.assertEquals(YarnApplicationState.RUNNING,
+        result2.getApps().get(0).getState());
+  }
+
+  /**
+   * This test validates the correctness of RouterWebServiceUtil#mergeAppsInfo
+   * in case we want to merge 1 UAM that does not depend on Federation. The
+   * expected result would be the same app report.
+   */
+  @Test
+  public void testMergeUAM() {
+
+    AppsInfo apps = new AppsInfo();
+
+    AppInfo app1 = new AppInfo();
+    app1.setAppId(APPID1.toString());
+    app1.setName("Test");
+    apps.add(app1);
+
+    // in this case the result does not change if we enable partial result
+    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
+    Assert.assertNotNull(result);
+    Assert.assertEquals(1, result.getApps().size());
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index f182236..babe9fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -125,6 +125,17 @@
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
index 8cc2dee..ba14491 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.yarn.server;
 
+import java.io.File;
 import java.io.IOException;
+import java.util.UUID;
 
+import org.junit.AfterClass;
 import org.junit.Assert;
-
+import org.junit.BeforeClass;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,9 +35,38 @@
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.kerby.util.IOUtil;
 import org.junit.Test;
 
 public class TestRMNMSecretKeys {
+  private static final String KRB5_CONF = "java.security.krb5.conf";
+  private static final File KRB5_CONF_ROOT_DIR = new File(
+      System.getProperty("test.build.dir", "target/test-dir"),
+          UUID.randomUUID().toString());
+
+  @BeforeClass
+  public static void setup() throws IOException {
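+    // Write a minimal krb5.conf so that the test does not depend on the
+    // Kerberos configuration of the host environment.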
+    KRB5_CONF_ROOT_DIR.mkdir();
+    File krb5ConfFile = new File(KRB5_CONF_ROOT_DIR, "krb5.conf");
+    krb5ConfFile.createNewFile();
+    String content = "[libdefaults]\n" +
+        "    default_realm = APACHE.ORG\n" +
+        "    udp_preference_limit = 1\n"+
+        "    extra_addresses = 127.0.0.1\n" +
+        "[realms]\n" +
+        "    APACHE.ORG = {\n" +
+        "        admin_server = localhost:88\n" +
+        "        kdc = localhost:88\n}\n" +
+        "[domain_realm]\n" +
+        "    localhost = APACHE.ORG";
+    IOUtil.writeFile(content, krb5ConfFile);
+    System.setProperty(KRB5_CONF, krb5ConfFile.getAbsolutePath());
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    KRB5_CONF_ROOT_DIR.delete();
+  }
 
   @Test(timeout = 1000000)
   public void testNMUpdation() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
index 07058f6..6a5ef55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
@@ -80,7 +81,7 @@
       auxService =
           PerNodeTimelineCollectorsAuxService.launchServer(new String[0],
               collectorManager, conf);
-      auxService.addApplication(ApplicationId.newInstance(0, 1));
+      auxService.addApplication(ApplicationId.newInstance(0, 1), "user");
     } catch (ExitUtil.ExitException e) {
       fail();
     }
@@ -99,9 +100,9 @@
     TimelineV2Client client =
         TimelineV2Client.createTimelineClient(ApplicationId.newInstance(0, 1));
     try {
-      // set the timeline service address manually
-      client.setTimelineServiceAddress(
-          collectorManager.getRestServerBindAddress());
+      // Set the timeline service address manually.
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(
+          collectorManager.getRestServerBindAddress()));
       client.init(conf);
       client.start();
       TimelineEntity entity = new TimelineEntity();
@@ -126,9 +127,9 @@
     TimelineV2Client client =
         TimelineV2Client.createTimelineClient(appId);
     try {
-      // set the timeline service address manually
-      client.setTimelineServiceAddress(
-          collectorManager.getRestServerBindAddress());
+      // Set the timeline service address manually.
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(
+          collectorManager.getRestServerBindAddress()));
       client.init(conf);
       client.start();
       ClusterEntity cluster = new ClusterEntity();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
new file mode 100644
index 0000000..75f17fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timelineservice.collector.AppLevelTimelineCollector;
+import org.apache.hadoop.yarn.server.timelineservice.collector.NodeTimelineCollectorManager;
+import org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineReaderImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests timeline authentication filter based security for timeline service v2.
+ */
+@RunWith(Parameterized.class)
+public class TestTimelineAuthFilterForV2 {
+
+  private static final String FOO_USER = "foo";
+  private static final String HTTP_USER = "HTTP";
+  private static final File TEST_ROOT_DIR = new File(
+      System.getProperty("test.build.dir", "target" + File.separator +
+          "test-dir"), UUID.randomUUID().toString());
+  private static final String BASEDIR =
+      System.getProperty("test.build.dir", "target/test-dir") + "/"
+          + TestTimelineAuthFilterForV2.class.getSimpleName();
+  private static File httpSpnegoKeytabFile = new File(KerberosTestUtils.
+      getKeytabFile());
+  private static String httpSpnegoPrincipal = KerberosTestUtils.
+      getServerPrincipal();
+
+  // The first parameter indicates whether access is over HTTPS or HTTP, and
+  // the second indicates whether access is Kerberos based or token based.
+  @Parameterized.Parameters
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][] {{false, true}, {false, false},
+        {true, false}, {true, true}});
+  }
+
+  private static MiniKdc testMiniKDC;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private static UserGroupInformation nonKerberosUser;
+  static {
+    try {
+      nonKerberosUser = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {
+      // Should not happen before any Kerberos login has been performed.
+    }
+  }
+  // Indicates whether HTTPS or HTTP access.
+  private boolean withSsl;
+  // Indicates whether Kerberos based login is used or token based access is
+  // done.
+  private boolean withKerberosLogin;
+  private NodeTimelineCollectorManager collectorManager;
+  private PerNodeTimelineCollectorsAuxService auxService;
+  public TestTimelineAuthFilterForV2(boolean withSsl,
+      boolean withKerberosLogin) {
+    this.withSsl = withSsl;
+    this.withKerberosLogin = withKerberosLogin;
+  }
+
+  @BeforeClass
+  public static void setup() {
+    try {
+      testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR);
+      testMiniKDC.start();
+      testMiniKDC.createPrincipal(
+          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
+    } catch (Exception e) {
+      fail("Couldn't setup MiniKDC.");
+    }
+
+    // Setup timeline service v2.
+    try {
+      conf = new Configuration(false);
+      conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
+          "kerberos");
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal);
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+          "kerberos");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
+          httpSpnegoPrincipal);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      // Enable timeline service v2
+      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+      conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+      conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+          FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, "localhost");
+      conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
+          TEST_ROOT_DIR.getAbsolutePath());
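+      // Let the HTTP/SPNEGO principal proxy as FOO_USER from any host.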
+      conf.set("hadoop.proxyuser.HTTP.hosts", "*");
+      conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
+      UserGroupInformation.setConfiguration(conf);
+    } catch (Exception e) {
+      fail("Couldn't setup TimelineServer V2.");
+    }
+  }
+
+  @Before
+  public void initialize() throws Exception {
+    if (withSsl) {
+      conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+          HttpConfig.Policy.HTTPS_ONLY.name());
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+      base.mkdirs();
+      keystoresDir = new File(BASEDIR).getAbsolutePath();
+      sslConfDir =
+          KeyStoreTestUtil.getClasspathDir(TestTimelineAuthFilterForV2.class);
+      KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    } else {
+      conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+          HttpConfig.Policy.HTTP_ONLY.name());
+    }
+    if (!withKerberosLogin) {
+      // For timeline delegation token based access, set the token renew
+      // interval to 100 ms to verify that the token for the app is renewed
+      // automatically while the app is still alive.
+      conf.setLong(
+          YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL, 100);
+      // Set the token max lifetime to 4 seconds to verify that the token for
+      // the app is regenerated automatically while the app is still alive.
+      conf.setLong(
+          YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME, 4000);
+    }
+    UserGroupInformation.setConfiguration(conf);
+    collectorManager = new DummyNodeTimelineCollectorManager();
+    auxService = PerNodeTimelineCollectorsAuxService.launchServer(
+        new String[0], collectorManager, conf);
+    if (withKerberosLogin) {
+      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, "localhost");
+    }
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    auxService.addApplication(
+        appId, UserGroupInformation.getCurrentUser().getUserName());
+    if (!withKerberosLogin) {
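+      // For token based access, fetch the delegation token generated for the
+      // app collector, rewrite its service address to use the hostname, and
+      // add it to the current UGI so subsequent requests authenticate with it.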
+      AppLevelTimelineCollector collector =
+          (AppLevelTimelineCollector)collectorManager.get(appId);
+      Token<TimelineDelegationTokenIdentifier> token =
+          collector.getDelegationTokenForApp();
+      token.setService(new Text("localhost" + token.getService().toString().
+          substring(token.getService().toString().indexOf(":"))));
+      UserGroupInformation.getCurrentUser().addToken(token);
+    }
+  }
+
+  private TimelineV2Client createTimelineClientForUGI(ApplicationId appId) {
+    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
+    // set the timeline service address.
+    String restBindAddr = collectorManager.getRestServerBindAddress();
+    String addr =
+        "localhost" + restBindAddr.substring(restBindAddr.indexOf(":"));
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(addr));
+    client.init(conf);
+    client.start();
+    return client;
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (testMiniKDC != null) {
+      testMiniKDC.stop();
+    }
+    FileUtil.fullyDelete(TEST_ROOT_DIR);
+  }
+
+  @After
+  public void destroy() throws Exception {
+    if (auxService != null) {
+      auxService.stop();
+    }
+    if (withSsl) {
+      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+      FileUtil.fullyDelete(new File(BASEDIR));
+    }
+    if (withKerberosLogin) {
+      UserGroupInformation.getCurrentUser().logoutUserFromKeytab();
+    }
+    // Reset the user for next run.
+    UserGroupInformation.setLoginUser(
+        UserGroupInformation.createRemoteUser(nonKerberosUser.getUserName()));
+  }
+
+  private static TimelineEntity createEntity(String id, String type) {
+    TimelineEntity entityToStore = new TimelineEntity();
+    entityToStore.setId(id);
+    entityToStore.setType(type);
+    entityToStore.setCreatedTime(0L);
+    return entityToStore;
+  }
+
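+  // Asserts that the entity was written to the file system store and can be
+  // read back with the expected id and type.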
+  private static void verifyEntity(File entityTypeDir, String id, String type)
+      throws IOException {
+    File entityFile = new File(entityTypeDir, id +
+        FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_EXTENSION);
+    assertTrue(entityFile.exists());
+    TimelineEntity entity = readEntityFile(entityFile);
+    assertNotNull(entity);
+    assertEquals(id, entity.getId());
+    assertEquals(type, entity.getType());
+  }
+
+  private static TimelineEntity readEntityFile(File entityFile)
+      throws IOException {
+    try (BufferedReader reader =
+        new BufferedReader(new FileReader(entityFile))) {
+      String strLine;
+      while ((strLine = reader.readLine()) != null) {
+        if (strLine.trim().length() > 0) {
+          return FileSystemTimelineReaderImpl.
+              getTimelineRecordFromJSON(strLine.trim(), TimelineEntity.class);
+        }
+      }
+      return null;
+    }
+  }
+
+  private void publishAndVerifyEntity(ApplicationId appId, File entityTypeDir,
+      String entityType, int numEntities) throws Exception {
+    TimelineV2Client client = createTimelineClientForUGI(appId);
+    try {
+      // Sync call. Results available immediately.
+      client.putEntities(createEntity("entity1", entityType));
+      assertEquals(numEntities, entityTypeDir.listFiles().length);
+      verifyEntity(entityTypeDir, "entity1", entityType);
+      // Async call.
+      client.putEntitiesAsync(createEntity("entity2", entityType));
+    } finally {
+      client.stop();
+    }
+  }
+
+  private boolean publishWithRetries(ApplicationId appId, File entityTypeDir,
+      String entityType, int numEntities) throws Exception {
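+    // The collector may transiently reject a publish (e.g. while its token is
+    // being renewed or regenerated), so retry a few times before giving up.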
+    for (int i = 0; i < 10; i++) {
+      try {
+        publishAndVerifyEntity(appId, entityTypeDir, entityType, numEntities);
+      } catch (YarnException e) {
+        Thread.sleep(50);
+        continue;
+      }
+      return true;
+    }
+    return false;
+  }
+
+  @Test
+  public void testPutTimelineEntities() throws Exception {
+    final String entityType = "dummy_type";
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
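+    // FileSystemTimelineWriterImpl stores entities under
+    // entities/<cluster>/<user>/<flow>/<flow version>/<run id>/<app>/<type>,
+    // which is the directory checked below.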
+    File entityTypeDir = new File(TEST_ROOT_DIR.getAbsolutePath() +
+        File.separator + "entities" + File.separator +
+        YarnConfiguration.DEFAULT_RM_CLUSTER_ID + File.separator +
+        UserGroupInformation.getCurrentUser().getUserName() +
+        File.separator + "test_flow_name" + File.separator +
+        "test_flow_version" + File.separator + "1" + File.separator +
+        appId.toString() + File.separator + entityType);
+    try {
+      if (withKerberosLogin) {
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+          @Override
+          public Void call() throws Exception {
+            publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
+            return null;
+          }
+        });
+      } else {
+        assertTrue("Entities should have been published successfully.",
+            publishWithRetries(appId, entityTypeDir, entityType, 1));
+
+        AppLevelTimelineCollector collector =
+            (AppLevelTimelineCollector) collectorManager.get(appId);
+        Token<TimelineDelegationTokenIdentifier> token =
+            collector.getDelegationTokenForApp();
+        assertNotNull(token);
+
+        // Verify if token is renewed automatically and entities can still be
+        // published.
+        Thread.sleep(1000);
+        // Entities should publish successfully after renewal.
+        assertTrue("Entities should have been published successfully.",
+            publishWithRetries(appId, entityTypeDir, entityType, 2));
+        assertNotNull(collector);
+        verify(collectorManager.getTokenManagerService(), atLeastOnce()).
+            renewToken(eq(collector.getDelegationTokenForApp()),
+                any(String.class));
+
+        // Wait for the token lifetime to expire and verify that the token is
+        // regenerated automatically.
+        Thread.sleep(3000);
+        for (int i = 0; i < 40; i++) {
+          if (!token.equals(collector.getDelegationTokenForApp())) {
+            break;
+          }
+          Thread.sleep(50);
+        }
+        assertNotEquals("Token should have been regenerated.", token,
+            collector.getDelegationTokenForApp());
+        Thread.sleep(1000);
+        // Try publishing with the old token in UGI. Publishing should fail due
+        // to invalid token.
+        try {
+          publishAndVerifyEntity(appId, entityTypeDir, entityType, 2);
+          fail("Exception should have been thrown due to Invalid Token.");
+        } catch (YarnException e) {
+          assertTrue("Exception thrown should have been due to Invalid Token.",
+              e.getCause().getMessage().contains("InvalidToken"));
+        }
+
+        // Update the regenerated token in UGI and retry publishing entities.
+        Token<TimelineDelegationTokenIdentifier> regeneratedToken =
+            collector.getDelegationTokenForApp();
+        regeneratedToken.setService(new Text("localhost" +
+            regeneratedToken.getService().toString().substring(
+            regeneratedToken.getService().toString().indexOf(":"))));
+        UserGroupInformation.getCurrentUser().addToken(regeneratedToken);
+        assertTrue("Entities should have been published successfully.",
+            publishWithRetries(appId, entityTypeDir, entityType, 2));
+        // Token was generated twice, once when app collector was created and
+        // later after token lifetime expiry.
+        verify(collectorManager.getTokenManagerService(), times(2)).
+            generateToken(any(UserGroupInformation.class), any(String.class));
+        assertEquals(1, ((DummyNodeTimelineCollectorManager) collectorManager).
+            getTokenExpiredCnt());
+      }
+      // Wait for async entity to be published.
+      for (int i = 0; i < 50; i++) {
+        if (entityTypeDir.listFiles().length == 2) {
+          break;
+        }
+        Thread.sleep(50);
+      }
+      assertEquals(2, entityTypeDir.listFiles().length);
+      verifyEntity(entityTypeDir, "entity2", entityType);
+      AppLevelTimelineCollector collector =
+          (AppLevelTimelineCollector)collectorManager.get(appId);
+      assertNotNull(collector);
+      auxService.removeApplication(appId);
+      verify(collectorManager.getTokenManagerService()).cancelToken(
+          eq(collector.getDelegationTokenForApp()), any(String.class));
+    } finally {
+      FileUtils.deleteQuietly(entityTypeDir);
+    }
+  }
+
+  private static class DummyNodeTimelineCollectorManager extends
+      NodeTimelineCollectorManager {
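+    // Collector manager wired with a mocked NM protocol and a spied token
+    // manager so that token generation, renewal, cancellation and expiry can
+    // be observed by the test. tokenExpiredCnt is only expected to be written
+    // by the secret manager's expiry thread, so a volatile int suffices.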
+    private volatile int tokenExpiredCnt = 0;
+    DummyNodeTimelineCollectorManager() {
+      super();
+    }
+
+    private int getTokenExpiredCnt() {
+      return tokenExpiredCnt;
+    }
+
+    @Override
+    protected TimelineV2DelegationTokenSecretManagerService
+        createTokenManagerService() {
+      return spy(new TimelineV2DelegationTokenSecretManagerService() {
+        @Override
+        protected AbstractDelegationTokenSecretManager
+            <TimelineDelegationTokenIdentifier>
+            createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+                long tokenMaxLifetime, long tokenRenewInterval,
+                long tokenRemovalScanInterval) {
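+          // Use a 2 second token removal scan interval (instead of the given
+          // one) so expired tokens are detected quickly by the test.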
+          return spy(new TimelineV2DelegationTokenSecretManager(
+              secretKeyInterval, tokenMaxLifetime, tokenRenewInterval, 2000L) {
+            @Override
+            protected void logExpireToken(
+                TimelineDelegationTokenIdentifier ident) throws IOException {
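+              // Count expirations so the test can assert regeneration.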
+              tokenExpiredCnt++;
+            }
+          });
+        }
+      });
+    }
+
+    @Override
+    protected CollectorNodemanagerProtocol getNMCollectorService() {
+      CollectorNodemanagerProtocol protocol =
+          mock(CollectorNodemanagerProtocol.class);
+      try {
+        GetTimelineCollectorContextResponse response =
+            GetTimelineCollectorContextResponse.newInstance(
+                UserGroupInformation.getCurrentUser().getUserName(),
+                "test_flow_name", "test_flow_version", 1L);
+        when(protocol.getTimelineCollectorContext(any(
+            GetTimelineCollectorContextRequest.class))).thenReturn(response);
+      } catch (YarnException | IOException e) {
+        fail();
+      }
+      return protocol;
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
deleted file mode 100644
index 121ac6d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-[libdefaults]
-	default_realm = APACHE.ORG
-	udp_preference_limit = 1
-	extra_addresses = 127.0.0.1
-[realms]
-	APACHE.ORG = {
-		admin_server = localhost:88
-		kdc = localhost:88
-	}
-[domain_realm]
-	localhost = APACHE.ORG
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
new file mode 100644
index 0000000..3519c3f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.util.List;
+
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.Assert;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.GenericType;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Test Base for TimelineReaderServer HBase tests.
+ */
+public abstract class AbstractTimelineReaderHBaseTestBase {
+  private static int serverPort;
+  private static TimelineReaderServer server;
+  private static HBaseTestingUtility util;
+
+  public static void setup() throws Exception {
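+    // Bring up an HBase mini cluster and create the timeline schema; meant to
+    // be invoked once from a subclass @BeforeClass method.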
+    util = new HBaseTestingUtility();
+    Configuration conf = util.getConfiguration();
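+    // HFile format v3 is needed for the cell tags used by the timeline
+    // service schema.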
+    conf.setInt("hfile.format.version", 3);
+    util.startMiniCluster();
+    DataGeneratorForTest.createSchema(util.getConfiguration());
+  }
+
+  public static void tearDown() throws Exception {
+    if (server != null) {
+      server.stop();
+      server = null;
+    }
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  protected static void initialize() throws Exception {
+    try {
+      Configuration config = util.getConfiguration();
+      config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+      config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+          "localhost:0");
+      config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
+          "org.apache.hadoop.yarn.server.timelineservice.storage."
+              + "HBaseTimelineReaderImpl");
+      config.setInt("hfile.format.version", 3);
+      server = new TimelineReaderServer() {
+        @Override
+        protected void addFilters(Configuration conf) {
+          // The parent code uses hadoop-common jar from this version of
+          // Hadoop, but the tests are using hadoop-common jar from
+          // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
+          // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
+          // are many differences, including classnames and packages.
+          // We do nothing here, so that we don't cause a NoSuchMethodError or
+          // NoClassDefFoundError.
+          // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
+          // we should be able to remove this @Override.
+        }
+      };
+      server.init(config);
+      server.start();
+      serverPort = server.getWebServerPort();
+    } catch (Exception e) {
+      Assert.fail("Web server failed to start");
+    }
+  }
+
+  protected Client createClient() {
+    ClientConfig cfg = new DefaultClientConfig();
+    cfg.getClasses().add(YarnJacksonJaxbJsonProvider.class);
+    return new Client(
+        new URLConnectionClientHandler(new DummyURLConnectionFactory()), cfg);
+  }
+
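+  /**
+   * Fetches the given URI as JSON, throwing an IOException unless the reader
+   * returns 200 OK.
+   */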
+  protected ClientResponse getResponse(Client client, URI uri)
+      throws Exception {
+    ClientResponse resp =
+        client.resource(uri).accept(MediaType.APPLICATION_JSON)
+            .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    if (resp == null || resp.getStatusInfo()
+        .getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {
+      String msg = "";
+      if (resp != null) {
+        msg = String.valueOf(resp.getStatusInfo().getStatusCode());
+      }
+      throw new IOException(
+          "Incorrect response from timeline reader. " + "Status=" + msg);
+    }
+    return resp;
+  }
+
+  protected void verifyHttpResponse(Client client, URI uri, Status status) {
+    ClientResponse resp =
+        client.resource(uri).accept(MediaType.APPLICATION_JSON)
+            .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertNotNull(resp);
+    assertTrue("Response from server should have been " + status,
+        resp.getStatusInfo().getStatusCode() == status.getStatusCode());
+    System.out.println("Response is: " + resp.getEntity(String.class));
+  }
+
+  protected List<FlowActivityEntity> verifyFlowEntites(Client client, URI uri,
+      int noOfEntities) throws Exception {
+    ClientResponse resp = getResponse(client, uri);
+    List<FlowActivityEntity> entities =
+        resp.getEntity(new GenericType<List<FlowActivityEntity>>() {
+        });
+    assertNotNull(entities);
+    assertEquals(noOfEntities, entities.size());
+    return entities;
+  }
+
+  protected static class DummyURLConnectionFactory
+      implements HttpURLConnectionFactory {
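+    // Opens plain HTTP connections, unwrapping UndeclaredThrowableException
+    // into an IOException for the caller.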
+
+    @Override
+    public HttpURLConnection getHttpURLConnection(final URL url)
+        throws IOException {
+      try {
+        return (HttpURLConnection) url.openConnection();
+      } catch (UndeclaredThrowableException e) {
+        throw new IOException(e.getCause());
+      }
+    }
+  }
+
+  protected static HBaseTestingUtility getHBaseTestingUtility() {
+    return util;
+  }
+
+  public static int getServerPort() {
+    return serverPort;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 3f8978c..b2029ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -24,10 +24,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.net.HttpURLConnection;
 import java.net.URI;
-import java.net.URL;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -39,7 +36,9 @@
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -48,16 +47,12 @@
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
-import org.junit.After;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -67,27 +62,27 @@
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.GenericType;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
-import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
 
-public class TestTimelineReaderWebServicesHBaseStorage {
-  private int serverPort;
-  private TimelineReaderServer server;
-  private static HBaseTestingUtility util;
+/**
+ * Tests TimelineReader web service REST APIs.
+ */
+public class TestTimelineReaderWebServicesHBaseStorage
+    extends AbstractTimelineReaderHBaseTestBase {
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
       HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  private static String doAsUser = "remoteuser";
 
   @BeforeClass
-  public static void setup() throws Exception {
-    util = new HBaseTestingUtility();
-    Configuration conf = util.getConfiguration();
-    conf.setInt("hfile.format.version", 3);
-    util.startMiniCluster();
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+  public static void setupBeforeClass() throws Exception {
+    setup();
     loadData();
+    initialize();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    tearDown();
   }
 
   private static void loadData() throws Exception {
@@ -214,7 +209,7 @@
     entity4.addMetrics(metrics);
     te4.addEntity(entity4);
 
-    TimelineEntities te5 = new TimelineEntities();
+    TimelineEntities userEntities = new TimelineEntities();
     TimelineEntity entity5 = new TimelineEntity();
     entity5.setId("entity1");
     entity5.setType("type1");
@@ -270,7 +265,7 @@
     relatesTo1.put("type3",
         Sets.newHashSet("entity31", "entity35", "entity32", "entity33"));
     entity5.addRelatesToEntities(relatesTo1);
-    te5.addEntity(entity5);
+    userEntities.addEntity(entity5);
 
     TimelineEntity entity6 = new TimelineEntity();
     entity6.setId("entity2");
@@ -329,20 +324,35 @@
     relatesTo2.put("type6", Sets.newHashSet("entity61", "entity66"));
     relatesTo2.put("type3", Sets.newHashSet("entity31"));
     entity6.addRelatesToEntities(relatesTo2);
-    te5.addEntity(entity6);
+    userEntities.addEntity(entity6);
+
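+    // Add 10 entities of type "entitytype" whose idPrefix decreases as the
+    // numeric id suffix increases, so reader tests can exercise id-prefix
+    // based ordering.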
+    for (long i = 1; i <= 10; i++) {
+      TimelineEntity userEntity = new TimelineEntity();
+      userEntity.setType("entitytype");
+      userEntity.setId("entityid-" + i);
+      userEntity.setIdPrefix(11 - i);
+      userEntity.setCreatedTime(ts);
+      userEntities.addEntity(userEntity);
+    }
 
     HBaseTimelineWriterImpl hbi = null;
-    Configuration c1 = util.getConfiguration();
+    Configuration c1 = getHBaseTestingUtility().getConfiguration();
+    UserGroupInformation remoteUser =
+        UserGroupInformation.createRemoteUser(doAsUser);
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
-      hbi.write(cluster, user, flow, flowVersion, runid, entity1.getId(), te1);
-      hbi.write(cluster, user, flow, flowVersion, runid1, entity4.getId(), te4);
-      hbi.write(cluster, user, flow2,
-          flowVersion2, runid2, entity3.getId(), te3);
-      hbi.write(cluster, user, flow, flowVersion, runid,
-          "application_1111111111_1111", te5);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, entity.getId()), te, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, entity1.getId()), te1, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid1, entity4.getId()), te4, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow2, flowVersion2,
+          runid2, entity3.getId()), te3, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, "application_1111111111_1111"), userEntities, remoteUser);
+      writeApplicationEntities(hbi, ts);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -351,80 +361,32 @@
     }
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
-    util.shutdownMiniCluster();
-  }
+  static void writeApplicationEntities(HBaseTimelineWriterImpl hbi,
+      long timestamp) throws IOException {
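+    // Write 15 application entities (3 flow runs x 5 apps each) to flow
+    // "flow1", each with a CREATED and a FINISHED event.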
+    int count = 1;
+    for (long i = 1; i <= 3; i++) {
+      for (int j = 1; j <= 5; j++) {
+        TimelineEntities te = new TimelineEntities();
+        ApplicationId appId =
+            BuilderUtils.newApplicationId(timestamp, count++);
+        ApplicationEntity appEntity = new ApplicationEntity();
+        appEntity.setId(
+            HBaseTimelineStorageUtils.convertApplicationIdToString(appId));
+        appEntity.setCreatedTime(timestamp);
 
-  @Before
-  public void init() throws Exception {
-    try {
-      Configuration config = util.getConfiguration();
-      config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-      config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-          "localhost:0");
-      config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
-      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
-          "org.apache.hadoop.yarn.server.timelineservice.storage." +
-              "HBaseTimelineReaderImpl");
-      config.setInt("hfile.format.version", 3);
-      server = new TimelineReaderServer() {
-        @Override
-        protected void setupOptions(Configuration conf) {
-          // The parent code tries to use HttpServer2 from this version of
-          // Hadoop, but the tests are loading in HttpServer2 from
-          // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
-          // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
-          // are many differences, including classnames and packages.
-          // We do nothing here, so that we don't cause a NoSuchMethodError.
-          // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
-          // we should be able to remove this @Override.
-        }
-      };
-      server.init(config);
-      server.start();
-      serverPort = server.getWebServerPort();
-    } catch (Exception e) {
-      Assert.fail("Web server failed to start");
-    }
-  }
+        TimelineEvent created = new TimelineEvent();
+        created.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+        created.setTimestamp(timestamp);
+        appEntity.addEvent(created);
+        TimelineEvent finished = new TimelineEvent();
+        finished.setId(ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
+        finished.setTimestamp(timestamp + i * j);
 
-  private static Client createClient() {
-    ClientConfig cfg = new DefaultClientConfig();
-    cfg.getClasses().add(YarnJacksonJaxbJsonProvider.class);
-    return new Client(new URLConnectionClientHandler(
-        new DummyURLConnectionFactory()), cfg);
-  }
-
-  private static ClientResponse getResponse(Client client, URI uri)
-      throws Exception {
-    ClientResponse resp =
-        client.resource(uri).accept(MediaType.APPLICATION_JSON)
-        .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-    if (resp == null ||
-        resp.getStatusInfo().getStatusCode() !=
-            ClientResponse.Status.OK.getStatusCode()) {
-      String msg = "";
-      if (resp != null) {
-        msg = String.valueOf(resp.getStatusInfo().getStatusCode());
-      }
-      throw new IOException("Incorrect response from timeline reader. " +
-          "Status=" + msg);
-    }
-    return resp;
-  }
-
-  private static class DummyURLConnectionFactory
-      implements HttpURLConnectionFactory {
-
-    @Override
-    public HttpURLConnection getHttpURLConnection(final URL url)
-        throws IOException {
-      try {
-        return (HttpURLConnection)url.openConnection();
-      } catch (UndeclaredThrowableException e) {
-        throw new IOException(e.getCause());
+        appEntity.addEvent(finished);
+        te.addEntity(appEntity);
+        hbi.write(new TimelineCollectorContext("cluster1", "user1", "flow1",
+            "CF7022C10F1354", i, appEntity.getId()), te,
+            UserGroupInformation.createRemoteUser("user1"));
       }
     }
   }
@@ -470,22 +432,11 @@
     return false;
   }
 
-  private static void verifyHttpResponse(Client client, URI uri,
-      Status status) {
-    ClientResponse resp =
-        client.resource(uri).accept(MediaType.APPLICATION_JSON)
-        .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-    assertNotNull(resp);
-    assertTrue("Response from server should have been " + status,
-        resp.getStatusInfo().getStatusCode() == status.getStatusCode());
-    System.out.println("Response is: " + resp.getEntity(String.class));
-  }
-
   @Test
   public void testGetFlowRun() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678919");
       ClientResponse resp = getResponse(client, uri);
@@ -506,7 +457,7 @@
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919");
       resp = getResponse(client, uri);
       entity = resp.getEntity(FlowRunEntity.class);
@@ -531,7 +482,7 @@
   public void testGetFlowRuns() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowRunEntity> entities =
@@ -551,8 +502,9 @@
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "clusters/cluster1/users/user1/flows/flow_name/runs?limit=1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "clusters/cluster1/users/user1/flows/flow_name/runs?limit=1");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<FlowRunEntity>>(){});
       assertEquals(MediaType.APPLICATION_JSON_TYPE + "; charset=utf-8",
@@ -567,7 +519,7 @@
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimestart=1425016501030");
       resp = getResponse(client, uri);
@@ -584,7 +536,7 @@
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimestart=1425016500999&createdtimeend=1425016501035");
       resp = getResponse(client, uri);
@@ -604,7 +556,7 @@
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimeend=1425016501030");
       resp = getResponse(client, uri);
@@ -621,7 +573,7 @@
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "fields=metrics");
       resp = getResponse(client, uri);
@@ -644,7 +596,7 @@
 
       // fields as CONFIGS will lead to a HTTP 400 as it makes no sense for
       // flow runs.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "fields=CONFIGS");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
@@ -657,7 +609,7 @@
   public void testGetFlowRunsMetricsToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "metricstoretrieve=MAP_,HDFS_");
       ClientResponse resp = getResponse(client, uri);
@@ -677,7 +629,7 @@
       }
       assertEquals(3, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "metricstoretrieve=!(MAP_,HDFS_)");
       resp = getResponse(client, uri);
@@ -704,17 +656,17 @@
     Client client = createClient();
     try {
       // Query all flows.
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flows");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowActivityEntity> flowEntities =
           resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
       assertNotNull(flowEntities);
-      assertEquals(2, flowEntities.size());
+      assertEquals(3, flowEntities.size());
       List<String> listFlowUIDs = new ArrayList<String>();
       for (FlowActivityEntity entity : flowEntities) {
         String flowUID =
-            (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         listFlowUIDs.add(flowUID);
         assertEquals(TimelineUIDConverter.FLOW_UID.encodeUID(
             new TimelineReaderContext(entity.getCluster(), entity.getUser(),
@@ -722,13 +674,15 @@
         assertTrue((entity.getId().endsWith("@flow_name") &&
             entity.getFlowRuns().size() == 2) ||
             (entity.getId().endsWith("@flow_name2") &&
-            entity.getFlowRuns().size() == 1));
+                entity.getFlowRuns().size() == 1)
+            || (entity.getId().endsWith("@flow1")
+                && entity.getFlowRuns().size() == 3));
       }
 
       // Query flowruns based on UID returned in query above.
       List<String> listFlowRunUIDs = new ArrayList<String>();
       for (String flowUID : listFlowUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/flow-uid/" + flowUID + "/runs");
         resp = getResponse(client, uri);
         Set<FlowRunEntity> frEntities =
@@ -736,7 +690,7 @@
         assertNotNull(frEntities);
         for (FlowRunEntity entity : frEntities) {
           String flowRunUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listFlowRunUIDs.add(flowRunUID);
           assertEquals(TimelineUIDConverter.FLOWRUN_UID.encodeUID(
               new TimelineReaderContext("cluster1", entity.getUser(),
@@ -744,11 +698,11 @@
               flowRunUID);
         }
       }
-      assertEquals(3, listFlowRunUIDs.size());
+      assertEquals(6, listFlowRunUIDs.size());
 
       // Query single flowrun based on UIDs' returned in query to get flowruns.
       for (String flowRunUID : listFlowRunUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/run-uid/" + flowRunUID);
         resp = getResponse(client, uri);
         FlowRunEntity entity = resp.getEntity(FlowRunEntity.class);
@@ -760,7 +714,7 @@
       for (String flowRunUID : listFlowRunUIDs) {
         TimelineReaderContext context =
             TimelineUIDConverter.FLOWRUN_UID.decodeUID(flowRunUID);
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/run-uid/" + flowRunUID + "/apps");
         resp = getResponse(client, uri);
         Set<TimelineEntity> appEntities =
@@ -768,7 +722,7 @@
         assertNotNull(appEntities);
         for (TimelineEntity entity : appEntities) {
           String appUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listAppUIDs.add(appUID);
           assertEquals(TimelineUIDConverter.APPLICATION_UID.encodeUID(
               new TimelineReaderContext(context.getClusterId(),
@@ -776,11 +730,11 @@
               context.getFlowRunId(), entity.getId(), null, null)), appUID);
         }
       }
-      assertEquals(4, listAppUIDs.size());
+      assertEquals(19, listAppUIDs.size());
 
       // Query single app based on UIDs' returned in query to get apps.
       for (String appUID : listAppUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/app-uid/" + appUID);
         resp = getResponse(client, uri);
         TimelineEntity entity = resp.getEntity(TimelineEntity.class);
@@ -793,7 +747,7 @@
       for (String appUID : listAppUIDs) {
         TimelineReaderContext context =
             TimelineUIDConverter.APPLICATION_UID.decodeUID(appUID);
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/app-uid/" + appUID + "/entities/type1");
         resp = getResponse(client, uri);
         Set<TimelineEntity> entities =
@@ -801,12 +755,13 @@
         assertNotNull(entities);
         for (TimelineEntity entity : entities) {
           String entityUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listEntityUIDs.add(entityUID);
           assertEquals(TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(
               new TimelineReaderContext(context.getClusterId(),
               context.getUserId(), context.getFlowName(),
-              context.getFlowRunId(), context.getAppId(), "type1",
+                  context.getFlowRunId(), context.getAppId(), "type1",
+                  entity.getIdPrefix(),
               entity.getId())), entityUID);
         }
       }
@@ -814,39 +769,39 @@
 
       // Query single entity based on UIDs' returned in query to get entities.
       for (String entityUID : listEntityUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/entity-uid/" + entityUID);
         resp = getResponse(client, uri);
         TimelineEntity entity = resp.getEntity(TimelineEntity.class);
         assertNotNull(entity);
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flow-uid/dummy:flow/runs");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/dummy:flowrun");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
       // Run Id is not a numerical value.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/some:dummy:flow:123v456");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/dummy:flowrun/apps");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/app-uid/dummy:app");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/app-uid/dummy:app/entities/type1");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/entity-uid/dummy:entity");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -860,7 +815,7 @@
     try {
       String appUIDWithFlowInfo =
           "cluster1!user1!flow_name!1002345678919!application_1111111111_1111";
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"+
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/"+
           "timeline/app-uid/" + appUIDWithFlowInfo);
       ClientResponse resp = getResponse(client, uri);
       TimelineEntity appEntity1 = resp.getEntity(TimelineEntity.class);
@@ -869,8 +824,9 @@
           TimelineEntityType.YARN_APPLICATION.toString(), appEntity1.getType());
       assertEquals("application_1111111111_1111", appEntity1.getId());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "app-uid/" + appUIDWithFlowInfo + "/entities/type1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "app-uid/" + appUIDWithFlowInfo + "/entities/type1");
       resp = getResponse(client, uri);
       Set<TimelineEntity> entities1 =
           resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -878,17 +834,17 @@
       assertEquals(2, entities1.size());
       for (TimelineEntity entity : entities1) {
         assertNotNull(entity.getInfo());
-        assertEquals(1, entity.getInfo().size());
+        assertEquals(2, entity.getInfo().size());
         String uid =
-            (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         assertNotNull(uid);
-        assertTrue(uid.equals(appUIDWithFlowInfo + "!type1!entity1") ||
-            uid.equals(appUIDWithFlowInfo + "!type1!entity2"));
+        assertTrue(uid.equals(appUIDWithFlowInfo + "!type1!0!entity1")
+            || uid.equals(appUIDWithFlowInfo + "!type1!0!entity2"));
       }
 
       String appUIDWithoutFlowInfo = "cluster1!application_1111111111_1111";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "app-uid/" + appUIDWithoutFlowInfo);
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "app-uid/" + appUIDWithoutFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity appEntity2 = resp.getEntity(TimelineEntity.class);
       assertNotNull(appEntity2);
@@ -896,8 +852,9 @@
           TimelineEntityType.YARN_APPLICATION.toString(), appEntity2.getType());
       assertEquals("application_1111111111_1111", appEntity2.getId());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "app-uid/" + appUIDWithoutFlowInfo + "/entities/type1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "app-uid/" + appUIDWithoutFlowInfo + "/entities/type1");
       resp = getResponse(client, uri);
       Set<TimelineEntity> entities2 =
           resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -905,17 +862,17 @@
       assertEquals(2, entities2.size());
       for (TimelineEntity entity : entities2) {
         assertNotNull(entity.getInfo());
-        assertEquals(1, entity.getInfo().size());
+        assertEquals(2, entity.getInfo().size());
         String uid =
-            (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         assertNotNull(uid);
-        assertTrue(uid.equals(appUIDWithoutFlowInfo + "!type1!entity1") ||
-            uid.equals(appUIDWithoutFlowInfo + "!type1!entity2"));
+        assertTrue(uid.equals(appUIDWithoutFlowInfo + "!type1!0!entity1")
+            || uid.equals(appUIDWithoutFlowInfo + "!type1!0!entity2"));
       }
 
-      String entityUIDWithFlowInfo = appUIDWithFlowInfo + "!type1!entity1";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "entity-uid/" + entityUIDWithFlowInfo);
+      String entityUIDWithFlowInfo = appUIDWithFlowInfo + "!type1!0!entity1";
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "entity-uid/" + entityUIDWithFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity singleEntity1 = resp.getEntity(TimelineEntity.class);
       assertNotNull(singleEntity1);
@@ -923,9 +880,9 @@
       assertEquals("entity1", singleEntity1.getId());
 
       String entityUIDWithoutFlowInfo =
-          appUIDWithoutFlowInfo + "!type1!entity1";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "entity-uid/" + entityUIDWithoutFlowInfo);
+          appUIDWithoutFlowInfo + "!type1!0!entity1";
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "entity-uid/" + entityUIDWithoutFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity singleEntity2 = resp.getEntity(TimelineEntity.class);
       assertNotNull(singleEntity2);
@@ -942,7 +899,7 @@
     try {
       String appUID =
           "cluster1!user*1!flow_name!1002345678919!application_1111111111_1111";
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"+
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/"+
           "timeline/app-uid/" + appUID);
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -954,91 +911,64 @@
   public void testGetFlows() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows");
-      ClientResponse resp = getResponse(client, uri);
-      Set<FlowActivityEntity> entities =
-          resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(2, entities.size());
-      for (FlowActivityEntity entity : entities) {
-        assertTrue((entity.getId().endsWith("@flow_name") &&
-            entity.getFlowRuns().size() == 2) ||
-            (entity.getId().endsWith("@flow_name2") &&
-            entity.getFlowRuns().size() == 1));
-      }
+
+      verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+          new String[] {"flow1", "flow_name", "flow_name2"});
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flows/");
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(2, entities.size());
+      verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+          new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/clusters/cluster1/flows?limit=1");
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(1, entities.size());
+      verifyFlowEntites(client, uri, 1, new int[] {3},
+          new String[] {"flow1"});
 
       long firstFlowActivity =
           HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
       DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange="
           + fmt.format(firstFlowActivity) + "-"
           + fmt.format(dayTs));
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(2, entities.size());
-      for (FlowActivityEntity entity : entities) {
-        assertTrue((entity.getId().endsWith("@flow_name") &&
-            entity.getFlowRuns().size() == 2) ||
-            (entity.getId().endsWith("@flow_name2") &&
-            entity.getFlowRuns().size() == 1));
-      }
+      verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+          new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=" +
           fmt.format(dayTs + (4*86400000L)));
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(0, entities.size());
+      verifyFlowEntites(client, uri, 0, new int[] {}, new String[] {});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=-" +
           fmt.format(dayTs));
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(2, entities.size());
+      verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+          new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=" +
            fmt.format(firstFlowActivity) + "-");
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>(){});
-      assertNotNull(entities);
-      assertEquals(2, entities.size());
+      verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+          new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150711:20150714");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150714-20150711");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=2015071129-20150712");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150711-2015071243");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -1047,10 +977,47 @@
   }
 
   @Test
+  public void testGetFlowsForPagination() throws Exception {
+    Client client = createClient();
+    int noOfEntities = 3;
+    int limit = 2;
+    try {
+      String flowURI = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/flows";
+      URI uri = URI.create(flowURI);
+      List<FlowActivityEntity> flowEntities =
+          verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+              new String[] {"flow1", "flow_name", "flow_name2"});
+      FlowActivityEntity fEntity1 = flowEntities.get(0);
+      FlowActivityEntity fEntity3 = flowEntities.get(noOfEntities - 1);
+
+      uri = URI.create(flowURI + "?limit=" + limit);
+      flowEntities = verifyFlowEntites(client, uri, limit);
+      assertEquals(fEntity1, flowEntities.get(0));
+      FlowActivityEntity fEntity2 = flowEntities.get(limit - 1);
+
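+      // fromid is an inclusive cursor: the page fetched below starts with
+      // fEntity2 itself, so consecutive pages overlap by exactly one entity.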
+      uri = URI.create(flowURI + "?limit=" + limit + "&fromid="
+          + fEntity2.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      flowEntities = verifyFlowEntites(client, uri, noOfEntities - limit + 1);
+      assertEquals(fEntity2, flowEntities.get(0));
+      assertEquals(fEntity3, flowEntities.get(noOfEntities - limit));
+
+      uri = URI.create(flowURI + "?limit=" + limit + "&fromid="
+          + fEntity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      flowEntities = verifyFlowEntites(client, uri, 1);
+      assertEquals(fEntity3, flowEntities.get(0));
+    } finally {
+      client.destroy();
+    }
+  }
+
+  @Test
   public void testGetApp() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "userid=user1&fields=ALL&flowname=flow_name&flowrunid=1002345678919");
       ClientResponse resp = getResponse(client, uri);
@@ -1068,7 +1035,7 @@
         assertTrue(verifyMetrics(metric, m1, m2, m3));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/apps/application_1111111111_2222?userid=user1" +
               "&fields=metrics&flowname=flow_name&flowrunid=1002345678919");
       resp = getResponse(client, uri);
@@ -1090,7 +1057,7 @@
   public void testGetAppWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -1109,7 +1076,7 @@
         assertTrue(verifyMetrics(metric, m1, m2, m3));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "fields=ALL&metricslimit=10");
       resp = getResponse(client, uri);
@@ -1139,7 +1106,7 @@
   public void testGetEntityWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity1");
       ClientResponse resp = getResponse(client, uri);
@@ -1156,7 +1123,7 @@
   public void testGetEntitiesWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1");
       ClientResponse resp = getResponse(client, uri);
@@ -1180,7 +1147,7 @@
   public void testGetEntitiesDataToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=cfg_");
       ClientResponse resp = getResponse(client, uri);
@@ -1197,7 +1164,7 @@
       }
       assertEquals(2, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=cfg_,config_");
       resp = getResponse(client, uri);
@@ -1214,7 +1181,7 @@
       }
       assertEquals(5, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=!(cfg_,config_)");
       resp = getResponse(client, uri);
@@ -1230,7 +1197,7 @@
       }
       assertEquals(1, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=MAP_");
       resp = getResponse(client, uri);
@@ -1246,7 +1213,7 @@
       }
       assertEquals(1, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=MAP1_,HDFS_");
       resp = getResponse(client, uri);
@@ -1263,7 +1230,7 @@
       }
       assertEquals(3, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=!(MAP1_,HDFS_)");
       resp = getResponse(client, uri);
@@ -1288,7 +1255,7 @@
   public void testGetEntitiesConfigFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=config_param1%20eq%20value1%20OR%20" +
           "config_param1%20eq%20value3");
@@ -1302,7 +1269,7 @@
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2");
@@ -1313,7 +1280,7 @@
 
       // conffilters=(config_param1 eq value1 AND configuration_param2 eq
       // value2) OR (config_param1 eq value3 AND cfg_param3 eq value1)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1331,7 +1298,7 @@
 
       // conffilters=(config_param1 eq value1 AND configuration_param2 eq
       // value2) OR (config_param1 eq value3 AND cfg_param3 eq value1)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1347,7 +1314,7 @@
       }
       assertEquals(3, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1373,7 +1340,7 @@
       // entity1. For ne, both entity1 and entity2 will be returned. For ene,
       // only entity2 will be returned as we are checking for existence too.
       // conffilters=configuration_param2 ne value3
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=configuration_param2%20ne%20value3");
       resp = getResponse(client, uri);
@@ -1385,7 +1352,7 @@
             entity.getId().equals("entity2"));
       }
       // conffilters=configuration_param2 ene value3
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=configuration_param2%20ene%20value3");
       resp = getResponse(client, uri);
@@ -1405,7 +1372,7 @@
     Client client = createClient();
     try {
       // infofilters=info1 eq cluster1 OR info1 eq cluster2
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info1%20eq%20cluster1%20OR%20info1%20eq" +
           "%20cluster2");
@@ -1420,7 +1387,7 @@
       }
 
       // infofilters=info1 eq cluster1 AND info4 eq 35000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000");
@@ -1430,7 +1397,7 @@
       assertEquals(0, entities.size());
 
       // infofilters=info4 eq 35000 OR info4 eq 36000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info4%20eq%2035000%20OR%20info4%20eq" +
           "%2036000");
@@ -1445,7 +1412,7 @@
 
       // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
       // (info1 eq cluster2 AND info2 eq 2.0)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=(info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000)%20OR%20(info1%20eq%20cluster2%20AND%20info2%20eq%202.0" +
@@ -1459,12 +1426,13 @@
         infoCnt += entity.getInfo().size();
         assertEquals("entity2", entity.getId());
       }
-      // Includes UID in info field even if fields not specified as INFO.
-      assertEquals(1, infoCnt);
+      // UID and FROM_ID are included in the info field even if fields is not
+      // set to INFO.
+      assertEquals(2, infoCnt);
 
       // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
       // (info1 eq cluster2 AND info2 eq 2.0)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=(info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000)%20OR%20(info1%20eq%20cluster2%20AND%20info2%20eq%20" +
@@ -1478,15 +1446,15 @@
         infoCnt += entity.getInfo().size();
         assertEquals("entity2", entity.getId());
       }
-      // Includes UID in info field.
-      assertEquals(4, infoCnt);
+      // Includes UID and FROM_ID in info field.
+      assertEquals(5, infoCnt);
 
       // Test for behavior when compare op is ne(not equals) vs ene
       // (exists and not equals). info3 does not exist for entity2. For ne,
       // both entity1 and entity2 will be returned. For ene, only entity2 will
       // be returned as we are checking for existence too.
       // infofilters=info3 ne 39000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info3%20ne%2039000");
       resp = getResponse(client, uri);
@@ -1498,7 +1466,7 @@
             entity.getId().equals("entity2"));
       }
       // infofilters=info3 ene 39000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info3%20ene%2039000");
       resp = getResponse(client, uri);
@@ -1518,7 +1486,7 @@
     Client client = createClient();
     try {
       // metricfilters=HDFS_BYTES_READ lt 60 OR HDFS_BYTES_READ eq 157
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=HDFS_BYTES_READ%20lt%2060%20OR%20" +
           "HDFS_BYTES_READ%20eq%20157");
@@ -1533,7 +1501,7 @@
       }
 
       // metricfilters=HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040");
@@ -1544,7 +1512,7 @@
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1562,7 +1530,7 @@
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1580,7 +1548,7 @@
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1601,7 +1569,7 @@
       }
       assertEquals(2, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1634,7 +1602,7 @@
       // entity1. For ne, both entity1 and entity2 will be returned. For ene,
       // only entity2 will be returned as we are checking for existence too.
       // metricfilters=MAP11_SLOT_MILLIS ne 100
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=MAP11_SLOT_MILLIS%20ne%20100");
       resp = getResponse(client, uri);
@@ -1646,7 +1614,7 @@
             entity.getId().equals("entity2"));
       }
       // metricfilters=MAP11_SLOT_MILLIS ene 100
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=MAP11_SLOT_MILLIS%20ene%20100");
       resp = getResponse(client, uri);
@@ -1665,7 +1633,7 @@
   public void testGetEntitiesEventFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=event1,event3");
       ClientResponse resp = getResponse(client, uri);
@@ -1678,7 +1646,7 @@
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=!(event1,event3)");
       resp = getResponse(client, uri);
@@ -1687,7 +1655,7 @@
       assertEquals(0, entities.size());
 
       // eventfilters=!(event1,event3) OR event5,event6
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=!(event1,event3)%20OR%20event5,event6");
       resp = getResponse(client, uri);
@@ -1700,7 +1668,7 @@
 
       //  eventfilters=(!(event1,event3) OR event5,event6) OR
       // (event1,event2 AND (event3,event4))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=(!(event1,event3)%20OR%20event5," +
           "event6)%20OR%20(event1,event2%20AND%20(event3,event4))");
@@ -1721,7 +1689,7 @@
   public void testGetEntitiesRelationFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?isrelatedto=type3:entity31,type2:entity21:entity22");
       ClientResponse resp = getResponse(client, uri);
@@ -1734,7 +1702,8 @@
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=!(type3:entity31,type2:entity21:entity22)");
       resp = getResponse(client, uri);
@@ -1744,7 +1713,8 @@
 
       // isrelatedto=!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=!(type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66");
@@ -1759,7 +1729,8 @@
       // isrelatedto=(!(type3:entity31,type2:entity21:entity22)OR type5:
       // entity51,type6:entity61:entity66) OR (type1:entity14,type2:entity21:
       // entity22 AND (type3:entity32:entity35,type4:entity42))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=(!(type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66)%20OR%20(type1:entity14," +
@@ -1776,7 +1747,8 @@
 
       // relatesto=!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?relatesto=!%20(type3:entity31,type2:entity21:entity22%20)%20OR%20" +
           "type5:entity51,type6:entity61:entity66");
@@ -1791,7 +1763,9 @@
       // relatesto=(!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66) OR (type1:entity14,type2:entity21:entity22 AND
       // (type3:entity32:entity35 , type4:entity42))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?relatesto=(!(%20type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66%20)%20OR%20(type1:entity14," +
@@ -1810,6 +1784,113 @@
     }
   }
 
+  private static void verifyMetricCount(TimelineEntity entity,
+      int expectedMetricsCnt, int expectedMetricsValCnt) {
+    int metricsValCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      metricsValCnt += m.getValues().size();
+    }
+    assertEquals(expectedMetricsCnt, entity.getMetrics().size());
+    assertEquals(expectedMetricsValCnt, metricsValCnt);
+  }
+
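+  // Like verifyMetricCount above, but aggregates the metric and value
+  // counts across a whole set of entities.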
+  private static void verifyMetricsCount(Set<TimelineEntity> entities,
+      int expectedMetricsCnt, int expectedMetricsValCnt) {
+    int metricsCnt = 0;
+    int metricsValCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricsCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricsValCnt += m.getValues().size();
+      }
+    }
+    assertEquals(expectedMetricsCnt, metricsCnt);
+    assertEquals(expectedMetricsValCnt, metricsValCnt);
+  }
+
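+  // metricstimestart/metricstimeend bound the metric values returned: only
+  // datapoints whose timestamps fall inside the window are counted, and a
+  // window whose start is later than its end yields 400 BAD_REQUEST.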
+  @Test
+  public void testGetEntitiesMetricsTimeRange() throws Exception {
+    Client client = createClient();
+    try {
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 90000) + "&metricstimeend=" + (ts - 80000));
+      ClientResponse resp = getResponse(client, uri);
+      Set<TimelineEntity> entities =
+          resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 9);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 9);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimeend=" +
+          (ts - 90000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricstimestart=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1/entity2?fields=ALL&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      TimelineEntity entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 3);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1/entity2?fields=ALL&metricslimit=5&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 80000) + "&metricstimeend=" + (ts - 90000));
+      verifyHttpResponse(client, uri, Status.BAD_REQUEST);
+    } finally {
+      client.destroy();
+    }
+  }
+
   /**
    * Tests if specific configs and metrics are retrieved for getEntity calls.
    */
@@ -1817,7 +1898,7 @@
   public void testGetEntityDataToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?confstoretrieve=cfg_,configuration_");
       ClientResponse resp = getResponse(client, uri);
@@ -1831,7 +1912,7 @@
             configKey.startsWith("cfg_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?confstoretrieve=!(cfg_,configuration_)");
       resp = getResponse(client, uri);
@@ -1844,7 +1925,7 @@
         assertTrue(configKey.startsWith("config_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=MAP1_,HDFS_");
       resp = getResponse(client, uri);
@@ -1858,7 +1939,7 @@
             metric.getId().startsWith("HDFS_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=!(MAP1_,HDFS_)");
       resp = getResponse(client, uri);
@@ -1873,7 +1954,7 @@
         assertEquals(1, metric.getValues().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=!(MAP1_,HDFS_)&" +
           "metricslimit=5");
@@ -1896,7 +1977,7 @@
   public void testGetFlowRunApps() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678919/apps?fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -1916,7 +1997,7 @@
         }
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
               "1002345678919/apps?fields=ALL&metricslimit=2");
       resp = getResponse(client, uri);
@@ -1936,14 +2017,14 @@
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919/apps");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
       assertNotNull(entities);
       assertEquals(2, entities.size());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919/" +
           "apps?limit=1");
       resp = getResponse(client, uri);
@@ -1959,7 +2040,7 @@
   public void testGetFlowApps() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -1998,7 +2079,7 @@
         }
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "fields=ALL&metricslimit=6");
       resp = getResponse(client, uri);
@@ -2042,14 +2123,14 @@
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/apps");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
       assertNotNull(entities);
       assertEquals(3, entities.size());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/apps?limit=1");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -2065,7 +2146,7 @@
     Client client = createClient();
     try {
       String entityType = TimelineEntityType.YARN_APPLICATION.toString();
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "eventfilters=" + ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
       ClientResponse resp = getResponse(client, uri);
@@ -2076,7 +2157,7 @@
       assertTrue("Unexpected app in result", entities.contains(
           newEntity(entityType, "application_1111111111_1111")));
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "metricfilters=HDFS_BYTES_READ%20ge%200");
       resp = getResponse(client, uri);
@@ -2086,7 +2167,7 @@
       assertTrue("Unexpected app in result", entities.contains(
           newEntity(entityType, "application_1111111111_1111")));
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "conffilters=cfg1%20eq%20value1");
       resp = getResponse(client, uri);
@@ -2104,7 +2185,7 @@
   public void testGetFlowRunNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678929");
       verifyHttpResponse(client, uri, Status.NOT_FOUND);
@@ -2117,7 +2198,7 @@
   public void testGetFlowsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/flows");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowActivityEntity> entities =
@@ -2135,7 +2216,7 @@
   public void testGetAppNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1378");
       verifyHttpResponse(client, uri, Status.NOT_FOUND);
     } finally {
@@ -2147,7 +2228,7 @@
   public void testGetFlowRunAppsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/users/user1/flows/flow_name/runs/" +
           "1002345678919/apps");
       ClientResponse resp = getResponse(client, uri);
@@ -2166,7 +2247,7 @@
   public void testGetFlowAppsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/users/user1/flows/flow_name55/apps");
       ClientResponse resp = getResponse(client, uri);
       Set<TimelineEntity> entities =
@@ -2180,11 +2261,329 @@
     }
   }
 
-  @After
-  public void stop() throws Exception {
-    if (server != null) {
-      server.stop();
-      server = null;
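+  // Pagination is exercised against both the application-scoped and the
+  // user-scoped entity endpoints; both must honor the same limit/fromid
+  // contract checked in verifyEntitiesForPagination below.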
+  @Test
+  public void testGenericEntitiesForPagination() throws Exception {
+    Client client = createClient();
+    try {
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/apps/application_1111111111_1111/"
+          + "entities/entitytype";
+      verifyEntitiesForPagination(client, resourceUri);
+      resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/users/" + doAsUser
+          + "/entities/entitytype";
+      verifyEntitiesForPagination(client, resourceUri);
+    } finally {
+      client.destroy();
+    }
+  }
+
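+  // Walks the listing page by page using limit and the inclusive fromid
+  // cursor, checking that consecutive pages overlap by exactly one entity
+  // until only entity-1 remains.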
+  private void verifyEntitiesForPagination(Client client, String resourceUri)
+      throws Exception {
+    int limit = 10;
+    String queryParam = "?limit=" + limit;
+    URI uri = URI.create(resourceUri + queryParam);
+
+    ClientResponse resp = getResponse(client, uri);
+    List<TimelineEntity> entities =
+        resp.getEntity(new GenericType<List<TimelineEntity>>() {
+        });
+    // verify for entity-10 to entity-1 in descending order.
+    verifyPaginatedEntities(entities, limit, limit);
+
+    limit = 4;
+    queryParam = "?limit=" + limit;
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-10 to entity-7 in descending order.
+    TimelineEntity entity = verifyPaginatedEntities(entities, limit, 10);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-7 to entity-4 in descending order.
+    entity = verifyPaginatedEntities(entities, limit, 7);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-4 to entity-1 in descending order.
+    entity = verifyPaginatedEntities(entities, limit, 4);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // the final page always contains just entity-1.
+    entity = verifyPaginatedEntities(entities, 1, 1);
+  }
+
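+  // Asserts that entities arrive in descending id order starting at
+  // entityid-<startFrom>, with idPrefix equal to 11 minus the id suffix, and
+  // returns the last entity so its FROMID can seed the next page.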
+  private TimelineEntity verifyPaginatedEntities(
+      List<TimelineEntity> entities, int limit, int startFrom) {
+    assertNotNull(entities);
+    assertEquals(limit, entities.size());
+    TimelineEntity entity = null;
+    for (TimelineEntity timelineEntity : entities) {
+      assertEquals("entitytype", timelineEntity.getType());
+      assertEquals("entityid-" + startFrom, timelineEntity.getId());
+      assertEquals(11 - startFrom--, timelineEntity.getIdPrefix());
+      entity = timelineEntity;
+    }
+    return entity;
+  }
+
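+  // Fetches flow activity entities and checks, in order, each flow's
+  // SYSTEM_INFO_FLOW_NAME and flow-run count against the expected arrays;
+  // the list is returned so pagination tests can chase FROMID cursors.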
+  private List<FlowActivityEntity> verifyFlowEntites(Client client, URI uri,
+      int noOfEntities, int[] expectedFlowRunCnts,
+      String[] flowsInSequence) throws Exception {
+    ClientResponse resp = getResponse(client, uri);
+    List<FlowActivityEntity> entities =
+        resp.getEntity(new GenericType<List<FlowActivityEntity>>() {
+        });
+    assertNotNull(entities);
+    assertEquals(noOfEntities, entities.size());
+    assertEquals(noOfEntities, flowsInSequence.length);
+    assertEquals(noOfEntities, expectedFlowRunCnts.length);
+    int count = 0;
+    for (FlowActivityEntity timelineEntity : entities) {
+      assertEquals(flowsInSequence[count],
+          timelineEntity.getInfo().get("SYSTEM_INFO_FLOW_NAME"));
+      assertEquals(expectedFlowRunCnts[count++],
+          timelineEntity.getFlowRuns().size());
+    }
+    return entities;
+  }
+
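+  // The 15 apps of flow1 are paged as 10 + 6: the second request seeds
+  // fromid with the tenth entity, which reappears as the first element of
+  // the next page.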
+  @Test
+  public void testForFlowAppsPagination() throws Exception {
+    Client client = createClient();
+    try {
+      // 15 app entities are stored during initialization.
+      int totalAppEntities = 15;
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/users/user1/flows/flow1/apps";
+      URI uri = URI.create(resourceUri);
+      ClientResponse resp = getResponse(client, uri);
+      List<TimelineEntity> entities =
+          resp.getEntity(new GenericType<List<TimelineEntity>>() {
+          });
+      assertNotNull(entities);
+      assertEquals(totalAppEntities, entities.size());
+      TimelineEntity entity1 = entities.get(0);
+      TimelineEntity entity15 = entities.get(totalAppEntities - 1);
+
+      int limit = 10;
+      String queryParam = "?limit=" + limit;
+      uri = URI.create(resourceUri + queryParam);
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(limit, entities.size());
+      assertEquals(entity1, entities.get(0));
+      TimelineEntity entity10 = entities.get(limit - 1);
+
+      uri = URI.create(resourceUri + queryParam + "&fromid="
+          + entity10.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(6, entities.size());
+      assertEquals(entity10, entities.get(0));
+      assertEquals(entity15, entities.get(5));
+
+    } finally {
+      client.destroy();
+    }
+  }
+
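+  // The 5 apps under run 1 of flow1 are paged as 3 + 3, again overlapping
+  // by the entity whose FROMID seeded the second request.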
+  @Test
+  public void testForFlowRunAppsPagination() throws Exception {
+    Client client = createClient();
+    try {
+      // 5 of the app entities stored during initialization belong to run 1.
+      int totalAppEntities = 5;
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/users/user1/flows/flow1/runs/1/apps";
+      URI uri = URI.create(resourceUri);
+      ClientResponse resp = getResponse(client, uri);
+      List<TimelineEntity> entities =
+          resp.getEntity(new GenericType<List<TimelineEntity>>() {
+          });
+      assertNotNull(entities);
+      assertEquals(totalAppEntities, entities.size());
+      TimelineEntity entity1 = entities.get(0);
+      TimelineEntity entity5 = entities.get(totalAppEntities - 1);
+
+      int limit = 3;
+      String queryParam = "?limit=" + limit;
+      uri = URI.create(resourceUri + queryParam);
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(limit, entities.size());
+      assertEquals(entity1, entities.get(0));
+      TimelineEntity entity3 = entities.get(limit - 1);
+
+      uri = URI.create(resourceUri + queryParam + "&fromid="
+          + entity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(3, entities.size());
+      assertEquals(entity3, entities.get(0));
+      assertEquals(entity5, entities.get(2));
+
+    } finally {
+      client.destroy();
+    }
+  }
+
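+  // The 3 runs of flow1 are walked in pages of 2; paging from the last
+  // run's FROMID collapses to a single-element page.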
+  @Test
+  public void testForFlowRunsPagination() throws Exception {
+    Client client = createClient();
+    try {
+      // 3 flow runs are stored for flow1 during initialization.
+      int totalRuns = 3;
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/users/user1/flows/flow1/runs";
+      URI uri = URI.create(resourceUri);
+      ClientResponse resp = getResponse(client, uri);
+      List<TimelineEntity> entities =
+          resp.getEntity(new GenericType<List<TimelineEntity>>() {
+          });
+      assertNotNull(entities);
+      assertEquals(totalRuns, entities.size());
+      TimelineEntity entity1 = entities.get(0);
+      TimelineEntity entity3 = entities.get(totalRuns - 1);
+
+      int limit = 2;
+      String queryParam = "?limit=" + limit;
+      uri = URI.create(resourceUri + queryParam);
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(limit, entities.size());
+      assertEquals(entity1, entities.get(0));
+      TimelineEntity entity2 = entities.get(limit - 1);
+
+      uri = URI.create(resourceUri + queryParam + "&fromid="
+          + entity2.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(limit, entities.size());
+      assertEquals(entity2, entities.get(0));
+      assertEquals(entity3, entities.get(1));
+
+      uri = URI.create(resourceUri + queryParam + "&fromid="
+          + entity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+      });
+      assertNotNull(entities);
+      assertEquals(1, entities.size());
+      assertEquals(entity3, entities.get(0));
+    } finally {
+      client.destroy();
+    }
+  }
+
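+  // Applies the metricstimestart/metricstimeend window to flow-run and
+  // flow-level app listings and to a single-app query; an inverted window
+  // is rejected with 400 BAD_REQUEST.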
+  @Test
+  public void testGetAppsMetricsRange() throws Exception {
+    Client client = createClient();
+    try {
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000) + "&metricstimeend=" + (ts - 100000));
+      ClientResponse resp = getResponse(client, uri);
+      Set<TimelineEntity> entities =
+          resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100");
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 10);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000) + "&metricstimeend=" + (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(3, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100");
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(3, entities.size());
+      verifyMetricsCount(entities, 5, 12);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 10);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimeend=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/apps/application_1111111111_1111?userid=user1&fields=ALL" +
+          "&flowname=flow_name&flowrunid=1002345678919&metricslimit=100" +
+          "&metricstimestart=" + (ts - 200000) + "&metricstimeend=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      TimelineEntity entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 3);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 200000));
+      verifyHttpResponse(client, uri, Status.BAD_REQUEST);
+    } finally {
+      client.destroy();
     }
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index 5cbb781..cf6a854 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -24,16 +24,44 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 
-final class DataGeneratorForTest {
-  static void loadApps(HBaseTestingUtility util) throws IOException {
+/**
+ * Utility class that creates the schema and generates test data.
+ */
+public final class DataGeneratorForTest {
+
+  // private constructor for utility class
+  private DataGeneratorForTest() {
+  }
+
+  /**
+   * Creates the schema for the timeline service.
+   * @param conf the configuration used to create the timeline service tables.
+   * @throws IOException if the schema creation fails.
+   */
+  public static void createSchema(final Configuration conf)
+      throws IOException {
+    // set the jar location to a blank value so that
+    // the coprocessor class is loaded from the classpath
+    conf.set(YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION, " ");
+    // now create all tables
+    TimelineSchemaCreator.createAllTables(conf, false);
+  }
+
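+  /**
+   * Loads a set of sample application entities into the mini cluster.
+   * @param util the HBase testing utility whose configuration is used.
+   * @param ts the base timestamp for the generated metric values.
+   * @throws IOException if writing the entities fails.
+   */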
+  public static void loadApps(HBaseTestingUtility util, long ts)
+      throws IOException {
     TimelineEntities te = new TimelineEntities();
     TimelineEntity entity = new TimelineEntity();
     String id = "application_1111111111_2222";
@@ -42,11 +70,7 @@
     Long cTime = 1425016502000L;
     entity.setCreatedTime(cTime);
     // add the info map in Timeline Entity
-    Map<String, Object> infoMap = new HashMap<>();
-    infoMap.put("infoMapKey1", "infoMapValue2");
-    infoMap.put("infoMapKey2", 20);
-    infoMap.put("infoMapKey3", 85.85);
-    entity.addInfo(infoMap);
+    entity.addInfo(getInfoMap3());
     // add the isRelatedToEntity info
     Set<String> isRelatedToSet = new HashSet<>();
     isRelatedToSet.add("relatedto1");
@@ -71,29 +95,14 @@
     entity.addConfigs(conf);
     // add metrics
     Set<TimelineMetric> metrics = new HashSet<>();
-    TimelineMetric m1 = new TimelineMetric();
-    m1.setId("MAP_SLOT_MILLIS");
-    Map<Long, Number> metricValues = new HashMap<>();
-    long ts = System.currentTimeMillis();
-    metricValues.put(ts - 120000, 100000000);
-    metricValues.put(ts - 100000, 200000000);
-    metricValues.put(ts - 80000, 300000000);
-    metricValues.put(ts - 60000, 400000000);
-    metricValues.put(ts - 40000, 50000000000L);
-    metricValues.put(ts - 20000, 60000000000L);
-    m1.setType(Type.TIME_SERIES);
-    m1.setValues(metricValues);
-    metrics.add(m1);
+    metrics.add(getMetric4(ts));
 
     TimelineMetric m12 = new TimelineMetric();
     m12.setId("MAP1_BYTES");
     m12.addValue(ts, 50);
     metrics.add(m12);
     entity.addMetrics(metrics);
-    TimelineEvent event = new TimelineEvent();
-    event.setId("start_event");
-    event.setTimestamp(ts);
-    entity.addEvent(event);
+    entity.addEvent(addStartEvent(ts));
     te.addEntity(entity);
     TimelineEntities te1 = new TimelineEntities();
     TimelineEntity entity1 = new TimelineEntity();
@@ -102,10 +111,7 @@
     entity1.setType(TimelineEntityType.YARN_APPLICATION.toString());
     entity1.setCreatedTime(cTime + 20L);
     // add the info map in Timeline Entity
-    Map<String, Object> infoMap1 = new HashMap<>();
-    infoMap1.put("infoMapKey1", "infoMapValue1");
-    infoMap1.put("infoMapKey2", 10);
-    entity1.addInfo(infoMap1);
+    entity1.addInfo(getInfoMap4());
 
     // add the isRelatedToEntity info
     Set<String> isRelatedToSet1 = new HashSet<>();
@@ -133,21 +139,7 @@
     entity1.addConfigs(conf1);
 
     // add metrics
-    Set<TimelineMetric> metrics1 = new HashSet<>();
-    TimelineMetric m2 = new TimelineMetric();
-    m2.setId("MAP1_SLOT_MILLIS");
-    Map<Long, Number> metricValues1 = new HashMap<>();
-    long ts1 = System.currentTimeMillis();
-    metricValues1.put(ts1 - 120000, 100000000);
-    metricValues1.put(ts1 - 100000, 200000000);
-    metricValues1.put(ts1 - 80000, 300000000);
-    metricValues1.put(ts1 - 60000, 400000000);
-    metricValues1.put(ts1 - 40000, 50000000000L);
-    metricValues1.put(ts1 - 20000, 60000000000L);
-    m2.setType(Type.TIME_SERIES);
-    m2.setValues(metricValues1);
-    metrics1.add(m2);
-    entity1.addMetrics(metrics1);
+    entity1.addMetrics(getMetrics4(ts));
     TimelineEvent event11 = new TimelineEvent();
     event11.setId("end_event");
     event11.setTimestamp(ts);
@@ -159,6 +151,53 @@
     te1.addEntity(entity1);
 
     TimelineEntities te2 = new TimelineEntities();
+    te2.addEntity(getEntity4(cTime, ts));
+    HBaseTimelineWriterImpl hbi = null;
+    try {
+      hbi = new HBaseTimelineWriterImpl();
+      hbi.init(util.getConfiguration());
+      hbi.start();
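+      // all three entity sets are written under the same flow run
+      // (cluster1 / user1 / some_flow_name, run id 1002345678919L)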
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser("user1");
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_2222"),
+          te, remoteUser);
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_3333"),
+          te1, remoteUser);
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_4444"),
+          te2, remoteUser);
+      hbi.stop();
+    } finally {
+      if (hbi != null) {
+        hbi.stop();
+        hbi.close();
+      }
+    }
+  }
+
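+  // builds the metric set: one TIME_SERIES metric (MAP1_SLOT_MILLIS) with
+  // values at fixed offsets before the supplied timestamp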
+  private static Set<TimelineMetric> getMetrics4(long ts) {
+    Set<TimelineMetric> metrics1 = new HashSet<>();
+    TimelineMetric m2 = new TimelineMetric();
+    m2.setId("MAP1_SLOT_MILLIS");
+    Map<Long, Number> metricValues1 = new HashMap<>();
+    metricValues1.put(ts - 120000, 100000000);
+    metricValues1.put(ts - 100000, 200000000);
+    metricValues1.put(ts - 80000, 300000000);
+    metricValues1.put(ts - 60000, 400000000);
+    metricValues1.put(ts - 40000, 50000000000L);
+    metricValues1.put(ts - 20000, 60000000000L);
+    m2.setType(Type.TIME_SERIES);
+    m2.setValues(metricValues1);
+    metrics1.add(m2);
+    return metrics1;
+  }
+
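+  // builds the sample entity with id application_1111111111_4444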
+  private static TimelineEntity getEntity4(long cTime, long ts) {
     TimelineEntity entity2 = new TimelineEntity();
     String id2 = "application_1111111111_4444";
     entity2.setId(id2);
@@ -178,34 +217,102 @@
     relatesToSet14.add("relatesto7");
     relatesTo3.put("container2", relatesToSet14);
     entity2.setRelatesToEntities(relatesTo3);
-
-    te2.addEntity(entity2);
-    HBaseTimelineWriterImpl hbi = null;
-    try {
-      hbi = new HBaseTimelineWriterImpl();
-      hbi.init(util.getConfiguration());
-      hbi.start();
-      String cluster = "cluster1";
-      String user = "user1";
-      String flow = "some_flow_name";
-      String flowVersion = "AB7822C10F1111";
-      long runid = 1002345678919L;
-      String appName = "application_1111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
-      appName = "application_1111111111_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
-      appName = "application_1111111111_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te2);
-      hbi.stop();
-    } finally {
-      if (hbi != null) {
-        hbi.stop();
-        hbi.close();
-      }
-    }
+    return entity2;
   }
 
-  static void loadEntities(HBaseTestingUtility util) throws IOException {
+  private static Map<String, Object> getInfoMap4() {
+    Map<String, Object> infoMap1 = new HashMap<>();
+    infoMap1.put("infoMapKey1", "infoMapValue1");
+    infoMap1.put("infoMapKey2", 10);
+    return infoMap1;
+  }
+
+  private static TimelineMetric getMetric4(long ts) {
+    TimelineMetric m1 = new TimelineMetric();
+    m1.setId("MAP_SLOT_MILLIS");
+    Map<Long, Number> metricValues = new HashMap<>();
+    metricValues.put(ts - 120000, 100000000);
+    metricValues.put(ts - 100000, 200000000);
+    metricValues.put(ts - 80000, 300000000);
+    metricValues.put(ts - 60000, 400000000);
+    metricValues.put(ts - 40000, 50000000000L);
+    metricValues.put(ts - 20000, 60000000000L);
+    m1.setType(Type.TIME_SERIES);
+    m1.setValues(metricValues);
+    return m1;
+  }
+
+  private static Map<String, Object> getInfoMap3() {
+    Map<String, Object> infoMap = new HashMap<>();
+    infoMap.put("infoMapKey1", "infoMapValue2");
+    infoMap.put("infoMapKey2", 20);
+    infoMap.put("infoMapKey3", 85.85);
+    return infoMap;
+  }
+
+  private static Map<String, Object> getInfoMap1() {
+    Map<String, Object> infoMap = new HashMap<>();
+    infoMap.put("infoMapKey1", "infoMapValue2");
+    infoMap.put("infoMapKey2", 20);
+    infoMap.put("infoMapKey3", 71.4);
+    return infoMap;
+  }
+
+  private static Map<String, Set<String>> getRelatesTo1() {
+    Set<String> relatesToSet = new HashSet<String>();
+    relatesToSet.add("relatesto1");
+    relatesToSet.add("relatesto3");
+    Map<String, Set<String>> relatesTo = new HashMap<>();
+    relatesTo.put("container", relatesToSet);
+    Set<String> relatesToSet11 = new HashSet<>();
+    relatesToSet11.add("relatesto4");
+    relatesTo.put("container1", relatesToSet11);
+    return relatesTo;
+  }
+
+  private static Map<String, String> getConfig1() {
+    Map<String, String> conf = new HashMap<>();
+    conf.put("config_param1", "value1");
+    conf.put("config_param2", "value2");
+    conf.put("cfg_param1", "value3");
+    return conf;
+  }
+
+  private static Map<String, String> getConfig2() {
+    Map<String, String> conf1 = new HashMap<>();
+    conf1.put("cfg_param1", "value1");
+    conf1.put("cfg_param2", "value2");
+    return conf1;
+  }
+
+  private static Map<String, Object> getInfoMap2() {
+    Map<String, Object> infoMap1 = new HashMap<>();
+    infoMap1.put("infoMapKey1", "infoMapValue1");
+    infoMap1.put("infoMapKey2", 10);
+    return infoMap1;
+  }
+
+  private static Map<String, Set<String>> getIsRelatedTo1() {
+    Set<String> isRelatedToSet = new HashSet<>();
+    isRelatedToSet.add("relatedto1");
+    Map<String, Set<String>> isRelatedTo = new HashMap<>();
+    isRelatedTo.put("task", isRelatedToSet);
+    return isRelatedTo;
+  }
+
+  private static Map<Long, Number> getMetricValues1(long ts) {
+    Map<Long, Number> metricValues = new HashMap<>();
+    metricValues.put(ts - 120000, 100000000);
+    metricValues.put(ts - 100000, 200000000);
+    metricValues.put(ts - 80000, 300000000);
+    metricValues.put(ts - 60000, 400000000);
+    metricValues.put(ts - 40000, 50000000000L);
+    metricValues.put(ts - 20000, 70000000000L);
+    return metricValues;
+  }
+
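+  /**
+   * Loads a set of sample generic entities, along with the matching
+   * application entities, into the mini cluster.
+   * @param util the HBase testing utility whose configuration is used.
+   * @param ts the base timestamp for the generated metric values.
+   * @throws IOException if writing the entities fails.
+   */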
+  public static void loadEntities(HBaseTestingUtility util, long ts)
+      throws IOException {
     TimelineEntities te = new TimelineEntities();
     TimelineEntity entity = new TimelineEntity();
     String id = "hello";
@@ -215,50 +322,22 @@
     Long cTime = 1425016502000L;
     entity.setCreatedTime(cTime);
     // add the info map in Timeline Entity
-    Map<String, Object> infoMap = new HashMap<>();
-    infoMap.put("infoMapKey1", "infoMapValue2");
-    infoMap.put("infoMapKey2", 20);
-    infoMap.put("infoMapKey3", 71.4);
-    entity.addInfo(infoMap);
+    entity.addInfo(getInfoMap1());
     // add the isRelatedToEntity info
-    Set<String> isRelatedToSet = new HashSet<>();
-    isRelatedToSet.add("relatedto1");
-    Map<String, Set<String>> isRelatedTo = new HashMap<>();
-    isRelatedTo.put("task", isRelatedToSet);
-    entity.setIsRelatedToEntities(isRelatedTo);
+    entity.setIsRelatedToEntities(getIsRelatedTo1());
 
     // add the relatesTo info
-    Set<String> relatesToSet = new HashSet<String>();
-    relatesToSet.add("relatesto1");
-    relatesToSet.add("relatesto3");
-    Map<String, Set<String>> relatesTo = new HashMap<>();
-    relatesTo.put("container", relatesToSet);
-    Set<String> relatesToSet11 = new HashSet<>();
-    relatesToSet11.add("relatesto4");
-    relatesTo.put("container1", relatesToSet11);
-    entity.setRelatesToEntities(relatesTo);
+    entity.setRelatesToEntities(getRelatesTo1());
 
     // add some config entries
-    Map<String, String> conf = new HashMap<>();
-    conf.put("config_param1", "value1");
-    conf.put("config_param2", "value2");
-    conf.put("cfg_param1", "value3");
-    entity.addConfigs(conf);
+    entity.addConfigs(getConfig1());
 
     // add metrics
     Set<TimelineMetric> metrics = new HashSet<>();
     TimelineMetric m1 = new TimelineMetric();
     m1.setId("MAP_SLOT_MILLIS");
-    Map<Long, Number> metricValues = new HashMap<>();
-    long ts = System.currentTimeMillis();
-    metricValues.put(ts - 120000, 100000000);
-    metricValues.put(ts - 100000, 200000000);
-    metricValues.put(ts - 80000, 300000000);
-    metricValues.put(ts - 60000, 400000000);
-    metricValues.put(ts - 40000, 50000000000L);
-    metricValues.put(ts - 20000, 70000000000L);
     m1.setType(Type.TIME_SERIES);
-    m1.setValues(metricValues);
+    m1.setValues(getMetricValues1(ts));
     metrics.add(m1);
 
     TimelineMetric m12 = new TimelineMetric();
@@ -266,10 +345,7 @@
     m12.addValue(ts, 50);
     metrics.add(m12);
     entity.addMetrics(metrics);
-    TimelineEvent event = new TimelineEvent();
-    event.setId("start_event");
-    event.setTimestamp(ts);
-    entity.addEvent(event);
+    entity.addEvent(addStartEvent(ts));
     te.addEntity(entity);
 
     TimelineEntity entity1 = new TimelineEntity();
@@ -279,10 +355,7 @@
     entity1.setCreatedTime(cTime + 20L);
 
     // add the info map in Timeline Entity
-    Map<String, Object> infoMap1 = new HashMap<>();
-    infoMap1.put("infoMapKey1", "infoMapValue1");
-    infoMap1.put("infoMapKey2", 10);
-    entity1.addInfo(infoMap1);
+    entity1.addInfo(getInfoMap2());
 
     // add event.
     TimelineEvent event11 = new TimelineEvent();
@@ -296,15 +369,7 @@
 
 
     // add the isRelatedToEntity info
-    Set<String> isRelatedToSet1 = new HashSet<>();
-    isRelatedToSet1.add("relatedto3");
-    isRelatedToSet1.add("relatedto5");
-    Map<String, Set<String>> isRelatedTo1 = new HashMap<>();
-    isRelatedTo1.put("task1", isRelatedToSet1);
-    Set<String> isRelatedToSet11 = new HashSet<>();
-    isRelatedToSet11.add("relatedto4");
-    isRelatedTo1.put("task2", isRelatedToSet11);
-    entity1.setIsRelatedToEntities(isRelatedTo1);
+    entity1.setIsRelatedToEntities(getIsRelatedTo2());
 
     // add the relatesTo info
     Set<String> relatesToSet1 = new HashSet<String>();
@@ -315,29 +380,88 @@
     entity1.setRelatesToEntities(relatesTo1);
 
     // add some config entries
-    Map<String, String> conf1 = new HashMap<>();
-    conf1.put("cfg_param1", "value1");
-    conf1.put("cfg_param2", "value2");
-    entity1.addConfigs(conf1);
+    entity1.addConfigs(getConfig2());
 
     // add metrics
     Set<TimelineMetric> metrics1 = new HashSet<>();
     TimelineMetric m2 = new TimelineMetric();
     m2.setId("MAP1_SLOT_MILLIS");
-    Map<Long, Number> metricValues1 = new HashMap<>();
-    long ts1 = System.currentTimeMillis();
-    metricValues1.put(ts1 - 120000, 100000000);
-    metricValues1.put(ts1 - 100000, 200000000);
-    metricValues1.put(ts1 - 80000, 300000000);
-    metricValues1.put(ts1 - 60000, 400000000);
-    metricValues1.put(ts1 - 40000, 50000000000L);
-    metricValues1.put(ts1 - 20000, 60000000000L);
     m2.setType(Type.TIME_SERIES);
-    m2.setValues(metricValues1);
+    m2.setValues(getMetricValues2(ts));
     metrics1.add(m2);
     entity1.addMetrics(metrics1);
     te.addEntity(entity1);
 
+    te.addEntity(getEntity2(type, cTime, ts));
+
+    // Create entities under multiple distinct types, for the type-listing
+    // tests
+    for (int i = 0; i < 10; i++) {
+      TimelineEntity entity3 = new TimelineEntity();
+      String id3 = "typeTest" + i;
+      entity3.setId(id3);
+      StringBuilder typeName = new StringBuilder("newType");
+      for (int j = 0; j < (i % 3); j++) {
+        typeName.append(" ").append(j);
+      }
+      entity3.setType(typeName.toString());
+      entity3.setCreatedTime(cTime + 80L + i);
+      te.addEntity(entity3);
+    }
+
+    // Create app entity for app to flow table
+    TimelineEntities appTe1 = new TimelineEntities();
+    TimelineEntity entityApp1 = new TimelineEntity();
+    String appName1 = "application_1231111111_1111";
+    entityApp1.setId(appName1);
+    entityApp1.setType(TimelineEntityType.YARN_APPLICATION.toString());
+    entityApp1.setCreatedTime(cTime + 40L);
+    TimelineEvent appCreationEvent1 = new TimelineEvent();
+    appCreationEvent1.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+    appCreationEvent1.setTimestamp(cTime);
+    entityApp1.addEvent(appCreationEvent1);
+    appTe1.addEntity(entityApp1);
+
+    TimelineEntities appTe2 = new TimelineEntities();
+    TimelineEntity entityApp2 = new TimelineEntity();
+    String appName2 = "application_1231111111_1112";
+    entityApp2.setId(appName2);
+    entityApp2.setType(TimelineEntityType.YARN_APPLICATION.toString());
+    entityApp2.setCreatedTime(cTime + 50L);
+    TimelineEvent appCreationEvent2 = new TimelineEvent();
+    appCreationEvent2.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+    appCreationEvent2.setTimestamp(cTime);
+    entityApp2.addEvent(appCreationEvent2);
+    appTe2.addEntity(entityApp2);
+
+    HBaseTimelineWriterImpl hbi = null;
+    try {
+      hbi = new HBaseTimelineWriterImpl();
+      hbi.init(util.getConfiguration());
+      hbi.start();
+
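+      // the same generic entities are written under two applications;
+      // each application also gets its own app-to-flow entity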
+      UserGroupInformation user =
+          UserGroupInformation.createRemoteUser("user1");
+      TimelineCollectorContext context =
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, appName1);
+      hbi.write(context, te, user);
+      hbi.write(context, appTe1, user);
+
+      context = new TimelineCollectorContext("cluster1", "user1",
+          "some_flow_name", "AB7822C10F1111", 1002345678919L, appName2);
+      hbi.write(context, te, user);
+      hbi.write(context, appTe2, user);
+      hbi.stop();
+    } finally {
+      if (hbi != null) {
+        hbi.stop();
+        hbi.close();
+      }
+    }
+  }
+
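+  // builds the "hello2" entity of the supplied type with relatesTo info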
+  private static TimelineEntity getEntity2(String type, long cTime,
+      long ts) {
     TimelineEntity entity2 = new TimelineEntity();
     String id2 = "hello2";
     entity2.setId(id2);
@@ -357,25 +481,36 @@
     relatesToSet14.add("relatesto7");
     relatesTo3.put("container2", relatesToSet14);
     entity2.setRelatesToEntities(relatesTo3);
-    te.addEntity(entity2);
-    HBaseTimelineWriterImpl hbi = null;
-    try {
-      hbi = new HBaseTimelineWriterImpl();
-      hbi.init(util.getConfiguration());
-      hbi.start();
-      String cluster = "cluster1";
-      String user = "user1";
-      String flow = "some_flow_name";
-      String flowVersion = "AB7822C10F1111";
-      long runid = 1002345678919L;
-      String appName = "application_1231111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
-      hbi.stop();
-    } finally {
-      if (hbi != null) {
-        hbi.stop();
-        hbi.close();
-      }
-    }
+    return entity2;
+  }
+
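+  // builds a "start_event" TimelineEvent stamped with the given timestamp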
+  private static TimelineEvent addStartEvent(long ts) {
+    TimelineEvent event = new TimelineEvent();
+    event.setId("start_event");
+    event.setTimestamp(ts);
+    return event;
+  }
+
+  private static Map<Long, Number> getMetricValues2(long ts1) {
+    Map<Long, Number> metricValues1 = new HashMap<>();
+    metricValues1.put(ts1 - 120000, 100000000);
+    metricValues1.put(ts1 - 100000, 200000000);
+    metricValues1.put(ts1 - 80000, 300000000);
+    metricValues1.put(ts1 - 60000, 400000000);
+    metricValues1.put(ts1 - 40000, 50000000000L);
+    metricValues1.put(ts1 - 20000, 60000000000L);
+    return metricValues1;
+  }
+
+  private static Map<String, Set<String>> getIsRelatedTo2() {
+    Set<String> isRelatedToSet1 = new HashSet<>();
+    isRelatedToSet1.add("relatedto3");
+    isRelatedToSet1.add("relatedto5");
+    Map<String, Set<String>> isRelatedTo1 = new HashMap<>();
+    isRelatedTo1.put("task1", isRelatedToSet1);
+    Set<String> isRelatedToSet11 = new HashSet<>();
+    isRelatedToSet11.add("relatedto4");
+    isRelatedTo1.put("task2", isRelatedToSet11);
+    return isRelatedTo1;
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 3948d23..111008a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -51,6 +52,7 @@
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -87,17 +89,14 @@
 
   private static HBaseTestingUtility util;
   private HBaseTimelineReaderImpl reader;
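+  // fixed timestamp shared by the generated data and the assertions below,
+  // keeping the metric time-window checks deterministic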
+  private static final long CURRENT_TIME = System.currentTimeMillis();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     util = new HBaseTestingUtility();
     util.startMiniCluster();
-    createSchema();
-    DataGeneratorForTest.loadApps(util);
-  }
-
-  private static void createSchema() throws IOException {
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+    DataGeneratorForTest.createSchema(util.getConfiguration());
+    DataGeneratorForTest.loadApps(util, CURRENT_TIME);
   }
 
   @Before
@@ -165,7 +164,8 @@
       String flow = null;
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // retrieve the row
@@ -240,13 +240,12 @@
     TimelineMetric m1 = new TimelineMetric();
     m1.setId("MAP_SLOT_MILLIS");
     Map<Long, Number> metricValues = new HashMap<Long, Number>();
-    long ts = System.currentTimeMillis();
-    metricValues.put(ts - 120000, 100000000);
-    metricValues.put(ts - 100000, 200000000);
-    metricValues.put(ts - 80000, 300000000);
-    metricValues.put(ts - 60000, 400000000);
-    metricValues.put(ts - 40000, 50000000000L);
-    metricValues.put(ts - 20000, 60000000000L);
+    metricValues.put(CURRENT_TIME - 120000, 100000000);
+    metricValues.put(CURRENT_TIME - 100000, 200000000);
+    metricValues.put(CURRENT_TIME - 80000, 300000000);
+    metricValues.put(CURRENT_TIME - 60000, 400000000);
+    metricValues.put(CURRENT_TIME - 40000, 50000000000L);
+    metricValues.put(CURRENT_TIME - 20000, 60000000000L);
     m1.setType(Type.TIME_SERIES);
     m1.setValues(metricValues);
     metrics.add(m1);
@@ -263,7 +262,7 @@
     TimelineMetric aggMetric = new TimelineMetric();
     aggMetric.setId("MEM_USAGE");
     Map<Long, Number> aggMetricValues = new HashMap<Long, Number>();
-    long aggTs = ts;
+    long aggTs = CURRENT_TIME;
     aggMetricValues.put(aggTs - 120000, 102400000L);
     aggMetric.setType(Type.SINGLE_VALUE);
     aggMetric.setRealtimeAggregationOp(TimelineMetricOperation.SUM);
@@ -284,7 +283,8 @@
       String flow = "s!ome_f\tlow  _n am!e";
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
 
       // Write entity again, this time without created time.
       entity = new ApplicationEntity();
@@ -296,7 +296,8 @@
       entity.addInfo(infoMap1);
       te = new TimelineEntities();
       te.addEntity(entity);
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       infoMap.putAll(infoMap1);
@@ -384,7 +385,7 @@
           new TimelineReaderContext(cluster, user, flow, runid, appId,
           entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE));
+          EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE, null, null));
       assertNotNull(e1);
 
       // verify attributes
@@ -393,6 +394,8 @@
           e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
       Map<String, Object> infoMap2 = e1.getInfo();
+      // the FROM_ID key is added by the storage layer. Remove it for
+      // comparison.
+      infoMap2.remove("FROM_ID");
       assertEquals(infoMap, infoMap2);
 
       Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
@@ -425,7 +428,7 @@
       e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow,
           runid, appId, entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(TimelineReader.Field.ALL), 3));
+          EnumSet.of(TimelineReader.Field.ALL), 3, null, null));
       assertNotNull(e1);
       assertEquals(appId, e1.getId());
       assertEquals(TimelineEntityType.YARN_APPLICATION.toString(),
@@ -446,12 +449,15 @@
       e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appId,
          entity.getType(), entity.getId()), new TimelineDataToRetrieve(
-         null, null, EnumSet.of(TimelineReader.Field.ALL), null));
+         null, null, EnumSet.of(TimelineReader.Field.ALL), null, null, null));
       assertNotNull(e1);
       assertEquals(appId, e1.getId());
       assertEquals(TimelineEntityType.YARN_APPLICATION.toString(),
           e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
+      infoMap2 = e1.getInfo();
+      // the FROM_ID key is added by the storage layer. Remove it for
+      // comparison.
+      infoMap2.remove("FROM_ID");
-      assertEquals(infoMap, e1.getInfo());
+      assertEquals(infoMap, infoMap2);
       assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
       assertEquals(relatesTo, e1.getRelatesToEntities());
@@ -464,9 +470,9 @@
             metric.getId().equals("MEM_USAGE"));
         assertEquals(1, metric.getValues().size());
         if (metric.getId().equals("MAP_SLOT_MILLIS")) {
-          assertTrue(metric.getValues().containsKey(ts - 20000));
-          assertEquals(metricValues.get(ts - 20000),
-              metric.getValues().get(ts - 20000));
+          assertTrue(metric.getValues().containsKey(CURRENT_TIME - 20000));
+          assertEquals(metricValues.get(CURRENT_TIME - 20000),
+              metric.getValues().get(CURRENT_TIME - 20000));
         }
         if (metric.getId().equals("MEM_USAGE")) {
           assertTrue(metric.getValues().containsKey(aggTs - 120000));
@@ -513,7 +519,9 @@
       String flowVersion = "1111F01C2287BA";
       long runid = 1009876543218L;
       String appName = "application_123465899910_1001";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // retrieve the row
@@ -553,11 +561,13 @@
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       TimelineEntity e2 = reader.getEntity(
           new TimelineReaderContext(cluster, user, null, null, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertNotNull(e2);
       assertEquals(e1, e2);
@@ -626,14 +636,17 @@
       hbi.init(c1);
       hbi.start();
       // Writing application entity.
+      TimelineCollectorContext context = new TimelineCollectorContext("c1",
+          "u1", "f1", "v1", 1002345678919L, appId);
+      UserGroupInformation user = UserGroupInformation.createRemoteUser("u1");
       try {
-        hbi.write("c1", "u1", "f1", "v1", 1002345678919L, appId, teApp);
+        hbi.write(context, teApp, user);
         Assert.fail("Expected an exception as metric values are non integral");
       } catch (IOException e) {}
 
       // Writing generic entity.
       try {
-        hbi.write("c1", "u1", "f1", "v1", 1002345678919L, appId, teEntity);
+        hbi.write(context, teEntity, user);
         Assert.fail("Expected an exception as metric values are non integral");
       } catch (IOException e) {}
       hbi.stop();
@@ -651,7 +664,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertNotNull(entity);
     assertEquals(3, entity.getConfigs().size());
     assertEquals(1, entity.getIsRelatedToEntities().size());
@@ -659,8 +673,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
     int metricCnt = 0;
@@ -684,7 +699,7 @@
     }
     assertEquals(5, cfgCnt);
     assertEquals(3, metricCnt);
-    assertEquals(5, infoCnt);
+    assertEquals(8, infoCnt);
     assertEquals(4, eventCnt);
     assertEquals(4, relatesToCnt);
     assertEquals(4, isRelatedToCnt);
@@ -696,8 +711,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, 1425016502000L, 1425016502040L, null,
-        null, null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L)
+            .createTimeEnd(1425016502040L).build(),
         new TimelineDataToRetrieve());
     assertEquals(3, entities.size());
     for (TimelineEntity entity : entities) {
@@ -713,8 +728,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, 1425016502015L, null, null, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L)
+            .build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
@@ -728,8 +743,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, 1425016502015L, null, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L)
+            .build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
@@ -748,18 +763,20 @@
         TimelineEntityType.YARN_APPLICATION.toString(), null),
         new TimelineDataToRetrieve());
     assertNotNull(e1);
-    assertTrue(e1.getInfo().isEmpty() && e1.getConfigs().isEmpty() &&
+    assertEquals(1, e1.getInfo().size());
+    assertTrue(e1.getConfigs().isEmpty() &&
         e1.getMetrics().isEmpty() && e1.getIsRelatedToEntities().isEmpty() &&
         e1.getRelatesToEntities().isEmpty());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
-      assertTrue(e.getInfo().isEmpty() && e.getConfigs().isEmpty() &&
+      assertEquals(1, e.getInfo().size());
+      assertTrue(e.getConfigs().isEmpty() &&
           e.getMetrics().isEmpty() && e.getIsRelatedToEntities().isEmpty() &&
           e.getRelatesToEntities().isEmpty());
     }
@@ -772,17 +789,17 @@
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null));
+        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null, null, null));
     assertNotNull(e1);
     assertEquals(3, e1.getConfigs().size());
     assertEquals(0, e1.getIsRelatedToEntities().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
-        null),
-        new TimelineEntityFilters(),
+        null), new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
+        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null,
+        null, null));
     assertEquals(3, es1.size());
     int metricsCnt = 0;
     int isRelatedToCnt = 0;
@@ -792,7 +809,7 @@
       isRelatedToCnt += entity.getIsRelatedToEntities().size();
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(0, infoCnt);
+    assertEquals(3, infoCnt);
     assertEquals(4, isRelatedToCnt);
     assertEquals(3, metricsCnt);
   }
@@ -810,9 +827,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt, null, null, null,
-        null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -836,8 +853,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt1, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -860,8 +876,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt2, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     isRelatedToCnt = 0;
@@ -883,8 +898,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt3, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -907,8 +921,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt4, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -920,8 +933,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt5, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -941,8 +953,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt6, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -969,9 +980,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt, null, null, null, null,
-        null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().relatesTo(rt).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -995,8 +1006,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt1, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1019,8 +1029,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt2, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     relatesToCnt = 0;
@@ -1042,8 +1051,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt3, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1066,8 +1074,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt4, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1079,8 +1086,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt5, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1100,8 +1106,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt6, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1137,8 +1142,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt7, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt7).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1175,8 +1179,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, relatesTo, isRelatedTo,
-        null, null, null, eventFilter),
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo)
+            .isRelatedTo(isRelatedTo).eventFilters(eventFilter).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     int eventCnt = 0;
@@ -1213,10 +1217,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1228,9 +1232,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1245,10 +1250,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1265,10 +1270,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList3 = new TimelineFilterList(
@@ -1278,10 +1283,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList4 = new TimelineFilterList(
@@ -1291,10 +1296,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList4, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList5 = new TimelineFilterList(
@@ -1304,10 +1309,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1322,9 +1327,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().eventFilters(ef).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1344,8 +1349,8 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef1), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().eventFilters(ef1).build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1363,8 +1368,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef2),
+        new TimelineEntityFilters.Builder().eventFilters(ef2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     eventCnt = 0;
@@ -1387,8 +1391,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef3),
+        new TimelineEntityFilters.Builder().eventFilters(ef3).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1405,8 +1408,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef4),
+        new TimelineEntityFilters.Builder().eventFilters(ef4).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -1427,8 +1429,7 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef5),
+        new TimelineEntityFilters.Builder().eventFilters(ef5).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -1450,15 +1451,15 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getConfigs().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null) ,
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
       cfgCnt += entity.getConfigs().size();
@@ -1482,9 +1483,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1515,9 +1516,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
-        new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(confsToRetrieve, null, null, null, null,
+        null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1546,10 +1548,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1561,9 +1563,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1580,10 +1583,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1600,10 +1603,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -1613,10 +1616,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList4 = new TimelineFilterList(
@@ -1626,10 +1629,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList5 = new TimelineFilterList(
@@ -1639,10 +1642,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1655,15 +1658,15 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getMetrics().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
       metricCnt += entity.getMetrics().size();
@@ -1687,9 +1690,9 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
@@ -1713,9 +1716,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
-        new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+        null));
     metricCnt = 0;
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
@@ -1730,9 +1734,10 @@
     entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
         "some_flow_name", 1002345678919L, null,
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null), new TimelineDataToRetrieve(null,
-        metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null, metricsToRetrieve,
+        EnumSet.of(Field.METRICS), Integer.MAX_VALUE, null, null));
     metricCnt = 0;
     int metricValCnt = 0;
     assertEquals(2, entities.size());
@@ -1749,6 +1754,86 @@
   }
 
   @Test
+  public void testReadAppsMetricTimeRange() throws Exception {
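+    // Baseline read: metricsLimit of 100 and no time window, so every stored
+    // datapoint of every metric should come back (13 across the 3 apps).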
+    Set<TimelineEntity> entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, null, null));
+    assertEquals(3, entities.size());
+    int metricTimeSeriesCnt = 0;
+    int metricCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(13, metricTimeSeriesCnt);
+
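+    // Same read restricted to [CURRENT_TIME - 40000, CURRENT_TIME]; only
+    // datapoints whose timestamp falls inside the window may be returned.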
+    entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(5, metricTimeSeriesCnt);
+
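+    // With metricsLimit left null the reader appears to fall back to its
+    // default of a single (latest) value per metric, hence one datapoint for
+    // each of the 3 metrics.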
+    entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        null, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(3, metricTimeSeriesCnt);
+
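+    // A single-entity read must honour the same time window.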
+    TimelineEntity entity = reader.getEntity(new TimelineReaderContext(
+        "cluster1", "user1", "some_flow_name", 1002345678919L,
+        "application_1111111111_2222",
+        TimelineEntityType.YARN_APPLICATION.toString(), null),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100,
+        CURRENT_TIME - 40000, CURRENT_TIME));
+    assertNotNull(entity);
+    assertEquals(2, entity.getMetrics().size());
+    metricTimeSeriesCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      for (Long ts : m.getValues().keySet()) {
+        assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+      }
+      metricTimeSeriesCnt += m.getValues().size();
+    }
+    assertEquals(3, metricTimeSeriesCnt);
+  }
+
+  @Test
   public void testReadAppsInfoFilters() throws Exception {
     TimelineFilterList list1 = new TimelineFilterList();
     list1.addFilter(new TimelineKeyValueFilter(
@@ -1766,15 +1851,15 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(5, infoCnt);
+    assertEquals(7, infoCnt);
 
     TimelineFilterList infoFilterList1 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1783,15 +1868,16 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(1, entities.size());
     infoCnt = 0;
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(3, infoCnt);
+    assertEquals(4, infoCnt);
 
     TimelineFilterList infoFilterList2 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1802,9 +1888,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList3 = new TimelineFilterList(
@@ -1814,9 +1901,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList4 = new TimelineFilterList(
@@ -1826,9 +1914,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList5 = new TimelineFilterList(
@@ -1838,9 +1927,10 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList5,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(3, entities.size());
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index e18d0d0..5e08999 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -48,6 +49,7 @@
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -72,6 +74,11 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -94,17 +101,14 @@
 
   private static HBaseTestingUtility util;
   private HBaseTimelineReaderImpl reader;
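+  // Fixed reference timestamp, shared with the generated test data so the
+  // metric time-range assertions are deterministic.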
+  private static final long CURRENT_TIME = System.currentTimeMillis();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     util = new HBaseTestingUtility();
     util.startMiniCluster();
-    createSchema();
-    DataGeneratorForTest.loadEntities(util);
-  }
-
-  private static void createSchema() throws IOException {
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+    DataGeneratorForTest.createSchema(util.getConfiguration());
+    DataGeneratorForTest.loadEntities(util, CURRENT_TIME);
   }
 
   @Before
@@ -200,13 +204,16 @@
       hbi.start();
       String cluster = "cluster_test_write_entity";
       String user = "user1";
+      String subAppUser = "subAppUser1";
       String flow = "some_flow_name";
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
       String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
           ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1)
       );
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
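+      // write() takes the collector context plus the UGI of the posting
+      // user; the remote user's name ends up in the sub-application table
+      // row key (checked below against subAppUser).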
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te,
+          UserGroupInformation.createRemoteUser(subAppUser));
       hbi.stop();
 
       // scan the table and see that entity exists
@@ -300,13 +307,13 @@
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL),
-          Integer.MAX_VALUE));
+          Integer.MAX_VALUE, null, null));
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL),
-          Integer.MAX_VALUE));
+          Integer.MAX_VALUE, null, null));
       assertNotNull(e1);
       assertEquals(1, es1.size());
 
@@ -315,6 +322,8 @@
       assertEquals(type, e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
       Map<String, Object> infoMap2 = e1.getInfo();
+      // FROM_ID is added by the storage layer. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
       assertEquals(infoMap, infoMap2);
 
       Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
@@ -335,12 +344,16 @@
 
       e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow,
           runid, appName, entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertEquals(id, e1.getId());
       assertEquals(type, e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
-      assertEquals(infoMap, e1.getInfo());
+      infoMap2 = e1.getInfo();
+      // FROM_ID is added by the storage layer. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
+      assertEquals(infoMap, infoMap2);
       assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
       assertEquals(relatesTo, e1.getRelatesToEntities());
       assertEquals(conf, e1.getConfigs());
@@ -351,6 +364,11 @@
         assertEquals(metricValues.get(ts - 20000),
             metric.getValues().get(ts - 20000));
       }
+
+      // Verify the entities written to the sub-application table.
+      verifySubApplicationTableEntities(cluster, user, flow, flowVersion, runid,
+          appName, subAppUser, c1, entity, id, type, infoMap, isRelatedTo,
+          relatesTo, conf, metricValues, metrics, cTime, m1);
     } finally {
       if (hbi != null) {
         hbi.stop();
@@ -359,6 +377,98 @@
     }
   }
 
+  private void verifySubApplicationTableEntities(String cluster, String user,
+      String flow, String flowVersion, Long runid, String appName,
+      String subAppUser, Configuration c1, TimelineEntity entity, String id,
+      String type, Map<String, Object> infoMap,
+      Map<String, Set<String>> isRelatedTo, Map<String, Set<String>> relatesTo,
+      Map<String, String> conf, Map<Long, Number> metricValues,
+      Set<TimelineMetric> metrics, Long cTime, TimelineMetric m1)
+      throws IOException {
+    Scan s = new Scan();
+    // read from SubApplicationTable
+    byte[] startRow = new SubApplicationRowKeyPrefix(cluster, subAppUser, null,
+        null, null, null).getRowKeyPrefix();
+    s.setStartRow(startRow);
+    s.setMaxVersions(Integer.MAX_VALUE);
+    Connection conn = ConnectionFactory.createConnection(c1);
+    ResultScanner scanner =
+        new SubApplicationTable().getResultScanner(c1, conn, s);
+
+    int rowCount = 0;
+    int colCount = 0;
+    KeyConverter<String> stringKeyConverter = new StringKeyConverter();
+    for (Result result : scanner) {
+      if (result != null && !result.isEmpty()) {
+        rowCount++;
+        colCount += result.size();
+        byte[] row1 = result.getRow();
+        assertTrue(verifyRowKeyForSubApplication(row1, subAppUser, cluster,
+            user, entity));
+
+        // check info column family
+        String id1 = SubApplicationColumn.ID.readResult(result).toString();
+        assertEquals(id, id1);
+
+        String type1 = SubApplicationColumn.TYPE.readResult(result).toString();
+        assertEquals(type, type1);
+
+        Long cTime1 =
+            (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
+        assertEquals(cTime, cTime1);
+
+        Map<String, Object> infoColumns = SubApplicationColumnPrefix.INFO
+            .readResults(result, new StringKeyConverter());
+        assertEquals(infoMap, infoColumns);
+
+        // Remember isRelatedTo is of type Map<String, Set<String>>
+        for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
+            .entrySet()) {
+          Object isRelatedToValue = SubApplicationColumnPrefix.IS_RELATED_TO
+              .readResult(result, isRelatedToEntry.getKey());
+          String compoundValue = isRelatedToValue.toString();
+          // id7?id9?id6
+          Set<String> isRelatedToValues =
+              new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
+          assertEquals(isRelatedTo.get(isRelatedToEntry.getKey()).size(),
+              isRelatedToValues.size());
+          for (String v : isRelatedToEntry.getValue()) {
+            assertTrue(isRelatedToValues.contains(v));
+          }
+        }
+
+        // RelatesTo
+        for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
+            .entrySet()) {
+          String compoundValue = SubApplicationColumnPrefix.RELATES_TO
+              .readResult(result, relatesToEntry.getKey()).toString();
+          // id3?id4?id5
+          Set<String> relatesToValues =
+              new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
+          assertEquals(relatesTo.get(relatesToEntry.getKey()).size(),
+              relatesToValues.size());
+          for (String v : relatesToEntry.getValue()) {
+            assertTrue(relatesToValues.contains(v));
+          }
+        }
+
+        // Configuration
+        Map<String, Object> configColumns = SubApplicationColumnPrefix.CONFIG
+            .readResults(result, stringKeyConverter);
+        assertEquals(conf, configColumns);
+
+        NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
+            SubApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
+                stringKeyConverter);
+
+        NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
+        matchMetrics(metricValues, metricMap);
+      }
+    }
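+    // One entity was written, so exactly one row is expected; the cell count
+    // below simply pins the current column layout of the table.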
+    assertEquals(1, rowCount);
+    assertEquals(16, colCount);
+  }
+
   private boolean isRowKeyCorrect(byte[] rowKey, String cluster, String user,
       String flow, Long runid, String appName, TimelineEntity te) {
 
@@ -406,7 +516,9 @@
       byte[] startRow =
           new EntityRowKeyPrefix(cluster, user, flow, runid, appName)
               .getRowKeyPrefix();
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
       // scan the table and see that entity exists
       Scan s = new Scan();
@@ -450,12 +562,14 @@
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
-          new TimelineEntityFilters(),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineEntityFilters.Builder().build(),
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertEquals(1, es1.size());
 
@@ -509,14 +623,17 @@
       String flowVersion = "1111F01C2287BA";
       long runid = 1009876543218L;
       String appName = "application_123465899910_2001";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // read the timeline entity using the reader this time
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       // check the events
       NavigableSet<TimelineEvent> events = e1.getEvents();
@@ -545,15 +662,17 @@
     TimelineEntity entity = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertNotNull(entity);
     assertEquals(3, entity.getConfigs().size());
     assertEquals(1, entity.getIsRelatedToEntities().size());
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world",
-        null), new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
     int metricCnt = 0;
@@ -577,7 +696,7 @@
     }
     assertEquals(5, cfgCnt);
     assertEquals(3, metricCnt);
-    assertEquals(5, infoCnt);
+    assertEquals(8, infoCnt);
     assertEquals(4, eventCnt);
     assertEquals(4, relatesToCnt);
     assertEquals(4, isRelatedToCnt);
@@ -588,8 +707,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, 1425016502000L, 1425016502040L, null,
-        null, null, null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L)
+            .createTimeEnd(1425016502040L).build(),
+        new TimelineDataToRetrieve());
     assertEquals(3, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello") && !entity.getId().equals("hello1") &&
@@ -601,8 +721,9 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, 1425016502015L, null, null, null, null,
-        null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L)
+            .build(),
+        new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello1") &&
@@ -613,8 +734,9 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world",  null),
-        new TimelineEntityFilters(null, null, 1425016502015L, null, null, null,
-        null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L)
+            .build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello")) {
@@ -646,8 +768,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, relatesTo, isRelatedTo,
-        null, null, null, eventFilter), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo)
+            .isRelatedTo(isRelatedTo).eventFilters(eventFilter).build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     int eventCnt = 0;
     int isRelatedToCnt = 0;
@@ -675,9 +798,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().eventFilters(ef).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -696,8 +819,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef1),
+        new TimelineEntityFilters.Builder().eventFilters(ef1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -715,8 +837,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef2),
+        new TimelineEntityFilters.Builder().eventFilters(ef2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     eventCnt = 0;
@@ -737,8 +858,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef3),
+        new TimelineEntityFilters.Builder().eventFilters(ef3).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -754,8 +874,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef4),
+        new TimelineEntityFilters.Builder().eventFilters(ef4).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -775,8 +894,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef5),
+        new TimelineEntityFilters.Builder().eventFilters(ef5).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -801,9 +919,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt, null, null, null,
-        null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -825,8 +943,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt1, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -848,8 +965,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt2, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     isRelatedToCnt = 0;
@@ -869,8 +985,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt3, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -892,8 +1007,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt4, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -904,8 +1018,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt5, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -924,8 +1037,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt6, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -950,9 +1062,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt, null, null, null, null,
-        null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().relatesTo(rt).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -974,8 +1086,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt1, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -997,8 +1108,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt2, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     relatesToCnt = 0;
@@ -1018,8 +1128,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt3, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1041,8 +1150,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt4, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1053,8 +1161,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt5, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1073,8 +1180,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt6, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1109,8 +1215,7 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt7, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt7).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1130,19 +1235,21 @@
         1002345678919L, "application_1231111111_1111", "world", "hello"),
         new TimelineDataToRetrieve());
     assertNotNull(e1);
-    assertTrue(e1.getInfo().isEmpty() && e1.getConfigs().isEmpty() &&
+    assertEquals(1, e1.getInfo().size());
+    assertTrue(e1.getConfigs().isEmpty() &&
         e1.getMetrics().isEmpty() && e1.getIsRelatedToEntities().isEmpty() &&
         e1.getRelatesToEntities().isEmpty());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
-      assertTrue(e.getInfo().isEmpty() && e.getConfigs().isEmpty() &&
+      assertTrue(e.getConfigs().isEmpty() &&
           e.getMetrics().isEmpty() && e.getIsRelatedToEntities().isEmpty() &&
           e.getRelatesToEntities().isEmpty());
+      assertEquals(1, e.getInfo().size());
     }
   }
 
@@ -1152,16 +1259,16 @@
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null));
+        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null, null, null));
     assertNotNull(e1);
     assertEquals(3, e1.getConfigs().size());
     assertEquals(0, e1.getIsRelatedToEntities().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.IS_RELATED_TO,
+        Field.METRICS), null, null, null));
     assertEquals(3, es1.size());
     int metricsCnt = 0;
     int isRelatedToCnt = 0;
@@ -1171,7 +1278,7 @@
       isRelatedToCnt += entity.getIsRelatedToEntities().size();
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(0, infoCnt);
+    assertEquals(3, infoCnt);
     assertEquals(4, isRelatedToCnt);
     assertEquals(3, metricsCnt);
   }
@@ -1184,14 +1291,14 @@
     TimelineEntity e1 = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getConfigs().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
       cfgCnt += entity.getConfigs().size();
@@ -1220,10 +1327,10 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1234,9 +1341,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1250,10 +1358,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1269,10 +1377,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList3 = new TimelineFilterList(
@@ -1281,22 +1389,22 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList4 = new TimelineFilterList(
         new TimelineKeyValueFilter(
         TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1"));
     entities = reader.getEntities(
-            new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
             1002345678919L, "application_1231111111_1111", "world", null),
-            new TimelineEntityFilters(null, null, null, null, null, null,
-            confFilterList4, null, null),
-            new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-            null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
+            null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList5 = new TimelineFilterList(
@@ -1305,10 +1413,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1323,9 +1431,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1354,9 +1462,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
-        new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(confsToRetrieve, null, null, null, null,
+        null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1377,14 +1486,14 @@
     TimelineEntity e1 = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getMetrics().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
       metricCnt += entity.getMetrics().size();
@@ -1397,6 +1506,63 @@
   }
 
   @Test
+  public void testReadEntitiesMetricTimeRange() throws Exception {
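+    // Mirrors testReadAppsMetricTimeRange for generic entities: metricsLimit
+    // of 100 and no window returns the full time series (13 datapoints).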
+    Set<TimelineEntity> entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, "application_1231111111_1111", "world", null),
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, null, null));
+    assertEquals(3, entities.size());
+    int metricTimeSeriesCnt = 0;
+    int metricCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(13, metricTimeSeriesCnt);
+
+    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
+        "some_flow_name", 1002345678919L, "application_1231111111_1111",
+        "world", null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(5, metricTimeSeriesCnt);
+
+    TimelineEntity entity = reader.getEntity(new TimelineReaderContext(
+        "cluster1", "user1", "some_flow_name", 1002345678919L,
+        "application_1231111111_1111", "world", "hello"),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100,
+        CURRENT_TIME - 40000, CURRENT_TIME));
+    assertNotNull(entity);
+    assertEquals(2, entity.getMetrics().size());
+    metricTimeSeriesCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      for (Long ts : m.getValues().keySet()) {
+        assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+      }
+      metricTimeSeriesCnt += m.getValues().size();
+    }
+    assertEquals(3, metricTimeSeriesCnt);
+  }
+
+  @Test
   public void testReadEntitiesMetricFilters() throws Exception {
     TimelineFilterList list1 = new TimelineFilterList();
     list1.addFilter(new TimelineCompareFilter(
@@ -1411,10 +1577,10 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1425,9 +1591,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1443,10 +1610,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1462,10 +1629,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -1474,10 +1641,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList4 = new TimelineFilterList(
@@ -1486,10 +1653,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList5 = new TimelineFilterList(
@@ -1498,10 +1665,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1516,9 +1683,9 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertEquals(1, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1545,10 +1712,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(
-        null, metricsToRetrieve, EnumSet.of(Field.METRICS), null));
+        null, metricsToRetrieve, EnumSet.of(Field.METRICS), null, null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1564,9 +1731,11 @@
 
     entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
         "some_flow_name", 1002345678919L, "application_1231111111_1111",
-        "world", null), new TimelineEntityFilters(null, null, null, null, null,
-        null, null, metricFilterList1, null), new TimelineDataToRetrieve(null,
-        metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
+        "world", null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null, metricsToRetrieve,
+        EnumSet.of(Field.METRICS), Integer.MAX_VALUE, null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     int metricValCnt = 0;
@@ -1599,15 +1768,15 @@
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(5, infoCnt);
+    assertEquals(7, infoCnt);
 
     TimelineFilterList infoFilterList1 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1615,15 +1784,16 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(1, entities.size());
     infoCnt = 0;
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(3, infoCnt);
+    assertEquals(4, infoCnt);
 
     TimelineFilterList infoFilterList2 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1633,9 +1803,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList3 = new TimelineFilterList(
@@ -1644,9 +1815,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList4 = new TimelineFilterList(
@@ -1655,9 +1827,10 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList5 = new TimelineFilterList(
@@ -1666,14 +1839,49 @@
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList5,
-        null, null, null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(3, entities.size());
   }
 
+  @Test(timeout = 90000)
+  public void testListTypesInApp() throws Exception {
+    Set<String> types = reader.getEntityTypes(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+            1002345678919L, "application_1231111111_1111", null, null));
+    assertEquals(4, types.size());
+
+    types = reader.getEntityTypes(
+        new TimelineReaderContext("cluster1", null, null,
+            null, "application_1231111111_1111", null, null));
+    assertEquals(4, types.size());
+
+    types = reader.getEntityTypes(
+        new TimelineReaderContext("cluster1", null, null,
+            null, "application_1231111111_1112", null, null));
+    assertEquals(4, types.size());
+
+    types = reader.getEntityTypes(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+            1002345678919L, "application_1231111111_1113", null, null));
+    assertEquals(0, types.size());
+  }
+
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     util.shutdownMiniCluster();
   }
+
+  private boolean verifyRowKeyForSubApplication(byte[] rowKey, String suAppUser,
+      String cluster, String user, TimelineEntity te) {
+    SubApplicationRowKey key = SubApplicationRowKey.parseRowKey(rowKey);
+    assertEquals(suAppUser, key.getSubAppUserId());
+    assertEquals(cluster, key.getClusterId());
+    assertEquals(te.getType(), key.getEntityType());
+    assertEquals(te.getId(), key.getEntityId());
+    assertEquals(user, key.getUserId());
+    return true;
+  }
 }
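The bulk of the churn in the test file above is one mechanical migration: the
nine-argument positional TimelineEntityFilters constructor gives way to a
fluent Builder, and the TimelineDataToRetrieve constructor gains two trailing
parameters that every call site here passes as null. A minimal sketch of the
new call shapes, assuming only the signatures visible in the hunks (reading
the two new parameters as metric time-range bounds is an inference from the
time-series assertions at the top of this section, not from the class
javadoc):

    import java.util.EnumSet;

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
    import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;

    final class FiltersMigrationSketch {
      private FiltersMigrationSketch() {
      }

      // Only the filter a test cares about is set; every other of the nine
      // old positional slots now simply defaults instead of being spelled
      // out as null at each call site.
      static TimelineEntityFilters metricFiltersOnly(TimelineFilterList list) {
        return new TimelineEntityFilters.Builder()
            .metricFilters(list)
            .build();
      }

      // The two trailing nulls match the widened constructor used above.
      static TimelineDataToRetrieve metricsOnly() {
        return new TimelineDataToRetrieve(null, null,
            EnumSet.of(Field.METRICS), null, null, null);
      }
    }

The gain is legibility: under the old form a reader had to count nulls to
discover which of nine positions carried the metric filter.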
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
new file mode 100644
index 0000000..0dcd171
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Unit tests for checking different schema prefixes.
+ */
+public class TestHBaseTimelineStorageSchema {
+  private static HBaseTestingUtility util;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    util = new HBaseTestingUtility();
+    Configuration conf = util.getConfiguration();
+    conf.setInt("hfile.format.version", 3);
+    util.startMiniCluster();
+  }
+
+  @Test
+  public void createWithDefaultPrefix() throws IOException {
+    Configuration hbaseConf = util.getConfiguration();
+    DataGeneratorForTest.createSchema(hbaseConf);
+    Connection conn = ConnectionFactory.createConnection(hbaseConf);
+    Admin admin = conn.getAdmin();
+
+    TableName entityTableName = BaseTable.getTableName(hbaseConf,
+        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(entityTableName));
+    assertTrue(entityTableName.getNameAsString().startsWith(
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
+    Table entityTable = conn.getTable(entityTableName);
+    assertNotNull(entityTable);
+
+    TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(flowRunTableName));
+    assertTrue(flowRunTableName.getNameAsString().startsWith(
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
+    Table flowRunTable = conn.getTable(flowRunTableName);
+    assertNotNull(flowRunTable);
+  }
+
+  @Test
+  public void createWithSetPrefix() throws IOException {
+    Configuration hbaseConf = util.getConfiguration();
+    String prefix = "unit-test.";
+    hbaseConf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
+        prefix);
+    DataGeneratorForTest.createSchema(hbaseConf);
+    Connection conn = ConnectionFactory.createConnection(hbaseConf);
+    Admin admin = conn.getAdmin();
+
+    TableName entityTableName = BaseTable.getTableName(hbaseConf,
+        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(entityTableName));
+    assertTrue(entityTableName.getNameAsString().startsWith(prefix));
+    Table entityTable = conn.getTable(entityTableName);
+    assertNotNull(entityTable);
+
+    TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(flowRunTableName));
+    assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
+    Table flowRunTable = conn.getTable(flowRunTableName);
+    assertNotNull(flowRunTable);
+
+    // create another set of tables with a different prefix
+    hbaseConf
+        .unset(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME);
+    prefix = "yet-another-unit-test.";
+    hbaseConf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
+        prefix);
+    DataGeneratorForTest.createSchema(hbaseConf);
+    entityTableName = BaseTable.getTableName(hbaseConf,
+        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(entityTableName));
+    assertTrue(entityTableName.getNameAsString().startsWith(prefix));
+    entityTable = conn.getTable(entityTableName);
+    assertNotNull(entityTable);
+
+    flowRunTableName = BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    assertTrue(admin.tableExists(flowRunTableName));
+    assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
+    flowRunTable = conn.getTable(flowRunTableName);
+    assertNotNull(flowRunTable);
+    hbaseConf
+        .unset(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+}
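Both tests above funnel through the same resolution path: BaseTable.getTableName
consults the configuration and prepends the schema prefix to the default table
name. A compact sketch of that usage, assuming only the getTableName signature
and the YarnConfiguration key exercised in the tests:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
    import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;

    final class PrefixResolutionSketch {
      private PrefixResolutionSketch() {
      }

      // Resolves the effective entity-table name for a given schema prefix.
      // The prefix must be in the configuration before the schema is
      // created, which is why createWithSetPrefix sets it ahead of
      // DataGeneratorForTest.createSchema.
      static TableName entityTableFor(Configuration conf, String prefix) {
        conf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
            prefix);
        return BaseTable.getTableName(conf, EntityTable.TABLE_NAME_CONF_NAME,
            EntityTable.DEFAULT_TABLE_NAME);
      }
    }

With prefix "unit-test." the resolved name starts with "unit-test.", as the
assertions above require, so two prefixes can host two independent schema
instances side by side in a single HBase cluster.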
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
index b608987..8ab32df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestFlowDataGenerator.java
@@ -33,7 +33,7 @@
 /**
  * Generates the data/entities for the FlowRun and FlowActivity Tables.
  */
-final class TestFlowDataGenerator {
+public final class TestFlowDataGenerator {
   private TestFlowDataGenerator() {
   }
 
@@ -41,7 +41,8 @@
   private static final String METRIC_2 = "HDFS_BYTES_READ";
   public static final long END_TS_INCR = 10000L;
 
-  static TimelineEntity getEntityMetricsApp1(long insertTs, Configuration c1) {
+  public static TimelineEntity getEntityMetricsApp1(long insertTs,
+      Configuration c1) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunMetrics_test";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -83,7 +84,7 @@
   }
 
 
-  static TimelineEntity getEntityMetricsApp1Complete(long insertTs,
+  public static TimelineEntity getEntityMetricsApp1Complete(long insertTs,
       Configuration c1) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunMetrics_test";
@@ -125,7 +126,7 @@
   }
 
 
-  static TimelineEntity getEntityMetricsApp1(long insertTs) {
+  public static TimelineEntity getEntityMetricsApp1(long insertTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunMetrics_test";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -168,8 +169,7 @@
     return entity;
   }
 
-
-  static TimelineEntity getEntityMetricsApp2(long insertTs) {
+  public static TimelineEntity getEntityMetricsApp2(long insertTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunMetrics_test";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -200,7 +200,7 @@
     return entity;
   }
 
-  static TimelineEntity getEntity1() {
+  public static TimelineEntity getEntity1() {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunHello";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -243,7 +243,7 @@
     return entity;
   }
 
-  static TimelineEntity getAFullEntity(long ts, long endTs) {
+  public static TimelineEntity getAFullEntity(long ts, long endTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunFullEntity";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -292,7 +292,7 @@
     return entity;
   }
 
-  static TimelineEntity getEntityGreaterStartTime(long startTs) {
+  public static TimelineEntity getEntityGreaterStartTime(long startTs) {
     TimelineEntity entity = new TimelineEntity();
     entity.setCreatedTime(startTs);
     entity.setId("flowRunHello with greater start time");
@@ -308,7 +308,7 @@
     return entity;
   }
 
-  static TimelineEntity getEntityMaxEndTime(long endTs) {
+  public static TimelineEntity getEntityMaxEndTime(long endTs) {
     TimelineEntity entity = new TimelineEntity();
     entity.setId("flowRunHello Max End time");
     entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
@@ -322,7 +322,7 @@
     return entity;
   }
 
-  static TimelineEntity getEntityMinStartTime(long startTs) {
+  public static TimelineEntity getEntityMinStartTime(long startTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunHelloMInStartTime";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -336,7 +336,7 @@
     return entity;
   }
 
-  static TimelineEntity getMinFlushEntity(long startTs) {
+  public static TimelineEntity getMinFlushEntity(long startTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunHelloFlushEntityMin";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -350,7 +350,7 @@
     return entity;
   }
 
-  static TimelineEntity getMaxFlushEntity(long startTs) {
+  public static TimelineEntity getMaxFlushEntity(long startTs) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowRunHelloFlushEntityMax";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
@@ -365,7 +365,7 @@
     return entity;
   }
 
-  static TimelineEntity getFlowApp1(long appCreatedTime) {
+  public static TimelineEntity getFlowApp1(long appCreatedTime) {
     TimelineEntity entity = new TimelineEntity();
     String id = "flowActivity_test";
     String type = TimelineEntityType.YARN_APPLICATION.toString();
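Every change in this file is the same visibility widening: package-private
factory methods become public, presumably so that tests outside the flow
package (such as the schema test added above, or DataGeneratorForTest in the
parent storage package) can reuse the generated entities. A hypothetical
cross-package call, assuming only the signatures shown in the hunks:

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.server.timelineservice.storage.flow.TestFlowDataGenerator;

    // From a test in another package; this only compiles once
    // getEntityMetricsApp1 is public.
    TimelineEntity app =
        TestFlowDataGenerator.getEntityMetricsApp1(System.currentTimeMillis());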
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
index 2778f50..4bf221e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
@@ -30,7 +30,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
@@ -39,18 +38,21 @@
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.junit.AfterClass;
@@ -70,11 +72,7 @@
     Configuration conf = util.getConfiguration();
     conf.setInt("hfile.format.version", 3);
     util.startMiniCluster();
-    createSchema();
-  }
-
-  private static void createSchema() throws IOException {
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+    DataGeneratorForTest.createSchema(util.getConfiguration());
   }
 
   /**
@@ -121,13 +119,18 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with the right min start time
       te = new TimelineEntities();
       te.addEntity(entityMinStartTime);
       appName = "application_100000000000_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity for the max end time
       TimelineEntity entityMaxEndTime = TestFlowDataGenerator
@@ -135,7 +138,8 @@
       te = new TimelineEntities();
       te.addEntity(entityMaxEndTime);
       appName = "application_100000000000_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with a greater start time
       TimelineEntity entityGreaterStartTime = TestFlowDataGenerator
@@ -143,7 +147,8 @@
       te = new TimelineEntities();
       te.addEntity(entityGreaterStartTime);
       appName = "application_1000000000000000_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // flush everything to hbase
       hbi.flush();
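The second recurring migration is on the write path: the seven positional
arguments of the old hbi.write(...) are bundled into a TimelineCollectorContext,
and the caller now also passes the UserGroupInformation the write is performed
for (plausibly so sub-application rows can be keyed by that user, as
verifyRowKeyForSubApplication earlier suggests, though that is an inference).
A before/after sketch using only literals and names from the hunks:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
    import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
    import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;

    final class WriterMigrationSketch {
      private WriterMigrationSketch() {
      }

      static void writeApp(HBaseTimelineWriterImpl hbi, TimelineEntities te)
          throws Exception {
        // Old: hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
        // New: the identifiers travel in a context object, plus the remote
        // user on whose behalf the entities are written.
        UserGroupInformation remoteUser =
            UserGroupInformation.createRemoteUser("user1");
        hbi.write(new TimelineCollectorContext("cluster1", "user1",
            "some_flow_name", "CF7022C10F1354", 1002345678919L,
            "application_11111111111111_1111"), te, remoteUser);
        hbi.flush();
      }
    }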
@@ -155,8 +160,9 @@
 
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow activity table
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
+            FlowActivityTable.DEFAULT_TABLE_NAME));
     byte[] startRow =
         new FlowActivityRowKey(cluster, minStartTs, user, flow).getRowKey();
     Get g = new Get(startRow);
@@ -187,8 +193,7 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, null, null, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {
@@ -231,7 +236,8 @@
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
       String appName = "application_1111999999_1234";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, UserGroupInformation.createRemoteUser(user));
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -252,8 +258,7 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {
@@ -286,8 +291,9 @@
             .getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
+            FlowActivityTable.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
     int rowCount = 0;
     for (Result result : scanner) {
@@ -344,20 +350,27 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11888888888_1111";
-      hbi.write(cluster, user, flow, flowVersion1, runid1, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion1,
+          runid1, appName), te, remoteUser);
 
       // write another application to this flow but with a different runid/version
       te = new TimelineEntities();
       te.addEntity(entityApp1);
       appName = "application_11888888888_2222";
-      hbi.write(cluster, user, flow, flowVersion2, runid2, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion2,
+          runid2, appName), te, remoteUser);
 
       // write another application to this flow but with a different runid/version
       te = new TimelineEntities();
       te.addEntity(entityApp1);
       appName = "application_11888888888_3333";
-      hbi.write(cluster, user, flow, flowVersion3, runid3, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion3,
+          runid3, appName), te, remoteUser);
 
       hbi.flush();
     } finally {
@@ -379,8 +392,7 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, null, null, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {
@@ -425,13 +437,13 @@
         new FlowActivityRowKey(cluster, appCreatedTime, user, flow).getRowKey();
     s.setStartRow(startRow);
     String clusterStop = cluster + "1";
-    byte[] stopRow =
-        new FlowActivityRowKey(clusterStop, appCreatedTime, user, flow)
-        .getRowKey();
+    byte[] stopRow = new FlowActivityRowKey(clusterStop, appCreatedTime, user,
+        flow).getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowActivityTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
+            FlowActivityTable.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
     int rowCount = 0;
     for (Result result : scanner) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index 7f46a5a..1ad02e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -41,14 +40,16 @@
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -57,12 +58,12 @@
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -84,18 +85,14 @@
     Configuration conf = util.getConfiguration();
     conf.setInt("hfile.format.version", 3);
     util.startMiniCluster();
-    createSchema();
-  }
-
-  private static void createSchema() throws IOException {
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
+    DataGeneratorForTest.createSchema(util.getConfiguration());
   }
 
   @Test
   public void checkCoProcessorOff() throws IOException, InterruptedException {
     Configuration hbaseConf = util.getConfiguration();
-    TableName table = TableName.valueOf(hbaseConf.get(
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    TableName table = BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
     Admin admin = conn.getAdmin();
@@ -106,42 +103,42 @@
       // check the regions.
       // check in flow run table
       util.waitUntilAllRegionsAssigned(table);
-      HRegionServer server = util.getRSForFirstRegionInTable(table);
-      List<Region> regions = server.getOnlineRegions(table);
-      for (Region region : regions) {
-        assertTrue(HBaseTimelineStorageUtils.isFlowRunTable(
-            region.getRegionInfo(), hbaseConf));
-      }
+      checkCoprocessorExists(table, true);
     }
 
-    table = TableName.valueOf(hbaseConf.get(
+    table = BaseTable.getTableName(hbaseConf,
         FlowActivityTable.TABLE_NAME_CONF_NAME,
-        FlowActivityTable.DEFAULT_TABLE_NAME));
+        FlowActivityTable.DEFAULT_TABLE_NAME);
     if (admin.tableExists(table)) {
       // check the regions.
       // check in flow activity table
       util.waitUntilAllRegionsAssigned(table);
-      HRegionServer server = util.getRSForFirstRegionInTable(table);
-      List<Region> regions = server.getOnlineRegions(table);
-      for (Region region : regions) {
-        assertFalse(HBaseTimelineStorageUtils.isFlowRunTable(
-            region.getRegionInfo(), hbaseConf));
-      }
+      checkCoprocessorExists(table, false);
     }
 
-    table = TableName.valueOf(hbaseConf.get(
-        EntityTable.TABLE_NAME_CONF_NAME,
-        EntityTable.DEFAULT_TABLE_NAME));
+    table = BaseTable.getTableName(hbaseConf, EntityTable.TABLE_NAME_CONF_NAME,
+        EntityTable.DEFAULT_TABLE_NAME);
     if (admin.tableExists(table)) {
       // check the regions.
       // check in entity table
       util.waitUntilAllRegionsAssigned(table);
-      HRegionServer server = util.getRSForFirstRegionInTable(table);
-      List<Region> regions = server.getOnlineRegions(table);
-      for (Region region : regions) {
-        assertFalse(HBaseTimelineStorageUtils.isFlowRunTable(
-            region.getRegionInfo(), hbaseConf));
+      checkCoprocessorExists(table, false);
+    }
+  }
+
+  private void checkCoprocessorExists(TableName table, boolean exists)
+      throws IOException, InterruptedException {
+    HRegionServer server = util.getRSForFirstRegionInTable(table);
+    List<Region> regions = server.getOnlineRegions(table);
+    for (Region region : regions) {
+      boolean found = false;
+      Set<String> coprocs = region.getCoprocessorHost().getCoprocessors();
+      for (String coprocName : coprocs) {
+        if (coprocName.contains("FlowRunCoprocessor")) {
+          found = true;
+        }
       }
+      assertEquals(exists, found);
     }
   }
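The new checkCoprocessorExists helper collapses three copies of the
region-scanning loop into one, with the expectation passed in. The invariant
it encodes is that FlowRunCoprocessor is attached to the flow run table and
to no other timeline table, which is how checkCoProcessorOff drives it
(variable names below are illustrative; the test reuses a single table local):

    // Present on the flow run table, absent everywhere else.
    checkCoprocessorExists(flowRunTableName, true);
    checkCoprocessorExists(flowActivityTableName, false);
    checkCoprocessorExists(entityTableName, false);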
 
@@ -186,13 +183,18 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with the right min start time
       te = new TimelineEntities();
       te.addEntity(entityMinStartTime);
       appName = "application_100000000000_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity for the max end time
       TimelineEntity entityMaxEndTime = TestFlowDataGenerator
@@ -200,7 +202,8 @@
       te = new TimelineEntities();
       te.addEntity(entityMaxEndTime);
       appName = "application_100000000000_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with a greater start time
       TimelineEntity entityGreaterStartTime = TestFlowDataGenerator
@@ -208,7 +211,8 @@
       te = new TimelineEntities();
       te.addEntity(entityGreaterStartTime);
       appName = "application_1000000000000000_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // flush everything to hbase
       hbi.flush();
@@ -220,8 +224,8 @@
 
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow run table
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     // scan the table and see that we get back the right min and max
     // timestamps
     byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
@@ -292,15 +296,19 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -356,24 +364,24 @@
   /*
    * checks the batch limits on a scan
    */
-  void checkFlowRunTableBatchLimit(String cluster, String user,
-      String flow, long runid, Configuration c1) throws IOException {
+  void checkFlowRunTableBatchLimit(String cluster, String user, String flow,
+      long runid, Configuration c1) throws IOException {
 
     Scan s = new Scan();
     s.addFamily(FlowRunColumnFamily.INFO.getBytes());
-    byte[] startRow =
-        new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
+    byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid)
+        .getRowKey();
     s.setStartRow(startRow);
     // set a batch limit
     int batchLimit = 2;
     s.setBatch(batchLimit);
     String clusterStop = cluster + "1";
-    byte[] stopRow =
-        new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
+    byte[] stopRow = new FlowRunRowKey(clusterStop, user, flow, runid)
+        .getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn
-        .getTable(TableName.valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int loopCount = 0;
@@ -517,8 +525,8 @@
         new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int rowCount = 0;
@@ -561,15 +569,22 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, 1002345678919L, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          1002345678919L, appName), te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, 1002345678918L, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          1002345678918L, appName), te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -589,7 +604,8 @@
       TimelineEntity entity = hbr.getEntity(
           new TimelineReaderContext(cluster, user, flow, 1002345678919L, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+          null));
       assertTrue(TimelineEntityType.YARN_FLOW_RUN.matches(entity.getType()));
       Set<TimelineMetric> metrics = entity.getMetrics();
       assertEquals(1, metrics.size());
@@ -613,8 +629,9 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(),
-          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+          new TimelineEntityFilters.Builder().build(),
+          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+          null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
       for (TimelineEntity timelineEntity : entities) {
@@ -646,15 +663,20 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -674,7 +696,7 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity timelineEntity : entities) {
@@ -684,8 +706,9 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().build(),
+          new TimelineDataToRetrieve(null, null,
+              EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(1, entities.size());
       for (TimelineEntity timelineEntity : entities) {
         Set<TimelineMetric> timelineMetrics = timelineEntity.getMetrics();
@@ -739,6 +762,8 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
 
       for (int i = start; i < count; i++) {
         String appName = "application_1060350000000_" + appIdSuffix;
@@ -748,7 +773,8 @@
         te1.addEntity(entityApp1);
         entityApp2 = TestFlowDataGenerator.getMaxFlushEntity(insertTs);
         te1.addEntity(entityApp2);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
         Thread.sleep(1);
 
         appName = "application_1001199480000_7" + appIdSuffix;
@@ -760,7 +786,9 @@
         entityApp2 = TestFlowDataGenerator.getMaxFlushEntity(insertTs);
         te1.addEntity(entityApp2);
 
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow,
+            flowVersion, runid, appName), te1, remoteUser);
         if (i % 1000 == 0) {
           hbi.flush();
           checkMinMaxFlush(c1, minTS, startTs, count, cluster, user, flow,
@@ -782,8 +810,8 @@
       boolean checkMax) throws IOException {
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow run table
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     // scan the table and see that we get back the right min and max
     // timestamps
     byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
@@ -828,16 +856,23 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678919L,
-          "application_11111111111111_1111", te);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678919L, "application_11111111111111_1111"),
+          te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator.getEntityMetricsApp2(
           System.currentTimeMillis());
       entityApp2.setCreatedTime(1425016502000L);
       te.addEntity(entityApp2);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678918L,
-          "application_11111111111111_2222", te);
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678918L, "application_11111111111111_2222"),
+          te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -855,8 +890,9 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow,
           null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, 1425016501000L, 1425016502001L, null,
-          null, null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createdTimeBegin(1425016501000L)
+              .createTimeEnd(1425016502001L).build(),
+          new TimelineDataToRetrieve());
       assertEquals(2, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678918") &&
@@ -868,8 +904,9 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, 1425016501050L, null, null, null,
-          null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createdTimeBegin(1425016501050L)
+              .build(),
+          new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678918")) {
@@ -879,8 +916,9 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, 1425016501050L, null, null,
-          null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createTimeEnd(1425016501050L)
+              .build(),
+          new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678919")) {
@@ -910,15 +948,22 @@
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678919L,
-          "application_11111111111111_1111", te);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678919L, "application_11111111111111_1111"),
+          te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator.getEntityMetricsApp2(
           System.currentTimeMillis());
       te.addEntity(entityApp2);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678918L,
-          "application_11111111111111_2222", te);
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678918L, "application_11111111111111_2222"),
+          te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -946,9 +991,10 @@
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null,
           null, TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList, null), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
       for (TimelineEntity entity : entities) {
@@ -963,9 +1009,10 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList1, null), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(1, entities.size());
       metricCnt = 0;
       for (TimelineEntity entity : entities) {
@@ -979,9 +1026,10 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList2, null), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(0, entities.size());
 
       TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -989,9 +1037,10 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList3, null), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(0, entities.size());
 
       TimelineFilterList list3 = new TimelineFilterList();
@@ -1010,10 +1059,10 @@
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList4, null),
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+              .build(),
           new TimelineDataToRetrieve(null, metricsToRetrieve,
-          EnumSet.of(Field.ALL), null));
+          EnumSet.of(Field.ALL), null, null, null));
       assertEquals(2, entities.size());
       metricCnt = 0;
       for (TimelineEntity entity : entities) {
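A minimal usage sketch of the builder API exercised above; it is illustrative rather than part of the patch, and assumes the caller already has a TimelineFilterList in scope.

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;

    /** Sketch: the builder replaces the positional-null constructor. */
    final class FilterSketch {
      static TimelineEntityFilters metricsOnly(TimelineFilterList metricFilters) {
        // Only the metric filter is set; every other filter keeps its
        // default instead of being passed as a run of positional nulls.
        return new TimelineEntityFilters.Builder()
            .metricFilters(metricFilters)
            .build();
      }
    }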
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
index eb18e28..0ef8260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
@@ -35,7 +35,6 @@
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -49,10 +48,13 @@
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
@@ -69,8 +71,8 @@
 
   private static HBaseTestingUtility util;
 
-  private static final String METRIC_1 = "MAP_SLOT_MILLIS";
-  private static final String METRIC_2 = "HDFS_BYTES_READ";
+  private static final String METRIC1 = "MAP_SLOT_MILLIS";
+  private static final String METRIC2 = "HDFS_BYTES_READ";
 
   private final byte[] aRowKey = Bytes.toBytes("a");
   private final byte[] aFamily = Bytes.toBytes("family");
@@ -82,15 +84,12 @@
     Configuration conf = util.getConfiguration();
     conf.setInt("hfile.format.version", 3);
     util.startMiniCluster();
-    createSchema();
+    DataGeneratorForTest.createSchema(util.getConfiguration());
   }
 
-  private static void createSchema() throws IOException {
-    TimelineSchemaCreator.createAllTables(util.getConfiguration(), false);
-  }
-
-  /** Writes non numeric data into flow run table
-   * reads it back.
+  /**
+   * Writes non-numeric data into the flow run table and reads it back.
    *
    * @throws Exception
    */
@@ -106,11 +105,10 @@
     p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes,
         valueBytes);
     Configuration hbaseConf = util.getConfiguration();
-    TableName table = TableName.valueOf(hbaseConf.get(
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
-    Table flowRunTable = conn.getTable(table);
+    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     flowRunTable.put(p);
 
     Get g = new Get(rowKeyBytes);
@@ -156,11 +154,10 @@
         value4Bytes);
 
     Configuration hbaseConf = util.getConfiguration();
-    TableName table = TableName.valueOf(hbaseConf.get(
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
-    Table flowRunTable = conn.getTable(table);
+    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     flowRunTable.put(p);
 
     String rowKey2 = "nonNumericRowKey2";
@@ -262,7 +259,6 @@
           .getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
       // we expect all back in one next call
       assertEquals(4, values.size());
-      System.out.println(" values size " + values.size() +  " " + batchLimit);
       rowCount++;
     }
     // should get back 1 row with each invocation
@@ -286,9 +282,12 @@
     Configuration c1 = util.getConfiguration();
     TimelineEntities te1 = null;
     TimelineEntity entityApp1 = null;
+    UserGroupInformation remoteUser =
+        UserGroupInformation.createRemoteUser(user);
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+
       // now insert count * ( 100 + 100) metrics
       // each call to getEntityMetricsApp1 brings back 100 values
       // of metric1 and 100 of metric2
@@ -298,14 +297,16 @@
         te1 = new TimelineEntities();
         entityApp1 = TestFlowDataGenerator.getEntityMetricsApp1(insertTs, c1);
         te1.addEntity(entityApp1);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
 
         appName = "application_2048000000000_7" + appIdSuffix;
         insertTs++;
         te1 = new TimelineEntities();
         entityApp1 = TestFlowDataGenerator.getEntityMetricsApp2(insertTs);
         te1.addEntity(entityApp1);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
       }
     } finally {
       String appName = "application_10240000000000_" + appIdSuffix;
@@ -314,17 +315,19 @@
           insertTs + 1, c1);
       te1.addEntity(entityApp1);
       if (hbi != null) {
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
         hbi.flush();
         hbi.close();
       }
     }
 
     // check in flow run table
-    HRegionServer server = util.getRSForFirstRegionInTable(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
-    List<Region> regions = server.getOnlineRegions(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    HRegionServer server = util.getRSForFirstRegionInTable(
+        BaseTable.getTableName(c1, FlowRunTable.TABLE_NAME_CONF_NAME,
+            FlowRunTable.DEFAULT_TABLE_NAME));
+    List<Region> regions = server.getOnlineRegions(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     assertTrue("Didn't find any regions for primary table!",
         regions.size() > 0);
     // flush and compact all the regions of the primary table
@@ -349,8 +352,8 @@
         new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(TableName
-        .valueOf(FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(BaseTable.getTableName(c1,
+        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int rowCount = 0;
@@ -364,13 +367,13 @@
       rowCount++;
       // check metric1
       byte[] q = ColumnHelper.getColumnQualifier(
-          FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(), METRIC_1);
+          FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(), METRIC1);
       assertTrue(values.containsKey(q));
       assertEquals(141, Bytes.toLong(values.get(q)));
 
       // check metric2
       q = ColumnHelper.getColumnQualifier(
-          FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(), METRIC_2);
+          FlowRunColumnPrefix.METRIC.getColumnPrefixBytes(), METRIC2);
       assertTrue(values.containsKey(q));
       assertEquals(57, Bytes.toLong(values.get(q)));
     }
@@ -587,9 +590,9 @@
     long cellTsFinalStart = 10001120L;
     long cellTsFinal = cellTsFinalStart;
 
-    long cellTsFinalStartNotExpire =
-        TimestampGenerator.getSupplementedTimestamp(
-        System.currentTimeMillis(), "application_10266666661166_118821");
+    long cellTsFinalStartNotExpire = TimestampGenerator
+        .getSupplementedTimestamp(System.currentTimeMillis(),
+            "application_10266666661166_118821");
     long cellTsFinalNotExpire = cellTsFinalStartNotExpire;
 
     long cellTsNotFinalStart = currentTimestamp - 5;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
index a934a3d..8b46d32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
@@ -182,6 +182,23 @@
   }
 
   /**
+   * Creates an HBase {@link SingleColumnValueFilter} for the given column.
+   * @param <T> Describes the type of the table the column belongs to.
+   * @param column Column whose value is to be filtered.
+   * @param value Value to be filtered against.
+   * @param op Compare operator.
+   * @return a {@link SingleColumnValueFilter}.
+   * @throws IOException if the filter cannot be created.
+   */
+  public static <T> Filter createHBaseSingleColValueFilter(Column<T> column,
+      Object value, CompareOp op) throws IOException {
+    return createHBaseSingleColValueFilter(
+        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
+        column.getValueConverter().encodeValue(value), op, true);
+  }
+
+  /**
    * Creates a HBase {@link SingleColumnValueFilter}.
    *
    * @param columnFamily Column Family represented as bytes.
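A usage sketch for the new typed overload above; FlowRunColumn.MAX_END_TIME is an assumed example column and the comparison value is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;

    /** Sketch: callers pass a typed Column instead of raw family/qualifier bytes. */
    final class ColumnFilterSketch {
      static Filter endTimeBefore(long ts) throws IOException {
        // Family, qualifier and the encoded value are all derived from the
        // column itself, so callers no longer handle byte arrays directly.
        return TimelineFilterUtils.createHBaseSingleColValueFilter(
            FlowRunColumn.MAX_END_TIME, ts, CompareOp.LESS_OR_EQUAL);
      }
    }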
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index dc50f42..1ebfab2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -22,7 +22,6 @@
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.service.AbstractService;
@@ -30,6 +29,8 @@
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.reader.EntityTypeReader;
 import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReader;
 import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReaderFactory;
 import org.slf4j.Logger;
@@ -54,7 +55,7 @@
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
-    hbaseConf = HBaseConfiguration.create(conf);
+    hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
     conn = ConnectionFactory.createConnection(hbaseConf);
   }
 
@@ -85,4 +86,11 @@
             filters, dataToRetrieve);
     return reader.readEntities(hbaseConf, conn);
   }
+
+  @Override
+  public Set<String> getEntityTypes(TimelineReaderContext context)
+      throws IOException {
+    EntityTypeReader reader = new EntityTypeReader(context);
+    return reader.readEntityTypes(hbaseConf, conn);
+  }
 }
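A sketch of calling the new getEntityTypes method; all identifier values are illustrative assumptions and the reader is assumed to be initialized.

    import java.io.IOException;
    import java.util.Set;

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;

    /** Sketch: listing the entity types stored for a single application. */
    final class EntityTypeSketch {
      static Set<String> typesFor(HBaseTimelineReaderImpl reader)
          throws IOException {
        // Context fields are (cluster, user, flow, flowRunId, appId,
        // entityType, entityId); only those locating the application are set.
        return reader.getEntityTypes(
            new TimelineReaderContext("cluster1", "user1", "flow1", null,
                "application_1111111111_1111", null, null));
      }
    }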
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index afa58cb..9e9134c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -24,26 +24,28 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
@@ -63,6 +65,10 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,6 +91,7 @@
   private TypedBufferedMutator<ApplicationTable> applicationTable;
   private TypedBufferedMutator<FlowActivityTable> flowActivityTable;
   private TypedBufferedMutator<FlowRunTable> flowRunTable;
+  private TypedBufferedMutator<SubApplicationTable> subApplicationTable;
 
   /**
    * Used to convert strings key components to and from storage format.
@@ -97,6 +104,10 @@
    */
   private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
 
+  private enum Tables {
+    APPLICATION_TABLE, ENTITY_TABLE, SUBAPPLICATION_TABLE
+  }
+
   public HBaseTimelineWriterImpl() {
     super(HBaseTimelineWriterImpl.class.getName());
   }
@@ -107,7 +118,8 @@
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
-    Configuration hbaseConf = HBaseConfiguration.create(conf);
+    Configuration hbaseConf =
+        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
     conn = ConnectionFactory.createConnection(hbaseConf);
     entityTable = new EntityTable().getTableMutator(hbaseConf, conn);
     appToFlowTable = new AppToFlowTable().getTableMutator(hbaseConf, conn);
@@ -115,17 +127,28 @@
     flowRunTable = new FlowRunTable().getTableMutator(hbaseConf, conn);
     flowActivityTable =
         new FlowActivityTable().getTableMutator(hbaseConf, conn);
+    subApplicationTable =
+        new SubApplicationTable().getTableMutator(hbaseConf, conn);
   }
 
   /**
    * Stores the entire information in TimelineEntities to the timeline store.
    */
   @Override
-  public TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities data) throws IOException {
+  public TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities data, UserGroupInformation callerUgi)
+      throws IOException {
 
     TimelineWriteResponse putStatus = new TimelineWriteResponse();
+
+    String clusterId = context.getClusterId();
+    String userId = context.getUserId();
+    String flowName = context.getFlowName();
+    String flowVersion = context.getFlowVersion();
+    long flowRunId = context.getFlowRunId();
+    String appId = context.getAppId();
+    String subApplicationUser = callerUgi.getShortUserName();
+
     // defensive coding to avoid NPE during row key construction
     if ((flowName == null) || (appId == null) || (clusterId == null)
         || (userId == null)) {
@@ -144,43 +167,45 @@
 
       // if the entity is the application, the destination is the application
       // table
-      boolean isApplication = isApplicationEntity(te);
+      boolean isApplication = ApplicationEntity.isApplicationEntity(te);
       byte[] rowKey;
       if (isApplication) {
         ApplicationRowKey applicationRowKey =
             new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
                 appId);
         rowKey = applicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.APPLICATION_TABLE);
       } else {
         EntityRowKey entityRowKey =
             new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
-                te.getType(), te.getId());
+                te.getType(), te.getIdPrefix(), te.getId());
         rowKey = entityRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.ENTITY_TABLE);
       }
 
-      storeInfo(rowKey, te, flowVersion, isApplication);
-      storeEvents(rowKey, te.getEvents(), isApplication);
-      storeConfig(rowKey, te.getConfigs(), isApplication);
-      storeMetrics(rowKey, te.getMetrics(), isApplication);
-      storeRelations(rowKey, te, isApplication);
+      if (!isApplication && !userId.equals(subApplicationUser)) {
+        SubApplicationRowKey subApplicationRowKey =
+            new SubApplicationRowKey(subApplicationUser, clusterId,
+                te.getType(), te.getIdPrefix(), te.getId(), userId);
+        rowKey = subApplicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.SUBAPPLICATION_TABLE);
+      }
 
       if (isApplication) {
         TimelineEvent event =
-            getApplicationEvent(te,
+            ApplicationEntity.getApplicationEvent(te,
                 ApplicationMetricsConstants.CREATED_EVENT_TYPE);
         FlowRunRowKey flowRunRowKey =
             new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
         if (event != null) {
-          AppToFlowRowKey appToFlowRowKey =
-              new AppToFlowRowKey(clusterId, appId);
-          onApplicationCreated(flowRunRowKey, appToFlowRowKey, appId, userId,
+          onApplicationCreated(flowRunRowKey, clusterId, appId, userId,
               flowVersion, te, event.getTimestamp());
         }
         // if it's an application entity, store metrics
         storeFlowMetricsAppRunning(flowRunRowKey, appId, te);
         // if application has finished, store it's finish time and write final
         // values of all metrics
-        event = getApplicationEvent(te,
+        event = ApplicationEntity.getApplicationEvent(te,
             ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
         if (event != null) {
           onApplicationFinished(flowRunRowKey, flowVersion, appId, te,
@@ -192,18 +217,22 @@
   }
 
   private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
-      AppToFlowRowKey appToFlowRowKey, String appId, String userId,
-      String flowVersion, TimelineEntity te, long appCreatedTimeStamp)
+      String clusterId, String appId, String userId, String flowVersion,
+      TimelineEntity te, long appCreatedTimeStamp)
       throws IOException {
 
     String flowName = flowRunRowKey.getFlowName();
     Long flowRunId = flowRunRowKey.getFlowRunId();
 
     // store in App to flow table
+    AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(appId);
     byte[] rowKey = appToFlowRowKey.getRowKey();
-    AppToFlowColumn.FLOW_ID.store(rowKey, appToFlowTable, null, flowName);
-    AppToFlowColumn.FLOW_RUN_ID.store(rowKey, appToFlowTable, null, flowRunId);
-    AppToFlowColumn.USER_ID.store(rowKey, appToFlowTable, null, userId);
+    AppToFlowColumnPrefix.FLOW_NAME.store(rowKey, appToFlowTable, clusterId,
+        null, flowName);
+    AppToFlowColumnPrefix.FLOW_RUN_ID.store(rowKey, appToFlowTable, clusterId,
+        null, flowRunId);
+    AppToFlowColumnPrefix.USER_ID.store(rowKey, appToFlowTable, clusterId, null,
+        userId);
 
     // store in flow run table
     storeAppCreatedInFlowRunTable(flowRunRowKey, appId, te);
@@ -301,72 +330,108 @@
     }
   }
 
-  private void storeRelations(byte[] rowKey, TimelineEntity te,
-      boolean isApplication) throws IOException {
-    if (isApplication) {
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          ApplicationColumnPrefix.RELATES_TO, applicationTable);
-    } else {
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          EntityColumnPrefix.IS_RELATED_TO, entityTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          EntityColumnPrefix.RELATES_TO, entityTable);
-    }
-  }
-
   /**
    * Stores the Relations from the {@linkplain TimelineEntity} object.
    */
   private <T> void storeRelations(byte[] rowKey,
-      Map<String, Set<String>> connectedEntities,
-      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
-          throws IOException {
-    for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
-        .entrySet()) {
-      // id3?id4?id5
-      String compoundValue =
-          Separator.VALUES.joinEncoded(connectedEntity.getValue());
-      columnPrefix.store(rowKey, table,
-          stringKeyConverter.encode(connectedEntity.getKey()), null,
-          compoundValue);
+      Map<String, Set<String>> connectedEntities, ColumnPrefix<T> columnPrefix,
+      TypedBufferedMutator<T> table) throws IOException {
+    if (connectedEntities != null) {
+      for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
+          .entrySet()) {
+        // id3?id4?id5
+        String compoundValue =
+            Separator.VALUES.joinEncoded(connectedEntity.getValue());
+        columnPrefix.store(rowKey, table,
+            stringKeyConverter.encode(connectedEntity.getKey()), null,
+            compoundValue);
+      }
     }
   }
 
   /**
    * Stores information from the {@linkplain TimelineEntity} object.
    */
-  private void storeInfo(byte[] rowKey, TimelineEntity te, String flowVersion,
-      boolean isApplication) throws IOException {
-
-    if (isApplication) {
+  private void store(byte[] rowKey, TimelineEntity te, String flowVersion,
+      Tables table) throws IOException {
+    switch (table) {
+    case APPLICATION_TABLE:
       ApplicationColumn.ID.store(rowKey, applicationTable, null, te.getId());
       ApplicationColumn.CREATED_TIME.store(rowKey, applicationTable, null,
           te.getCreatedTime());
       ApplicationColumn.FLOW_VERSION.store(rowKey, applicationTable, null,
           flowVersion);
-      Map<String, Object> info = te.getInfo();
-      if (info != null) {
-        for (Map.Entry<String, Object> entry : info.entrySet()) {
-          ApplicationColumnPrefix.INFO.store(rowKey, applicationTable,
-              stringKeyConverter.encode(entry.getKey()), null,
-              entry.getValue());
-        }
-      }
-    } else {
+      storeInfo(rowKey, te.getInfo(), flowVersion, ApplicationColumnPrefix.INFO,
+          applicationTable);
+      storeMetrics(rowKey, te.getMetrics(), ApplicationColumnPrefix.METRIC,
+          applicationTable);
+      storeEvents(rowKey, te.getEvents(), ApplicationColumnPrefix.EVENT,
+          applicationTable);
+      storeConfig(rowKey, te.getConfigs(), ApplicationColumnPrefix.CONFIG,
+          applicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          ApplicationColumnPrefix.RELATES_TO, applicationTable);
+      break;
+    case ENTITY_TABLE:
       EntityColumn.ID.store(rowKey, entityTable, null, te.getId());
       EntityColumn.TYPE.store(rowKey, entityTable, null, te.getType());
       EntityColumn.CREATED_TIME.store(rowKey, entityTable, null,
           te.getCreatedTime());
       EntityColumn.FLOW_VERSION.store(rowKey, entityTable, null, flowVersion);
-      Map<String, Object> info = te.getInfo();
-      if (info != null) {
-        for (Map.Entry<String, Object> entry : info.entrySet()) {
-          EntityColumnPrefix.INFO.store(rowKey, entityTable,
-              stringKeyConverter.encode(entry.getKey()), null,
-              entry.getValue());
-        }
+      storeInfo(rowKey, te.getInfo(), flowVersion, EntityColumnPrefix.INFO,
+          entityTable);
+      storeMetrics(rowKey, te.getMetrics(), EntityColumnPrefix.METRIC,
+          entityTable);
+      storeEvents(rowKey, te.getEvents(), EntityColumnPrefix.EVENT,
+          entityTable);
+      storeConfig(rowKey, te.getConfigs(), EntityColumnPrefix.CONFIG,
+          entityTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          EntityColumnPrefix.IS_RELATED_TO, entityTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          EntityColumnPrefix.RELATES_TO, entityTable);
+      break;
+    case SUBAPPLICATION_TABLE:
+      SubApplicationColumn.ID.store(rowKey, subApplicationTable, null,
+          te.getId());
+      SubApplicationColumn.TYPE.store(rowKey, subApplicationTable, null,
+          te.getType());
+      SubApplicationColumn.CREATED_TIME.store(rowKey, subApplicationTable, null,
+          te.getCreatedTime());
+      SubApplicationColumn.FLOW_VERSION.store(rowKey, subApplicationTable, null,
+          flowVersion);
+      storeInfo(rowKey, te.getInfo(), flowVersion,
+          SubApplicationColumnPrefix.INFO, subApplicationTable);
+      storeMetrics(rowKey, te.getMetrics(), SubApplicationColumnPrefix.METRIC,
+          subApplicationTable);
+      storeEvents(rowKey, te.getEvents(), SubApplicationColumnPrefix.EVENT,
+          subApplicationTable);
+      storeConfig(rowKey, te.getConfigs(), SubApplicationColumnPrefix.CONFIG,
+          subApplicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          SubApplicationColumnPrefix.IS_RELATED_TO, subApplicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          SubApplicationColumnPrefix.RELATES_TO, subApplicationTable);
+      break;
+    default:
+      LOG.warn("Invalid table name provided.");
+      break;
+    }
+  }
+
+  /**
+   * Stores the info map from the {@linkplain TimelineEntity} object.
+   */
+  private <T> void storeInfo(byte[] rowKey, Map<String, Object> info,
+      String flowVersion, ColumnPrefix<T> columnPrefix,
+      TypedBufferedMutator<T> table) throws IOException {
+    if (info != null) {
+      for (Map.Entry<String, Object> entry : info.entrySet()) {
+        columnPrefix.store(rowKey, table,
+            stringKeyConverter.encode(entry.getKey()), null, entry.getValue());
       }
     }
   }
@@ -374,19 +439,13 @@
   /**
   * Stores the config information from the {@linkplain TimelineEntity} object.
    */
-  private void storeConfig(byte[] rowKey, Map<String, String> config,
-      boolean isApplication) throws IOException {
-    if (config == null) {
-      return;
-    }
-    for (Map.Entry<String, String> entry : config.entrySet()) {
-      byte[] configKey = stringKeyConverter.encode(entry.getKey());
-      if (isApplication) {
-        ApplicationColumnPrefix.CONFIG.store(rowKey, applicationTable,
-            configKey, null, entry.getValue());
-      } else {
-        EntityColumnPrefix.CONFIG.store(rowKey, entityTable, configKey,
-            null, entry.getValue());
+  private <T> void storeConfig(byte[] rowKey, Map<String, String> config,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (config != null) {
+      for (Map.Entry<String, String> entry : config.entrySet()) {
+        byte[] configKey = stringKeyConverter.encode(entry.getKey());
+        columnPrefix.store(rowKey, table, configKey, null, entry.getValue());
       }
     }
   }
@@ -395,8 +454,9 @@
   * Stores the {@linkplain TimelineMetric} information from the
   * {@linkplain TimelineEntity} object.
    */
-  private void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
-      boolean isApplication) throws IOException {
+  private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
     if (metrics != null) {
       for (TimelineMetric metric : metrics) {
         byte[] metricColumnQualifier =
@@ -404,13 +464,8 @@
         Map<Long, Number> timeseries = metric.getValues();
         for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
           Long timestamp = timeseriesEntry.getKey();
-          if (isApplication) {
-            ApplicationColumnPrefix.METRIC.store(rowKey, applicationTable,
-                metricColumnQualifier, timestamp, timeseriesEntry.getValue());
-          } else {
-            EntityColumnPrefix.METRIC.store(rowKey, entityTable,
-                metricColumnQualifier, timestamp, timeseriesEntry.getValue());
-          }
+          columnPrefix.store(rowKey, table, metricColumnQualifier, timestamp,
+              timeseriesEntry.getValue());
         }
       }
     }
@@ -419,8 +474,9 @@
   /**
   * Stores the events from the {@linkplain TimelineEntity} object.
    */
-  private void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
-      boolean isApplication) throws IOException {
+  private <T> void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
     if (events != null) {
       for (TimelineEvent event : events) {
         if (event != null) {
@@ -438,26 +494,16 @@
               byte[] columnQualifierBytes =
                   new EventColumnName(eventId, eventTimestamp, null)
                       .getColumnQualifier();
-              if (isApplication) {
-                ApplicationColumnPrefix.EVENT.store(rowKey, applicationTable,
-                    columnQualifierBytes, null, Separator.EMPTY_BYTES);
-              } else {
-                EntityColumnPrefix.EVENT.store(rowKey, entityTable,
-                    columnQualifierBytes, null, Separator.EMPTY_BYTES);
-              }
+              columnPrefix.store(rowKey, table, columnQualifierBytes, null,
+                  Separator.EMPTY_BYTES);
             } else {
               for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
                 // eventId=infoKey
                 byte[] columnQualifierBytes =
                     new EventColumnName(eventId, eventTimestamp, info.getKey())
                         .getColumnQualifier();
-                if (isApplication) {
-                  ApplicationColumnPrefix.EVENT.store(rowKey, applicationTable,
-                      columnQualifierBytes, null, info.getValue());
-                } else {
-                  EntityColumnPrefix.EVENT.store(rowKey, entityTable,
-                      columnQualifierBytes, null, info.getValue());
-                }
+                columnPrefix.store(rowKey, table, columnQualifierBytes, null,
+                    info.getValue());
               } // for info: eventInfo
             }
           }
@@ -466,33 +512,6 @@
     }
   }
 
-  /**
-   * Checks if the input TimelineEntity object is an ApplicationEntity.
-   *
-   * @param te TimelineEntity object.
-   * @return true if input is an ApplicationEntity, false otherwise
-   */
-  static boolean isApplicationEntity(TimelineEntity te) {
-    return te.getType().equals(TimelineEntityType.YARN_APPLICATION.toString());
-  }
-
-  /**
-   * @param te TimelineEntity object.
-   * @param eventId event with this id needs to be fetched
-   * @return TimelineEvent if TimelineEntity contains the desired event.
-   */
-  private static TimelineEvent getApplicationEvent(TimelineEntity te,
-      String eventId) {
-    if (isApplicationEntity(te)) {
-      for (TimelineEvent event : te.getEvents()) {
-        if (event.getId().equals(eventId)) {
-          return event;
-        }
-      }
-    }
-    return null;
-  }
-
   /*
    * (non-Javadoc)
    *
@@ -524,6 +543,7 @@
     applicationTable.flush();
     flowRunTable.flush();
     flowActivityTable.flush();
+    subApplicationTable.flush();
   }
 
   /**
@@ -556,11 +576,13 @@
       // The close API performs flushing and releases any resources held
       flowActivityTable.close();
     }
+    if (subApplicationTable != null) {
+      subApplicationTable.close();
+    }
     if (conn != null) {
       LOG.info("closing the hbase Connection");
       conn.close();
     }
     super.serviceStop();
   }
-
 }
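A sketch of the reworked write entry point above; cluster, flow, and application identifiers are illustrative. Entities written by a caller other than the flow owner are additionally routed to the new sub-application table.

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
    import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;

    /** Sketch: one write carries the collector context plus the caller's UGI. */
    final class WriterSketch {
      static void writeAs(HBaseTimelineWriterImpl writer, TimelineEntities te,
          String caller) throws IOException {
        // The context object replaces six positional parameters; the UGI
        // lets the writer detect sub-application writes.
        writer.write(
            new TimelineCollectorContext("cluster1", "flowOwner", "flow1",
                "CF7022C10F1354", 1002345678919L,
                "application_1111111111_1111"),
            te, UserGroupInformation.createRemoteUser(caller));
      }
    }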
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index dbed05d..210fd85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -32,16 +32,18 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -62,7 +64,9 @@
       LoggerFactory.getLogger(TimelineSchemaCreator.class);
   private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
   private static final String APP_METRICS_TTL_OPTION_SHORT = "ma";
+  private static final String SUB_APP_METRICS_TTL_OPTION_SHORT = "msa";
   private static final String APP_TABLE_NAME_SHORT = "a";
+  private static final String SUB_APP_TABLE_NAME_SHORT = "sa";
   private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
   private static final String ENTITY_METRICS_TTL_OPTION_SHORT = "me";
   private static final String ENTITY_TABLE_NAME_SHORT = "e";
@@ -71,7 +75,10 @@
 
   public static void main(String[] args) throws Exception {
 
-    Configuration hbaseConf = HBaseConfiguration.create();
+    LOG.info("Starting the schema creation");
+    Configuration hbaseConf =
+        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+            new YarnConfiguration());
     // Grab input args and allow for -Dxyz style arguments
     String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
         .getRemainingArgs();
@@ -117,6 +124,21 @@
         new ApplicationTable().setMetricsTTL(appMetricsTTL, hbaseConf);
       }
 
+      // Grab the subApplicationTableName argument
+      String subApplicationTableName = commandLine.getOptionValue(
+          SUB_APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableName)) {
+        hbaseConf.set(SubApplicationTable.TABLE_NAME_CONF_NAME,
+            subApplicationTableName);
+      }
+      // Grab the subApplication metrics TTL
+      String subApplicationTableMetricsTTL = commandLine
+          .getOptionValue(SUB_APP_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableMetricsTTL)) {
+        int subAppMetricsTTL = Integer.parseInt(subApplicationTableMetricsTTL);
+        new SubApplicationTable().setMetricsTTL(subAppMetricsTTL, hbaseConf);
+      }
+
       // create all table schemas in hbase
       final boolean skipExisting = commandLine.hasOption(
           SKIP_EXISTING_TABLE_OPTION_SHORT);
@@ -178,6 +200,18 @@
     o.setRequired(false);
     options.addOption(o);
 
+    o = new Option(SUB_APP_TABLE_NAME_SHORT, "subApplicationTableName", true,
+        "subApplication table name");
+    o.setArgName("subApplicationTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(SUB_APP_METRICS_TTL_OPTION_SHORT, "subApplicationMetricsTTL",
+        true, "TTL for metrics column family");
+    o.setArgName("subApplicationMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
     // Options without an argument
     // No need to set arg name since we do not need an argument here
     o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
@@ -216,6 +250,11 @@
         " The name of the Application table\n");
     usage.append("[-applicationMetricsTTL <Application Table Metrics TTL>]" +
         " TTL for metrics in the Application table\n");
+    usage.append("[-subApplicationTableName <SubApplication Table Name>]" +
+        " The name of the SubApplication table\n");
+    usage.append("[-subApplicationMetricsTTL " +
+        " <SubApplication Table Metrics TTL>]" +
+        " TTL for metrics in the SubApplication table\n");
     usage.append("[-skipExistingTable] Whether to skip existing" +
         " hbase tables\n");
     System.out.println(usage.toString());
@@ -308,6 +347,15 @@
           throw e;
         }
       }
+      try {
+        new SubApplicationTable().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
     } finally {
       if (conn != null) {
         conn.close();
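An invocation sketch for the new sub-application options; the table name and TTL are illustrative values.

    /** Sketch: creating the schema with the new sub-application options. */
    final class SchemaSketch {
      public static void main(String[] args) throws Exception {
        // Custom table name plus a 30-day TTL (in seconds) for the metrics
        // column family; -skipExistingTable tolerates pre-existing tables.
        TimelineSchemaCreator.main(new String[] {
            "-subApplicationTableName", "timelineservice.subapplication",
            "-subApplicationMetricsTTL", "2592000",
            "-skipExistingTable"});
      }
    }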
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
index dde3911..00eaa7e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
@@ -105,52 +105,4 @@
     return column.getValueConverter();
   }
 
-  /**
-   * Retrieve an {@link ApplicationColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}.
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link ApplicationColumn} or null
-   */
-  public static final ApplicationColumn columnFor(String columnQualifier) {
-
-    // Match column based on value, assume column family matches.
-    for (ApplicationColumn ac : ApplicationColumn.values()) {
-      // Find a match based only on name.
-      if (ac.getColumnQualifier().equals(columnQualifier)) {
-        return ac;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link ApplicationColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(a,x) == columnFor(b,y)}
-   * if and only if {@code a.equals(b) & x.equals(y)} or
-   * {@code (x == y == null)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param name Name of the column to retrieve
-   * @return the corresponding {@link ApplicationColumn} or null if both
-   *         arguments don't match.
-   */
-  public static final ApplicationColumn columnFor(
-      ApplicationColumnFamily columnFamily, String name) {
-
-    for (ApplicationColumn ac : ApplicationColumn.values()) {
-      // Find a match based column family and on name.
-      if (ac.columnFamily.equals(columnFamily)
-          && ac.getColumnQualifier().equals(name)) {
-        return ac;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
index 42488f4..8297dc5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -233,56 +233,4 @@
         keyConverter);
   }
 
-  /**
-   * Retrieve an {@link ApplicationColumnPrefix} given a name, or null if there
-   * is no match. The following holds true: {@code columnFor(x) == columnFor(y)}
-   * if and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link ApplicationColumnPrefix} or null
-   */
-  public static final ApplicationColumnPrefix columnFor(String columnPrefix) {
-
-    // Match column based on value, assume column family matches.
-    for (ApplicationColumnPrefix acp : ApplicationColumnPrefix.values()) {
-      // Find a match based only on name.
-      if (acp.getColumnPrefix().equals(columnPrefix)) {
-        return acp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link ApplicationColumnPrefix} given a name, or null if there
-   * is no match. The following holds true:
-   * {@code columnFor(a,x) == columnFor(b,y)} if and only if
-   * {@code (x == y == null)} or {@code a.equals(b) & x.equals(y)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link ApplicationColumnPrefix} or null if both
-   *         arguments don't match.
-   */
-  public static final ApplicationColumnPrefix columnFor(
-      ApplicationColumnFamily columnFamily, String columnPrefix) {
-
-    // TODO: needs unit test to confirm and need to update javadoc to explain
-    // null prefix case.
-
-    for (ApplicationColumnPrefix acp : ApplicationColumnPrefix.values()) {
-      // Find a match based column family and on name.
-      if (acp.columnFamily.equals(columnFamily)
-          && (((columnPrefix == null) && (acp.getColumnPrefix() == null)) ||
-          (acp.getColumnPrefix().equals(columnPrefix)))) {
-        return acp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
index da62fdf..e89a6a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.application;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -33,7 +37,7 @@
   private final String flowName;
   private final Long flowRunId;
   private final String appId;
-  private final KeyConverter<ApplicationRowKey> appRowKeyConverter =
+  private final ApplicationRowKeyConverter appRowKeyConverter =
       new ApplicationRowKeyConverter();
 
   public ApplicationRowKey(String clusterId, String userId, String flowName,
@@ -86,6 +90,24 @@
   }
 
   /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!appId}.
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return appRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as a string, returns the row key as an object.
+   * @param encodedRowKey String representation of the row key.
+   * @return An <cite>ApplicationRowKey</cite> object.
+   */
+  public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for application table. The row key is of the
    * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
    * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
@@ -93,7 +115,7 @@
    * <p>
    */
   final private static class ApplicationRowKeyConverter implements
-      KeyConverter<ApplicationRowKey> {
+      KeyConverter<ApplicationRowKey>, KeyConverterToString<ApplicationRowKey> {
 
     private final KeyConverter<String> appIDKeyConverter =
         new AppIdKeyConverter();
@@ -201,6 +223,29 @@
       return new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
           appId);
     }
+
+    @Override
+    public String encodeAsString(ApplicationRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null) {
+        throw new IllegalArgumentException(
+            "Application row key cannot be encoded: null component.");
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId});
+    }
+
+    @Override
+    public ApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 5) {
+        throw new IllegalArgumentException(
+            "Invalid row key for application table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new ApplicationRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4));
+    }
   }
 
 }
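A round-trip sketch for the new string encoding above; component values are illustrative.

    /** Sketch: row keys now round-trip through an escaped string form. */
    final class RowKeySketch {
      static ApplicationRowKey roundTrip() {
        ApplicationRowKey rowKey = new ApplicationRowKey("cluster1", "user1",
            "flow1", 1002345678919L, "application_1111111111_1111");
        // Separator characters inside components are escaped on encode, so
        // the string can later be split back into the same five parts.
        String encoded = rowKey.getRowKeyAsString();
        return ApplicationRowKey.parseRowKeyFromString(encoded);
      }
    }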
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
index d3bdd39..4da720e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
@@ -70,7 +70,7 @@
 public class ApplicationTable extends BaseTable<ApplicationTable> {
   /** application prefix. */
   private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".application";
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "application";
 
   /** config param name that specifies the application table name. */
   public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
index ff61633..67497fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
@@ -98,51 +98,4 @@
     return column.readResult(result, columnQualifierBytes);
   }
 
-  /**
-   * Retrieve an {@link AppToFlowColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link AppToFlowColumn} or null
-   */
-  public static final AppToFlowColumn columnFor(String columnQualifier) {
-
-    // Match column based on value, assume column family matches.
-    for (AppToFlowColumn ec : AppToFlowColumn.values()) {
-      // Find a match based only on name.
-      if (ec.getColumnQualifier().equals(columnQualifier)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link AppToFlowColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(a,x) == columnFor(b,y)}
-   * if and only if {@code a.equals(b) & x.equals(y)} or
-   * {@code (x == y == null)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param name Name of the column to retrieve
-   * @return the corresponding {@link AppToFlowColumn} or null if both arguments
-   *         don't match.
-   */
-  public static final AppToFlowColumn columnFor(
-      AppToFlowColumnFamily columnFamily, String name) {
-
-    for (AppToFlowColumn ec : AppToFlowColumn.values()) {
-      // Find a match based column family and on name.
-      if (ec.columnFamily.equals(columnFamily)
-          && ec.getColumnQualifier().equals(name)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
new file mode 100644
index 0000000..752a380
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the app-to-flow table.
+ */
+public enum AppToFlowColumnPrefix implements ColumnPrefix<AppToFlowTable> {
+
+  /**
+   * The flow name.
+   */
+  FLOW_NAME(AppToFlowColumnFamily.MAPPING, "flow_name"),
+
+  /**
+   * The flow run ID.
+   */
+  FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
+
+  /**
+   * The user.
+   */
+  USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
+
+  private final ColumnHelper<AppToFlowTable> column;
+  private final ColumnFamily<AppToFlowTable> columnFamily;
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
+      String columnPrefix) {
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+    this.column = new ColumnHelper<AppToFlowTable>(columnFamily);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<AppToFlowTable> tableMutator, byte[] qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  @Override
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<AppToFlowTable> tableMutator, String qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return column.getValueConverter();
+  }
+
+  @Override
+  public Object readResult(Result result, String qualifier) throws IOException {
+    byte[] columnQualifier =
+        ColumnHelper.getColumnQualifier(columnPrefixBytes, qualifier);
+    return column.readResult(result, columnQualifier);
+  }
+
+  @Override
+  public <K> Map<K, Object> readResults(Result result,
+      KeyConverter<K> keyConverter)
+      throws IOException {
+    return column.readResults(result, columnPrefixBytes, keyConverter);
+  }
+
+  @Override
+  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
+      readResultsWithTimestamps(Result result,
+      KeyConverter<K> keyConverter) throws IOException {
+    return column.readResultsWithTimestamps(result, columnPrefixBytes,
+        keyConverter);
+  }
+
+  /**
+   * Retrieve an {@link AppToFlowColumnPrefix} given a name, or null if there
+   * is no match. The following holds true: {@code columnFor(x) == columnFor(y)}
+   * if and only if {@code x.equals(y)} or {@code (x == y == null)}
+   *
+   * @param columnPrefix Name of the column to retrieve
+   * @return the corresponding {@link AppToFlowColumnPrefix} or null
+   */
+  public static final AppToFlowColumnPrefix columnFor(String columnPrefix) {
+
+    // Match column based on value, assume column family matches.
+    for (AppToFlowColumnPrefix afcp : AppToFlowColumnPrefix.values()) {
+      // Find a match based only on name.
+      if (afcp.columnPrefix.equals(columnPrefix)) {
+        return afcp;
+      }
+    }
+
+    // Default to null
+    return null;
+  }
+
+  /**
+   * Retrieve an {@link AppToFlowColumnPrefix} given a name, or null if there
+   * is no match. The following holds true:
+   * {@code columnFor(a,x) == columnFor(b,y)} if and only if
+   * {@code (x == y == null)} or {@code a.equals(b) & x.equals(y)}
+   *
+   * @param columnFamily The columnFamily for which to retrieve the column.
+   * @param columnPrefix Name of the column to retrieve
+   * @return the corresponding {@link AppToFlowColumnPrefix} or null if both
+   *         arguments don't match.
+   */
+  public static final AppToFlowColumnPrefix columnFor(
+      AppToFlowColumnFamily columnFamily, String columnPrefix) {
+
+    // TODO: needs unit test to confirm and need to update javadoc to explain
+    // null prefix case.
+
+    for (AppToFlowColumnPrefix afcp : AppToFlowColumnPrefix.values()) {
+      // Find a match based column family and on name.
+      if (afcp.columnFamily.equals(columnFamily)
+          && (((columnPrefix == null) && (afcp.columnPrefix == null)) ||
+          (afcp.columnPrefix.equals(columnPrefix)))) {
+        return afcp;
+      }
+    }
+
+    // Default to null
+    return null;
+  }
+}
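As a usage sketch for the new prefix enum (the Result and the "cluster1" qualifier are hypothetical): the qualifier passed to readResult is a cluster id, because each cluster writes its own column under the prefix, as the AppToFlowTable javadoc below illustrates.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;

    // Minimal read-path sketch; result is assumed to be a row fetched from
    // the app_flow table and "cluster1" a cluster that wrote a mapping to it.
    static String readFlowName(Result result) throws IOException {
      Object flowName =
          AppToFlowColumnPrefix.FLOW_NAME.readResult(result, "cluster1");
      return (flowName == null) ? null : flowName.toString();
    }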
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
index 8df4407..146c475 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
@@ -17,41 +17,32 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
 
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
 /**
- * Represents a rowkey for the app_flow table.
+ * Represents a row key for the app_flow table, which is the app id.
  */
 public class AppToFlowRowKey {
-  private final String clusterId;
   private final String appId;
-  private final KeyConverter<AppToFlowRowKey> appToFlowRowKeyConverter =
-      new AppToFlowRowKeyConverter();
+  private final KeyConverter<String> appIdKeyConverter =
+      new AppIdKeyConverter();
 
-  public AppToFlowRowKey(String clusterId, String appId) {
-    this.clusterId = clusterId;
+  public AppToFlowRowKey(String appId) {
     this.appId = appId;
   }
 
-  public String getClusterId() {
-    return clusterId;
-  }
-
   public String getAppId() {
     return appId;
   }
 
   /**
-   * Constructs a row key prefix for the app_flow table as follows:
-   * {@code clusterId!AppId}.
+   * Constructs a row key for the app_flow table (the encoded app id).
    *
    * @return byte array with the row key
    */
   public  byte[] getRowKey() {
-    return appToFlowRowKeyConverter.encode(this);
+    return appIdKeyConverter.encode(appId);
   }
 
   /**
@@ -61,83 +52,7 @@
    * @return an <cite>AppToFlowRowKey</cite> object.
    */
   public static AppToFlowRowKey parseRowKey(byte[] rowKey) {
-    return new AppToFlowRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for app_flow table. The row key is of the form
-   * clusterId!appId. clusterId is a string and appId is encoded/decoded using
-   * {@link AppIdKeyConverter}.
-   * <p>
-   */
-  final private static class AppToFlowRowKeyConverter implements
-      KeyConverter<AppToFlowRowKey> {
-
-    private final KeyConverter<String> appIDKeyConverter =
-        new AppIdKeyConverter();
-
-    /**
-     * Intended for use in AppToFlowRowKey only.
-     */
-    private AppToFlowRowKeyConverter() {
-    }
-
-
-    /**
-     * App to flow row key is of the form clusterId!appId with the 2 segments
-     * separated by !. The sizes below indicate sizes of both of these segments
-     * in sequence. clusterId is a string. appId is represented as 12 bytes w.
-     * cluster Timestamp part of appid taking 8 bytes(long) and seq id taking 4
-     * bytes(int). Strings are variable in size (i.e. end whenever separator is
-     * encountered). This is used while decoding and helps in determining where
-     * to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT };
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes AppToFlowRowKey object into a byte array with each
-     * component/field in AppToFlowRowKey separated by Separator#QUALIFIERS.
-     * This leads to an app to flow table row key of the form clusterId!appId
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(AppToFlowRowKey rowKey) {
-      byte[] first =
-          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-      byte[] second = appIDKeyConverter.encode(rowKey.getAppId());
-      return Separator.QUALIFIERS.join(first, second);
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * Decodes an app to flow row key of the form clusterId!appId represented
-     * in byte format and converts it into an AppToFlowRowKey object.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public AppToFlowRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 2) {
-        throw new IllegalArgumentException("the row key is not valid for "
-            + "the app-to-flow table");
-      }
-      String clusterId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String appId = appIDKeyConverter.decode(rowKeyComponents[1]);
-      return new AppToFlowRowKey(clusterId, appId);
-    }
+    String appId = new AppIdKeyConverter().decode(rowKey);
+    return new AppToFlowRowKey(appId);
   }
 }
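With the cluster id gone from the key, a round trip is now just an app id encode/decode. A sketch with a made-up application id:

    // Hypothetical app id; encoding delegates to AppIdKeyConverter.
    AppToFlowRowKey key = new AppToFlowRowKey("application_1452828720457_0001");
    byte[] rowKey = key.getRowKey();
    AppToFlowRowKey parsed = AppToFlowRowKey.parseRowKey(rowKey);
    assert parsed.getAppId().equals(key.getAppId());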
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
index 40d95a4..04da5c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
@@ -41,21 +41,32 @@
  * <pre>
  * |--------------------------------------|
  * |  Row       | Column Family           |
- * |  key       | info                    |
+ * |  key       | mapping                 |
  * |--------------------------------------|
- * | clusterId! | flowName:               |
- * | AppId      | foo@daily_hive_report   |
+ * | appId      | flow_name!cluster1:     |
+ * |            | foo@daily_hive_report   |
  * |            |                         |
- * |            | flowRunId:              |
+ * |            | flow_run_id!cluster1:   |
  * |            | 1452828720457           |
  * |            |                         |
- * |            | user_id:                |
+ * |            | user_id!cluster1:       |
  * |            | admin                   |
  * |            |                         |
+ * |            | flow_name!cluster2:     |
+ * |            | bar@ad_hoc_query        |
  * |            |                         |
+ * |            | flow_run_id!cluster2:   |
+ * |            | 1452828498752           |
+ * |            |                         |
+ * |            | user_id!cluster2:       |
+ * |            | joe                     |
  * |            |                         |
  * |--------------------------------------|
  * </pre>
+ *
+ * It is possible (although unlikely) in a multi-cluster environment that there
+ * may be more than one application for a given app id. Different clusters are
+ * recorded as different sets of columns.
  */
 public class AppToFlowTable extends BaseTable<AppToFlowTable> {
   /** app_flow prefix. */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
index 8581aa4..93d809c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 /**
  * Implements behavior common to tables used in the timeline service storage. It
@@ -114,16 +115,42 @@
   }
 
   /**
-   * Get the table name for this table.
+   * Get the schema-prefixed table name for the given table.
    *
-   * @param hbaseConf HBase configuration from which table name will be fetched.
+   * @param conf HBase configuration from which table name will be fetched.
+   * @param tableName name of the table to be fetched.
    * @return A {@link TableName} object.
    */
-  public TableName getTableName(Configuration hbaseConf) {
-    TableName table =
-        TableName.valueOf(hbaseConf.get(tableNameConfName, defaultTableName));
-    return table;
+  public static TableName getTableName(Configuration conf, String tableName) {
+    String tableSchemaPrefix = conf.get(
+        YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX);
+    return TableName.valueOf(tableSchemaPrefix + tableName);
+  }
 
+  /**
+   * Get the table name for this table.
+   *
+   * @param conf HBase configuration from which table name will be fetched.
+   * @return A {@link TableName} object.
+   */
+  public TableName getTableName(Configuration conf) {
+    String tableName = conf.get(tableNameConfName, defaultTableName);
+    return getTableName(conf, tableName);
+  }
+
+  /**
+   * Get the table name based on the input config parameters.
+   *
+   * @param conf HBase configuration from which table name will be fetched.
+   * @param tableNameInConf the table name parameter in conf.
+   * @param defaultTableName the default table name.
+   * @return A {@link TableName} object.
+   */
+  public static TableName getTableName(Configuration conf,
+      String tableNameInConf, String defaultTableName) {
+    String tableName = conf.get(tableNameInConf, defaultTableName);
+    return getTableName(conf, tableName);
   }
 
   /**
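A consequence of the new static getTableName(Configuration, String) is that every table name now passes through the schema prefix. A sketch, assuming a prefix of "prod." and the table literal "timelineservice.entity" (both values are assumptions, not taken from this hunk):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    Configuration conf = new Configuration();
    // Hypothetical prefix value; the config key is the one referenced above.
    conf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
        "prod.");
    // Under these assumptions this resolves to the HBase table
    // "prod.timelineservice.entity".
    TableName name = BaseTable.getTableName(conf, "timelineservice.entity");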
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
index a9c2148..9f95d44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -52,11 +52,28 @@
 
   private final ValueConverter converter;
 
+  private final boolean supplementTs;
+
   public ColumnHelper(ColumnFamily<T> columnFamily) {
     this(columnFamily, GenericConverter.getInstance());
   }
 
   public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+    this(columnFamily, converter, false);
+  }
+
+  /**
+   * @param columnFamily column family implementation.
+   * @param converter converter used to encode/decode values stored in the column
+   *     or column prefix.
+   * @param needSupplementTs flag to indicate if cell timestamp needs to be
+   *     modified for this column by calling
+   *     {@link TimestampGenerator#getSupplementedTimestamp(long, String)}. This
+   *     would be required for columns (such as metrics in the flow run table)
+   *     where potential collisions can occur due to the same timestamp.
+   */
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter,
+      boolean needSupplementTs) {
     this.columnFamily = columnFamily;
     columnFamilyBytes = columnFamily.getBytes();
     if (converter == null) {
@@ -64,6 +81,7 @@
     } else {
       this.converter = converter;
     }
+    this.supplementTs = needSupplementTs;
   }
 
   /**
@@ -106,18 +124,24 @@
   }
 
   /*
-   * Figures out the cell timestamp used in the Put For storing into flow run
-   * table. We would like to left shift the timestamp and supplement it with the
-   * AppId id so that there are no collisions in the flow run table's cells
+   * Figures out the cell timestamp used in the Put for storing.
+   * Supplements the timestamp if required; this is typically done for the
+   * flow run table. When supplementing, we left shift the timestamp and
+   * combine it with the app id so that there are no collisions in the flow
+   * run table's cells.
    */
   private long getPutTimestamp(Long timestamp, Attribute[] attributes) {
     if (timestamp == null) {
       timestamp = System.currentTimeMillis();
     }
-    String appId = getAppIdFromAttributes(attributes);
-    long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
-        timestamp, appId);
-    return supplementedTS;
+    if (!this.supplementTs) {
+      return timestamp;
+    } else {
+      String appId = getAppIdFromAttributes(attributes);
+      long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
+          timestamp, appId);
+      return supplementedTS;
+    }
   }
 
   private String getAppIdFromAttributes(Attribute[] attributes) {
@@ -192,7 +216,6 @@
 
       NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
           resultMap.get(columnFamilyBytes);
-
       // could be that there is no such column family.
       if (columnCellMap != null) {
         for (Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap
@@ -235,9 +258,9 @@
               for (Entry<Long, byte[]> cell : cells.entrySet()) {
                 V value =
                     (V) converter.decodeValue(cell.getValue());
-                cellResults.put(
-                    TimestampGenerator.getTruncatedTimestamp(cell.getKey()),
-                    value);
+                Long ts = supplementTs ? TimestampGenerator.
+                    getTruncatedTimestamp(cell.getKey()) : cell.getKey();
+                cellResults.put(ts, value);
               }
             }
             results.put(converterColumnKey, cellResults);
@@ -318,8 +341,9 @@
   /**
    * @param columnPrefixBytes The byte representation for the column prefix.
    *          Should not contain {@link Separator#QUALIFIERS}.
-   * @param qualifier for the remainder of the column. Any
-   *          {@link Separator#QUALIFIERS} will be encoded in the qualifier.
+   * @param qualifier for the remainder of the column.
+   *          {@link Separator#QUALIFIERS} is permissible in the qualifier
+   *          as it is joined only with the column prefix bytes.
    * @return fully sanitized column qualifier that is a combination of prefix
    *         and qualifier. If prefix is null, the result is simply the encoded
    *         qualifier without any separator.
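The three-argument constructor makes timestamp supplementing opt-in instead of unconditional. A construction sketch; the FlowRunColumnFamily.INFO constant is an assumption here, and in this patch only the flow run table passes true (see the FlowRunColumn change further down):

    import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
    import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
    import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;

    // true: cell timestamps are left shifted and combined with the app id on
    // write, and truncated back on read, avoiding collisions between apps.
    ColumnHelper<FlowRunTable> supplemented = new ColumnHelper<FlowRunTable>(
        FlowRunColumnFamily.INFO, GenericConverter.getInstance(), true);

    // The two-argument form keeps pass-through timestamps for other tables.
    ColumnHelper<FlowRunTable> plain = new ColumnHelper<FlowRunTable>(
        FlowRunColumnFamily.INFO, GenericConverter.getInstance());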
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index b8c7029..b0d8527 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,25 +17,31 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.text.NumberFormat;
-import java.util.List;
-import java.util.Map;
 
 /**
  * A bunch of utility functions used in HBase TimelineService backend.
@@ -200,24 +206,6 @@
     return appId;
   }
 
-  public static boolean isFlowRunTable(HRegionInfo hRegionInfo,
-                                       Configuration conf) {
-    String regionTableName = hRegionInfo.getTable().getNameAsString();
-    String flowRunTableName = conf.get(FlowRunTable.TABLE_NAME_CONF_NAME,
-        FlowRunTable.DEFAULT_TABLE_NAME);
-    if (HBaseTimelineStorageUtils.LOG.isDebugEnabled()) {
-      HBaseTimelineStorageUtils.LOG.debug("regionTableName=" + regionTableName);
-    }
-    if (flowRunTableName.equalsIgnoreCase(regionTableName)) {
-      if (HBaseTimelineStorageUtils.LOG.isDebugEnabled()) {
-        HBaseTimelineStorageUtils.LOG.debug(
-            "table is the flow run table!! " + flowRunTableName);
-      }
-      return true;
-    }
-    return false;
-  }
-
   /**
    * Converts an int into it's inverse int to be used in (row) keys
    * where we want to have the largest int value in the top of the table
@@ -273,4 +261,94 @@
     sb.append(APP_ID_FORMAT.get().format(appId.getId()));
     return sb.toString();
   }
+
+  /**
+   * @param conf Yarn configuration. Used to see if there is an explicit config
+   *          pointing to the HBase config file to read. It should not be null
+   *          or a NullPointerException will be thrown.
+   * @return a configuration with the HBase configuration from the classpath,
+   *         optionally overwritten by the timeline service configuration URL if
+   *         specified.
+   * @throws MalformedURLException if a timeline service HBase configuration URL
+   *           is specified but is a malformed URL.
+   */
+  public static Configuration getTimelineServiceHBaseConf(Configuration conf)
+      throws MalformedURLException {
+    if (conf == null) {
+      throw new NullPointerException();
+    }
+
+    Configuration hbaseConf;
+    String timelineServiceHBaseConfFileURL =
+        conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
+    if (timelineServiceHBaseConfFileURL != null
+        && timelineServiceHBaseConfFileURL.length() > 0) {
+      LOG.info("Using hbase configuration at " +
+          timelineServiceHBaseConfFileURL);
+      // create a clone so that we don't mess with our input one
+      hbaseConf = new Configuration(conf);
+      Configuration plainHBaseConf = new Configuration(false);
+      URL hbaseSiteXML = new URL(timelineServiceHBaseConfFileURL);
+      plainHBaseConf.addResource(hbaseSiteXML);
+      HBaseConfiguration.merge(hbaseConf, plainHBaseConf);
+    } else {
+      // default to what is on the classpath
+      hbaseConf = HBaseConfiguration.create(conf);
+    }
+    return hbaseConf;
+  }
+
+  /**
+   * Given a row key prefix stored in a byte array, return a byte array for its
+   * immediate next row key.
+   *
+   * @param rowKeyPrefix The provided row key prefix, represented in an array.
+   * @return the closest next row key of the provided row key.
+   */
+  public static byte[] calculateTheClosestNextRowKeyForPrefix(
+      byte[] rowKeyPrefix) {
+    // Essentially we are treating it like an 'unsigned very very long' and
+    // doing +1 manually.
+    // Search for the place where the trailing 0xFFs start
+    int offset = rowKeyPrefix.length;
+    while (offset > 0) {
+      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
+        break;
+      }
+      offset--;
+    }
+
+    if (offset == 0) {
+      // We got a 0xFFFF... (only FFs) stopRow value which is
+      // the last possible prefix before the end of the table.
+      // So set it to stop at the 'end of the table'
+      return HConstants.EMPTY_END_ROW;
+    }
+
+    // Copy the right length of the original
+    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
+    // And increment the last one
+    newStopRow[newStopRow.length - 1]++;
+    return newStopRow;
+  }
+
+  /**
+   * Checks if the passed object is of an integral type (Short/Integer/Long).
+   *
+   * @param obj Object to be checked.
+   * @return true if the passed object is of type Short, Integer or Long;
+   * false otherwise.
+   */
+  public static boolean isIntegralValue(Object obj) {
+    return (obj instanceof Short) || (obj instanceof Integer) ||
+        (obj instanceof Long);
+  }
+
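+  /**
+   * Sets a time range on the given query (Get or Scan) for the metrics
+   * column family, skipping the call when the range is unbounded. tsEnd is
+   * treated as inclusive, hence the +1 to produce HBase's exclusive maximum.
+   */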
+  public static void setMetricsTimeRange(Query query, byte[] metricsCf,
+      long tsBegin, long tsEnd) {
+    if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
+      query.setColumnFamilyTimeRange(metricsCf,
+          tsBegin, ((tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE : (tsEnd + 1)));
+    }
+  }
 }
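A worked example of the +1-with-carry logic above; this is pure byte arithmetic and assumes nothing beyond the method as written:

    // Trailing 0xFFs are stripped and the last remaining byte is incremented:
    // {0x01, 0x02, 0xFF, 0xFF} -> {0x01, 0x03}, the smallest row key strictly
    // greater than every key that starts with the original prefix.
    byte[] next = HBaseTimelineStorageUtils
        .calculateTheClosestNextRowKeyForPrefix(
            new byte[] {0x01, 0x02, (byte) 0xFF, (byte) 0xFF});

    // An all-0xFF prefix has no successor, so the stop row is left open:
    byte[] open = HBaseTimelineStorageUtils
        .calculateTheClosestNextRowKeyForPrefix(
            new byte[] {(byte) 0xFF, (byte) 0xFF});  // HConstants.EMPTY_END_ROW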
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
new file mode 100644
index 0000000..1f52a7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Interface which has to be implemented for encoding and decoding row keys or
+ * column qualifiers as string.
+ */
+public interface KeyConverterToString<T> {
+  /**
+   * Encode key as string.
+   * @param key of type T to be encoded as string.
+   * @return encoded value as string.
+   */
+  String encodeAsString(T key);
+
+  /**
+   * Decode row key from string to a key of type T.
+   * @param encodedKey string representation of row key
+   * @return type T which has been constructed after decoding string.
+   */
+  T decodeFromString(String encodedKey);
+}
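A minimal hypothetical implementation, just to show the contract; the real implementers added later in this patch (EntityRowKeyConverter and FlowActivityRowKeyConverter) escape and join several segments via TimelineReaderUtils:

    // Hypothetical single-segment converter: the encoded form is the key
    // itself, so both directions are the identity function.
    public class ClusterIdConverter implements KeyConverterToString<String> {
      @Override
      public String encodeAsString(String key) {
        return key;
      }

      @Override
      public String decodeFromString(String encodedKey) {
        return encodedKey;
      }
    }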
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
index 600601a..6ab69f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
@@ -40,7 +40,7 @@
 
   @Override
   public byte[] encodeValue(Object value) throws IOException {
-    if (!TimelineStorageUtils.isIntegralValue(value)) {
+    if (!HBaseTimelineStorageUtils.isIntegralValue(value)) {
       throw new IOException("Expected integral value");
     }
     return Bytes.toBytes(((Number)value).longValue());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
index 93b4b36..b228d84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
@@ -94,28 +94,6 @@
     return column.readResult(result, columnQualifierBytes);
   }
 
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null
-   */
-  public static final EntityColumn columnFor(String columnQualifier) {
-
-    // Match column based on value, assume column family matches.
-    for (EntityColumn ec : EntityColumn.values()) {
-      // Find a match based only on name.
-      if (ec.getColumnQualifier().equals(columnQualifier)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
   @Override
   public byte[] getColumnQualifierBytes() {
     return columnQualifierBytes.clone();
@@ -131,30 +109,4 @@
     return column.getValueConverter();
   }
 
-  /**
-   * Retrieve an {@link EntityColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(a,x) == columnFor(b,y)}
-   * if and only if {@code a.equals(b) & x.equals(y)} or
-   * {@code (x == y == null)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param name Name of the column to retrieve
-   * @return the corresponding {@link EntityColumn} or null if both arguments
-   *         don't match.
-   */
-  public static final EntityColumn columnFor(EntityColumnFamily columnFamily,
-      String name) {
-
-    for (EntityColumn ec : EntityColumn.values()) {
-      // Find a match based column family and on name.
-      if (ec.columnFamily.equals(columnFamily)
-          && ec.getColumnQualifier().equals(name)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
index e410549..d385108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
@@ -246,55 +246,4 @@
         keyConverter);
   }
 
-  /**
-   * Retrieve an {@link EntityColumnPrefix} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link EntityColumnPrefix} or null
-   */
-  public static final EntityColumnPrefix columnFor(String columnPrefix) {
-
-    // Match column based on value, assume column family matches.
-    for (EntityColumnPrefix ecp : EntityColumnPrefix.values()) {
-      // Find a match based only on name.
-      if (ecp.getColumnPrefix().equals(columnPrefix)) {
-        return ecp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link EntityColumnPrefix} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(a,x) == columnFor(b,y)}
-   * if and only if {@code (x == y == null)} or
-   * {@code a.equals(b) & x.equals(y)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link EntityColumnPrefix} or null if both
-   *         arguments don't match.
-   */
-  public static final EntityColumnPrefix columnFor(
-      EntityColumnFamily columnFamily, String columnPrefix) {
-
-    // TODO: needs unit test to confirm and need to update javadoc to explain
-    // null prefix case.
-
-    for (EntityColumnPrefix ecp : EntityColumnPrefix.values()) {
-      // Find a match based column family and on name.
-      if (ecp.columnFamily.equals(columnFamily)
-          && (((columnPrefix == null) && (ecp.getColumnPrefix() == null)) ||
-          (ecp.getColumnPrefix().equals(columnPrefix)))) {
-        return ecp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
index ff22178..b85a9b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -33,18 +37,21 @@
   private final Long flowRunId;
   private final String appId;
   private final String entityType;
+  private final Long entityIdPrefix;
   private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
+  private final EntityRowKeyConverter entityRowKeyConverter =
       new EntityRowKeyConverter();
 
   public EntityRowKey(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId, String entityType, String entityId) {
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId) {
     this.clusterId = clusterId;
     this.userId = userId;
     this.flowName = flowName;
     this.flowRunId = flowRunId;
     this.appId = appId;
     this.entityType = entityType;
+    this.entityIdPrefix = entityIdPrefix;
     this.entityId = entityId;
   }
 
@@ -76,6 +83,10 @@
     return entityId;
   }
 
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
   /**
    * Constructs a row key for the entity table as follows:
    * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
@@ -89,7 +100,6 @@
 
   /**
    * Given the raw row key as bytes, returns the row key as an object.
-   *
    * @param rowKey byte representation of row key.
    * @return An <cite>EntityRowKey</cite> object.
    */
@@ -98,6 +108,27 @@
   }
 
   /**
+   * Constructs a row key for the entity table as follows:
+   * <p>
+   * {@code userName!clusterId!flowName!flowRunId!AppId!
+   * entityType!entityIdPrefix!entityId}.
+   * </p>
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return entityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>EntityRowKey</cite> object.
+   */
+  public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for entity table. The row key is of the form :
    * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
    * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
@@ -105,7 +136,7 @@
    * <p>
    */
   final private static class EntityRowKeyConverter implements
-      KeyConverter<EntityRowKey> {
+      KeyConverter<EntityRowKey>, KeyConverterToString<EntityRowKey> {
 
     private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();
 
@@ -126,7 +157,7 @@
     private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
         Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
         AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
-        Separator.VARIABLE_SIZE };
+        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
 
     /*
      * (non-Javadoc)
@@ -172,11 +203,25 @@
       byte[] entityType =
           Separator.encode(rowKey.getEntityType(), Separator.SPACE,
               Separator.TAB, Separator.QUALIFIERS);
-      byte[] entityId =
-          rowKey.getEntityId() == null ? Separator.EMPTY_BYTES : Separator
-              .encode(rowKey.getEntityId(), Separator.SPACE, Separator.TAB,
-                  Separator.QUALIFIERS);
-      byte[] fourth = Separator.QUALIFIERS.join(entityType, entityId);
+
+      if (rowKey.getEntityIdPrefix() == null) {
+        return Separator.QUALIFIERS.join(first, second, third, entityType,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+
+      if (rowKey.getEntityId() == null) {
+        return Separator.QUALIFIERS.join(first, second, third, entityType,
+            entityIdPrefix, Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] fourth =
+          Separator.QUALIFIERS.join(entityType, entityIdPrefix, entityId);
+
       return Separator.QUALIFIERS.join(first, second, third, fourth);
     }
 
@@ -196,7 +241,7 @@
     public EntityRowKey decode(byte[] rowKey) {
       byte[][] rowKeyComponents =
           Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 7) {
+      if (rowKeyComponents.length != 8) {
         throw new IllegalArgumentException("the row key is not valid for "
             + "an entity");
       }
@@ -215,11 +260,40 @@
       String entityType =
           Separator.decode(Bytes.toString(rowKeyComponents[5]),
               Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      Long entityIdPrefix = Bytes.toLong(rowKeyComponents[6]);
+
       String entityId =
-          Separator.decode(Bytes.toString(rowKeyComponents[6]),
+          Separator.decode(Bytes.toString(rowKeyComponents[7]),
               Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
       return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
-          entityType, entityId);
+          entityType, entityIdPrefix, entityId);
+    }
+
+    @Override
+    public String encodeAsString(EntityRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null) {
+        throw new IllegalArgumentException("Entity row key cannot be encoded"
+            + " as string: all row key components must be non-null.");
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId});
+    }
+
+    @Override
+    public EntityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 8) {
+        throw new IllegalArgumentException("Invalid row key for entity table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      Long entityIdPrefix = Long.valueOf(split.get(6));
+      return new EntityRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
     }
   }
 }
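A round-trip sketch for the new string form (all values hypothetical); decodeFromString insists on exactly eight escaped segments, matching the null checks in encodeAsString:

    EntityRowKey key = new EntityRowKey("cluster1", "joe", "daily_hive_report",
        1452828720457L, "application_1452828720457_0001", "YARN_CONTAINER",
        0L, "container_1452828720457_0001_01_000001");
    String encoded = key.getRowKeyAsString();
    EntityRowKey decoded = EntityRowKey.parseRowKeyFromString(encoded);
    assert decoded.getEntityId().equals(key.getEntityId());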
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
index 9146180..47a1789 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
@@ -31,17 +31,20 @@
    * Creates a prefix which generates the following rowKeyPrefixes for the
    * entity table:
    * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!}.
-   *
    * @param clusterId identifying the cluster
    * @param userId identifying the user
    * @param flowName identifying the flow
    * @param flowRunId identifying the individual run of this flow
    * @param appId identifying the application
    * @param entityType which entity type
+   * @param entityIdPrefix for entityId
+   * @param entityId for an entity
    */
   public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId, String entityType) {
-    super(clusterId, userId, flowName, flowRunId, appId, entityType, null);
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId) {
+    super(clusterId, userId, flowName, flowRunId, appId, entityType,
+        entityIdPrefix, entityId);
   }
 
   /**
@@ -57,7 +60,7 @@
    */
   public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
       Long flowRunId, String appId) {
-    super(clusterId, userId, flowName, flowRunId, appId, null, null);
+    this(clusterId, userId, flowName, flowRunId, appId, null, null, null);
   }
 
   /*
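Combined with calculateTheClosestNextRowKeyForPrefix from earlier in this patch, the prefix yields natural scan bounds. A sketch with hypothetical ids, assuming the RowKeyPrefix#getRowKeyPrefix() accessor (not shown in this hunk):

    byte[] startRow = new EntityRowKeyPrefix("cluster1", "joe",
        "daily_hive_report", 1452828720457L, "application_1452828720457_0001",
        "YARN_CONTAINER", null, null).getRowKeyPrefix();
    byte[] stopRow = HBaseTimelineStorageUtils
        .calculateTheClosestNextRowKeyForPrefix(startRow);
    // new Scan().setStartRow(startRow).setStopRow(stopRow) then covers
    // exactly the rows whose key begins with this prefix.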
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
index df5ce69..988bba2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
@@ -50,8 +50,8 @@
  * | flowRunId! |                              |              | configKey2:  |
  * | AppId!     | created_time:                | metricId1:   | configValue2 |
  * | entityType!| 1392993084018                | metricValue2 |              |
- * | entityId   |                              | @timestamp2  |              |
- * |            | i!infoKey:                   |              |              |
+ * | idPrefix!  |                              | @timestamp2  |              |
+ * | entityId   | i!infoKey:                   |              |              |
  * |            | infoValue                    | metricId1:   |              |
  * |            |                              | metricValue1 |              |
  * |            | r!relatesToKey:              | @timestamp2  |              |
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..706b002 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -191,62 +191,6 @@
         keyConverter);
   }
 
-  /**
-   * Retrieve an {@link FlowActivityColumnPrefix} given a name, or null if there
-   * is no match. The following holds true: {@code columnFor(x) == columnFor(y)}
-   * if and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnPrefix
-   *          Name of the column to retrieve
-   * @return the corresponding {@link FlowActivityColumnPrefix} or null
-   */
-  public static final FlowActivityColumnPrefix columnFor(String columnPrefix) {
-
-    // Match column based on value, assume column family matches.
-    for (FlowActivityColumnPrefix flowActivityColPrefix :
-        FlowActivityColumnPrefix.values()) {
-      // Find a match based only on name.
-      if (flowActivityColPrefix.getColumnPrefix().equals(columnPrefix)) {
-        return flowActivityColPrefix;
-      }
-    }
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link FlowActivityColumnPrefix} given a name, or null if there
-   * is no match. The following holds true:
-   * {@code columnFor(a,x) == columnFor(b,y)} if and only if
-   * {@code (x == y == null)} or {@code a.equals(b) & x.equals(y)}
-   *
-   * @param columnFamily
-   *          The columnFamily for which to retrieve the column.
-   * @param columnPrefix
-   *          Name of the column to retrieve
-   * @return the corresponding {@link FlowActivityColumnPrefix} or null if both
-   *         arguments don't match.
-   */
-  public static final FlowActivityColumnPrefix columnFor(
-      FlowActivityColumnFamily columnFamily, String columnPrefix) {
-
-    // TODO: needs unit test to confirm and need to update javadoc to explain
-    // null prefix case.
-
-    for (FlowActivityColumnPrefix flowActivityColumnPrefix :
-        FlowActivityColumnPrefix.values()) {
-      // Find a match based column family and on name.
-      if (flowActivityColumnPrefix.columnFamily.equals(columnFamily)
-          && (((columnPrefix == null) && (flowActivityColumnPrefix
-              .getColumnPrefix() == null)) || (flowActivityColumnPrefix
-              .getColumnPrefix().equals(columnPrefix)))) {
-        return flowActivityColumnPrefix;
-      }
-    }
-    // Default to null
-    return null;
-  }
-
   /*
    * (non-Javadoc)
    *
@@ -271,7 +215,7 @@
     byte[] columnQualifier = getColumnPrefixBytes(qualifier);
     Attribute[] combinedAttributes =
         HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
         combinedAttributes);
   }
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
index bb77e36..b8a5dba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -17,10 +17,14 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
 /**
@@ -32,8 +36,8 @@
   private final Long dayTs;
   private final String userId;
   private final String flowName;
-  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
-      new FlowActivityRowKeyConverter();
+  private final FlowActivityRowKeyConverter
+      flowActivityRowKeyConverter = new FlowActivityRowKeyConverter();
 
   /**
    * @param clusterId identifying the cluster
@@ -104,13 +108,32 @@
   }
 
   /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   * @return String representation of row key
+   */
+  public String getRowKeyAsString() {
+    return flowActivityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the raw row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowActivityRowKey</cite> object.
+   */
+  public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for flow activity table. The row key is of the
    * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
    * timestamp) is a long and rest are strings.
    * <p>
    */
-  final private static class FlowActivityRowKeyConverter implements
-      KeyConverter<FlowActivityRowKey> {
+  private static final class FlowActivityRowKeyConverter
+      implements KeyConverter<FlowActivityRowKey>,
+      KeyConverterToString<FlowActivityRowKey> {
 
     private FlowActivityRowKeyConverter() {
     }
@@ -192,5 +215,33 @@
               Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
       return new FlowActivityRowKey(clusterId, dayTs, userId, flowName);
     }
+
+    @Override
+    public String encodeAsString(FlowActivityRowKey key) {
+      if (key.getDayTimestamp() == null) {
+        return TimelineReaderUtils
+            .joinAndEscapeStrings(new String[] {key.clusterId});
+      } else if (key.getUserId() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString()});
+      } else if (key.getFlowName() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString(), key.userId});
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.dayTs.toString(), key.userId, key.flowName});
+    }
+
+    @Override
+    public FlowActivityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow activity.");
+      }
+      Long dayTs = Long.valueOf(split.get(1));
+      return new FlowActivityRowKey(split.get(0), dayTs, split.get(2),
+          split.get(3));
+    }
   }
 }
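
The encodeAsString/decodeFromString pair above gives each flow activity row a compact, human-readable token that clients can echo back as a "fromid" filter. A small round-trip sketch, using only the constructor and methods visible in this hunk (the values are made up):

    // The timestamp is an example; the FlowActivityRowKey constructor
    // normalizes it to the top of the day before storing it.
    FlowActivityRowKey key =
        new FlowActivityRowKey("cluster1", 1493625600000L, "alice", "flow1");

    // Encodes to "cluster1!<dayTs>!alice!flow1", with separator characters
    // escaped by TimelineReaderUtils.joinAndEscapeStrings.
    String fromId = key.getRowKeyAsString();

    // Decoding expects exactly four components and otherwise throws
    // IllegalArgumentException, as shown above.
    FlowActivityRowKey parsed =
        FlowActivityRowKey.parseRowKeyFromString(fromId);
    // parsed carries the same cluster, day timestamp, user and flow name,
    // and can be turned back into HBase bytes via getRowKey().
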
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
index 90dd345..3797faf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
@@ -76,7 +76,7 @@
     // Future-proof by ensuring the right column prefix hygiene.
     this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
         .encode(columnQualifier));
-    this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter);
+    this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
   }
 
   /**
@@ -123,60 +123,9 @@
     return column.readResult(result, columnQualifierBytes);
   }
 
-  /**
-   * Retrieve an {@link FlowRunColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnQualifier
-   *          Name of the column to retrieve
-   * @return the corresponding {@link FlowRunColumn} or null
-   */
-  public static final FlowRunColumn columnFor(String columnQualifier) {
-
-    // Match column based on value, assume column family matches.
-    for (FlowRunColumn ec : FlowRunColumn.values()) {
-      // Find a match based only on name.
-      if (ec.getColumnQualifier().equals(columnQualifier)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
   @Override
   public ValueConverter getValueConverter() {
     return column.getValueConverter();
   }
 
-  /**
-   * Retrieve an {@link FlowRunColumn} given a name, or null if there is no
-   * match. The following holds true: {@code columnFor(a,x) == columnFor(b,y)}
-   * if and only if {@code a.equals(b) & x.equals(y)} or
-   * {@code (x == y == null)}
-   *
-   * @param columnFamily
-   *          The columnFamily for which to retrieve the column.
-   * @param name
-   *          Name of the column to retrieve
-   * @return the corresponding {@link FlowRunColumn} or null if both arguments
-   *         don't match.
-   */
-  public static final FlowRunColumn columnFor(FlowRunColumnFamily columnFamily,
-      String name) {
-
-    for (FlowRunColumn ec : FlowRunColumn.values()) {
-      // Find a match based column family and on name.
-      if (ec.columnFamily.equals(columnFamily)
-          && ec.getColumnQualifier().equals(name)) {
-        return ec;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
index 278d18e..f521cd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
@@ -69,7 +69,7 @@
   private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
       String columnPrefix, AggregationOperation fra, ValueConverter converter,
       boolean compoundColQual) {
-    column = new ColumnHelper<FlowRunTable>(columnFamily, converter);
+    column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
     this.columnFamily = columnFamily;
     this.columnPrefix = columnPrefix;
     if (columnPrefix == null) {
@@ -209,60 +209,9 @@
         keyConverter);
   }
 
-  /**
-   * Retrieve an {@link FlowRunColumnPrefix} given a name, or null if there is
-   * no match. The following holds true: {@code columnFor(x) == columnFor(y)} if
-   * and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link FlowRunColumnPrefix} or null
-   */
-  public static final FlowRunColumnPrefix columnFor(String columnPrefix) {
-
-    // Match column based on value, assume column family matches.
-    for (FlowRunColumnPrefix frcp : FlowRunColumnPrefix.values()) {
-      // Find a match based only on name.
-      if (frcp.getColumnPrefix().equals(columnPrefix)) {
-        return frcp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
   @Override
   public ValueConverter getValueConverter() {
     return column.getValueConverter();
   }
 
-  /**
-   * Retrieve an {@link FlowRunColumnPrefix} given a name, or null if there is
-   * no match. The following holds true:
-   * {@code columnFor(a,x) == columnFor(b,y)} if and only if
-   * {@code (x == y == null)} or {@code a.equals(b) & x.equals(y)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link FlowRunColumnPrefix} or null if both
-   *         arguments don't match.
-   */
-  public static final FlowRunColumnPrefix columnFor(
-      FlowRunColumnFamily columnFamily, String columnPrefix) {
-
-    // TODO: needs unit test to confirm and need to update javadoc to explain
-    // null prefix case.
-
-    for (FlowRunColumnPrefix frcp : FlowRunColumnPrefix.values()) {
-      // Find a match based column family and on name.
-      if (frcp.columnFamily.equals(columnFamily)
-          && (((columnPrefix == null) && (frcp.getColumnPrefix() == null)) ||
-          (frcp.getColumnPrefix().equals(columnPrefix)))) {
-        return frcp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
index 221420e..359eec9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
@@ -58,7 +58,6 @@
 
   private static final Logger LOG =
       LoggerFactory.getLogger(FlowRunCoprocessor.class);
-  private boolean isFlowRunRegion = false;
 
   private Region region;
   /**
@@ -72,15 +71,9 @@
     if (e instanceof RegionCoprocessorEnvironment) {
       RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
       this.region = env.getRegion();
-      isFlowRunRegion = HBaseTimelineStorageUtils.isFlowRunTable(
-          region.getRegionInfo(), env.getConfiguration());
     }
   }
 
-  public boolean isFlowRunRegion() {
-    return isFlowRunRegion;
-  }
-
   /*
    * (non-Javadoc)
    *
@@ -100,10 +93,6 @@
   public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
       WALEdit edit, Durability durability) throws IOException {
     Map<String, byte[]> attributes = put.getAttributesMap();
-
-    if (!isFlowRunRegion) {
-      return;
-    }
     // Assumption is that all the cells in a put are the same operation.
     List<Tag> tags = new ArrayList<>();
     if ((attributes != null) && (attributes.size() > 0)) {
@@ -171,10 +160,6 @@
   @Override
   public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
       Get get, List<Cell> results) throws IOException {
-    if (!isFlowRunRegion) {
-      return;
-    }
-
     Scan scan = new Scan(get);
     scan.setMaxVersions();
     RegionScanner scanner = null;
@@ -206,12 +191,9 @@
   public RegionScanner preScannerOpen(
       ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
       RegionScanner scanner) throws IOException {
-
-    if (isFlowRunRegion) {
-      // set max versions for scan to see all
-      // versions to aggregate for metrics
-      scan.setMaxVersions();
-    }
+    // set max versions for scan to see all
+    // versions to aggregate for metrics
+    scan.setMaxVersions();
     return scanner;
   }
 
@@ -231,9 +213,6 @@
   public RegionScanner postScannerOpen(
       ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
       RegionScanner scanner) throws IOException {
-    if (!isFlowRunRegion) {
-      return scanner;
-    }
     return new FlowScanner(e.getEnvironment(), scan,
         scanner, FlowScannerOperation.READ);
   }
@@ -242,9 +221,6 @@
   public InternalScanner preFlush(
       ObserverContext<RegionCoprocessorEnvironment> c, Store store,
       InternalScanner scanner) throws IOException {
-    if (!isFlowRunRegion) {
-      return scanner;
-    }
     if (LOG.isDebugEnabled()) {
       if (store != null) {
         LOG.debug("preFlush store = " + store.getColumnFamilyName()
@@ -265,9 +241,6 @@
   @Override
   public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
       Store store, StoreFile resultFile) {
-    if (!isFlowRunRegion) {
-      return;
-    }
     if (LOG.isDebugEnabled()) {
       if (store != null) {
         LOG.debug("postFlush store = " + store.getColumnFamilyName()
@@ -289,9 +262,6 @@
       InternalScanner scanner, ScanType scanType, CompactionRequest request)
       throws IOException {
 
-    if (!isFlowRunRegion) {
-      return scanner;
-    }
     FlowScannerOperation requestOp = FlowScannerOperation.MINOR_COMPACTION;
     if (request != null) {
       requestOp = (request.isMajor() ? FlowScannerOperation.MAJOR_COMPACTION
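
With the coprocessor now registered on the flow run table itself at creation time (see the FlowRunTable change below), every region it observes is by definition a flow run region, so the isFlowRunRegion field and its guards were dead code. What remains essential is forcing full-version reads; a minimal sketch of why, assuming a plain HBase client reading this table directly:

    Scan scan = new Scan();
    scan.setMaxVersions(); // surface every cell version, not just the latest
    // Flow run metric cells keep one version per write. The FlowScanner
    // wrapped around the region scanner in postScannerOpen folds those
    // versions into one aggregated value (sum, min or max) per cell, so
    // hiding older versions would silently drop data from the totals.
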
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
index 8fda9a8..7ce91cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -70,7 +74,6 @@
 
   /**
    * Given the raw row key as bytes, returns the row key as an object.
-   *
    * @param rowKey Byte representation of row key.
    * @return A <cite>FlowRunRowKey</cite> object.
    */
@@ -79,6 +82,24 @@
   }
 
   /**
+   * Constructs a row key for the flow run table as follows:
+   * {@code clusterId!userId!flowName!Flow Run Id}.
+   * @return String representation of row key
+   */
+  public String getRowKeyAsString() {
+    return flowRunRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowRunRowKey</cite> object.
+   */
+  public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * returns the Flow Key as a verbose String output.
    * @return String
    */
@@ -101,7 +122,7 @@
    * <p>
    */
   final private static class FlowRunRowKeyConverter implements
-      KeyConverter<FlowRunRowKey> {
+      KeyConverter<FlowRunRowKey>, KeyConverterToString<FlowRunRowKey> {
 
     private FlowRunRowKeyConverter() {
     }
@@ -186,5 +207,27 @@
           LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
       return new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
     }
+
+    @Override
+    public String encodeAsString(FlowRunRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null) {
+        throw new IllegalArgumentException("Flow run row key has null fields.");
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.userId, key.flowName, key.flowRunId.toString()});
+    }
+
+    @Override
+    public FlowRunRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow run table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new FlowRunRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId);
+    }
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
index 9c6549f..a1d32ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
@@ -29,6 +29,8 @@
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * The flow run table has column family info
@@ -133,8 +135,15 @@
     infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
 
     // TODO: figure the split policy
-    flowRunTableDescp.addCoprocessor(FlowRunCoprocessor.class
-        .getCanonicalName());
+    String coprocessorJarPathStr = hbaseConf.get(
+        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
+        YarnConfiguration.DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR);
+
+    Path coprocessorJarPath = new Path(coprocessorJarPathStr);
+    LOG.info("CoprocessorJarPath=" + coprocessorJarPath.toString());
+    flowRunTableDescp.addCoprocessor(
+        FlowRunCoprocessor.class.getCanonicalName(), coprocessorJarPath,
+        Coprocessor.PRIORITY_USER, null);
     admin.createTable(flowRunTableDescp);
     LOG.info("Status of table creation for " + table.getNameAsString() + "="
         + admin.tableExists(table));
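
Loading the coprocessor from a jar on HDFS means region servers no longer need the timeline service classes on their local classpath. A sketch of the wiring; the configuration key is the YarnConfiguration constant used above, but the jar path is an example value, not a shipped default:

    Configuration hbaseConf = HBaseConfiguration.create();
    hbaseConf.set(
        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
        "hdfs://nn.example.com:8020/hbase/coprocessor/timelineservice.jar");
    // createTable(admin, hbaseConf) above then hands this path to
    // addCoprocessor() with Coprocessor.PRIORITY_USER, so HBase fetches
    // FlowRunCoprocessor from HDFS when the table's regions open.
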
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
index d0bc366..e78db2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
@@ -17,12 +17,12 @@
  */
 
 /**
- * Package org.apache.hadoop.server.timelineservice contains classes to be used
- * across timeline reader and collector.
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage contains
+ * classes which define and implement reading and writing to backend storage.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
new file mode 100644
index 0000000..5bacf66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+/**
+ * The base class for reading timeline data from the HBase storage. This class
+ * provides basic support to validate and augment reader context.
+ */
+public abstract class AbstractTimelineStorageReader {
+
+  private final TimelineReaderContext context;
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
+
+  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
+    context = ctxt;
+  }
+
+  protected TimelineReaderContext getContext() {
+    return context;
+  }
+
+  /**
+   * Looks up flow context from AppToFlow table.
+   *
+   * @param appToFlowRowKey to identify Cluster and App Ids.
+   * @param clusterId the cluster id.
+   * @param hbaseConf HBase configuration.
+   * @param conn HBase Connection.
+   * @return flow context information.
+   * @throws IOException if any problem occurs while fetching flow information.
+   */
+  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
+      String clusterId, Configuration hbaseConf, Connection conn)
+      throws IOException {
+    byte[] rowKey = appToFlowRowKey.getRowKey();
+    Get get = new Get(rowKey);
+    Result result = appToFlowTable.getResult(hbaseConf, conn, get);
+    if (result != null && !result.isEmpty()) {
+      Object flowName =
+          AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
+      Object flowRunId =
+          AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
+      Object userId =
+          AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
+      if (flowName == null || userId == null || flowRunId == null) {
+        throw new NotFoundException(
+            "Unable to find the context flow name, and flow run id, "
+            + "and user id for clusterId=" + clusterId
+            + ", appId=" + appToFlowRowKey.getAppId());
+      }
+      return new FlowContext((String)userId, (String)flowName,
+          ((Number)flowRunId).longValue());
+    } else {
+      throw new NotFoundException(
+          "Unable to find the context flow name, and flow run id, "
+          + "and user id for clusterId=" + clusterId
+          + ", appId=" + appToFlowRowKey.getAppId());
+    }
+  }
+
+  /**
+   * Sets certain parameters to defaults if the values are not provided.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @throws IOException if any exception is encountered while setting params.
+   */
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    defaultAugmentParams(hbaseConf, conn);
+  }
+
+  /**
+   * Default behavior for all timeline readers to augment parameters.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @throws IOException if any exception is encountered while setting params.
+   */
+  protected final void defaultAugmentParams(Configuration hbaseConf,
+      Connection conn) throws IOException {
+    // In reality, either all three fields are null or none of them is.
+    if (context.getFlowName() == null || context.getFlowRunId() == null
+        || context.getUserId() == null) {
+      // Get flow context information from AppToFlow table.
+      AppToFlowRowKey appToFlowRowKey =
+          new AppToFlowRowKey(context.getAppId());
+      FlowContext flowContext =
+          lookupFlowContext(appToFlowRowKey, context.getClusterId(), hbaseConf,
+          conn);
+      context.setFlowName(flowContext.flowName);
+      context.setFlowRunId(flowContext.flowRunId);
+      context.setUserId(flowContext.userId);
+    }
+  }
+
+  /**
+   * Validates the required parameters to read the entities.
+   */
+  protected abstract void validateParams();
+
+  /**
+   * Encapsulates flow context information.
+   */
+  protected static class FlowContext {
+    private final String userId;
+    private final String flowName;
+    private final Long flowRunId;
+
+    public FlowContext(String user, String flowName, Long flowRunId) {
+      this.userId = user;
+      this.flowName = flowName;
+      this.flowRunId = flowRunId;
+    }
+
+    protected String getUserId() {
+      return userId;
+    }
+
+    protected String getFlowName() {
+      return flowName;
+    }
+
+    protected Long getFlowRunId() {
+      return flowRunId;
+    }
+  }
+}
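
Subclasses only need to supply validation; the flow-context plumbing is inherited. A minimal, hypothetical subclass (ExampleReader is not part of this patch) showing the intended call sequence:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;

    import com.google.common.base.Preconditions;

    final class ExampleReader extends AbstractTimelineStorageReader {
      ExampleReader(TimelineReaderContext ctxt) {
        super(ctxt);
      }

      @Override
      protected void validateParams() {
        // Without cluster and app ids, the flow context lookup cannot work.
        Preconditions.checkNotNull(getContext().getClusterId(),
            "clusterId shouldn't be null");
        Preconditions.checkNotNull(getContext().getAppId(),
            "appId shouldn't be null");
      }

      void read(Configuration hbaseConf, Connection conn) throws IOException {
        validateParams();
        // Fills userId/flowName/flowRunId from the app-to-flow table when
        // the caller supplied only cluster and app ids; throws
        // NotFoundException if no flow context exists for the app.
        augmentParams(hbaseConf, conn);
        // ... issue Gets/Scans against the now-complete context ...
      }
    }
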
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index aa2bfda..0edd6a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -39,6 +40,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
@@ -48,10 +50,11 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -65,7 +68,7 @@
 
   public ApplicationEntityReader(TimelineReaderContext ctxt,
       TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve, true);
+    super(ctxt, entityFilters, toRetrieve);
   }
 
   public ApplicationEntityReader(TimelineReaderContext ctxt,
@@ -313,6 +316,8 @@
             context.getFlowName(), context.getFlowRunId(), context.getAppId());
     byte[] rowKey = applicationRowKey.getRowKey();
     Get get = new Get(rowKey);
+    // Set time range for metric values.
+    setMetricsTimeRange(get);
     get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     if (filterList != null && !filterList.getFilters().isEmpty()) {
       get.setFilter(filterList);
@@ -343,20 +348,9 @@
   @Override
   protected void augmentParams(Configuration hbaseConf, Connection conn)
       throws IOException {
-    TimelineReaderContext context = getContext();
     if (isSingleEntityRead()) {
       // Get flow context information from AppToFlow table.
-      if (context.getFlowName() == null || context.getFlowRunId() == null
-          || context.getUserId() == null) {
-        AppToFlowRowKey appToFlowRowKey =
-            new AppToFlowRowKey(context.getClusterId(), context.getAppId());
-        FlowContext flowContext =
-            lookupFlowContext(appToFlowRowKey,
-                hbaseConf, conn);
-        context.setFlowName(flowContext.getFlowName());
-        context.setFlowRunId(flowContext.getFlowRunId());
-        context.setUserId(flowContext.getUserId());
-      }
+      defaultAugmentParams(hbaseConf, conn);
     }
     // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
     // metricsToRetrieve are specified.
@@ -366,24 +360,65 @@
     }
   }
 
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, ApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
   @Override
   protected ResultScanner getResults(Configuration hbaseConf,
       Connection conn, FilterList filterList) throws IOException {
     Scan scan = new Scan();
     TimelineReaderContext context = getContext();
+    RowKeyPrefix<ApplicationRowKey> applicationRowKeyPrefix = null;
+
     // Whether or not flowRunID is null doesn't matter, the
     // ApplicationRowKeyPrefix will do the right thing.
-    RowKeyPrefix<ApplicationRowKey> applicationRowKeyPrefix =
-        new ApplicationRowKeyPrefix(context.getClusterId(),
-            context.getUserId(), context.getFlowName(),
-            context.getFlowRunId());
-    scan.setRowPrefixFilter(applicationRowKeyPrefix.getRowKeyPrefix());
+    // Default mode: always scan from the beginning of the entity type.
+    if (getFilters().getFromId() == null) {
+      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
+          context.getClusterId(), context.getUserId(), context.getFlowName(),
+          context.getFlowRunId());
+      scan.setRowPrefixFilter(applicationRowKeyPrefix.getRowKeyPrefix());
+    } else {
+      ApplicationRowKey applicationRowKey = null;
+      try {
+        applicationRowKey =
+            ApplicationRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(applicationRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(applicationRowKey.getRowKey());
+
+      // get the bytes for stop row
+      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
+          context.getClusterId(), context.getUserId(), context.getFlowName(),
+          context.getFlowRunId());
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              applicationRowKeyPrefix.getRowKeyPrefix()));
+    }
+
     FilterList newList = new FilterList();
     newList.addFilter(new PageFilter(getFilters().getLimit()));
     if (filterList != null && !filterList.getFilters().isEmpty()) {
       newList.addFilter(filterList);
     }
     scan.setFilter(newList);
+
+    // Set time range for metric values.
+    setMetricsTimeRange(scan);
     scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     return getTable().getResultScanner(hbaseConf, conn, scan);
   }
@@ -476,6 +511,10 @@
     if (hasField(fieldsToRetrieve, Field.METRICS)) {
       readMetrics(entity, result, ApplicationColumnPrefix.METRIC);
     }
+
+    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(result.getRow());
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return entity;
   }
 }
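
Together, the FROMID_KEY info entry written in parseEntity and the fromid handling in getResults implement cursor-style paging: the server returns the encoded row key of every application, and the client echoes the last one back as the next request's fromid. A sketch of the client-side loop, where fetchPage and process are hypothetical stand-ins for one reader call (REST: ...&limit=N&fromid=TOKEN) and the caller's logic:

    String fromId = null;
    while (true) {
      List<TimelineEntity> page = fetchPage(100, fromId); // hypothetical
      // The scan start row is inclusive, so after the first call the page
      // begins with the previous anchor entity; skip it to avoid duplicates.
      int first = (fromId == null) ? 0 : 1;
      if (page.size() <= first) {
        break; // nothing new: end of the application list
      }
      for (int i = first; i < page.size(); i++) {
        process(page.get(i)); // hypothetical consumer
      }
      TimelineEntity last = page.get(page.size() - 1);
      fromId = (String) last.getInfo().get(TimelineReaderUtils.FROMID_KEY);
    }
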
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
new file mode 100644
index 0000000..05570f1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Timeline entity reader for listing all available entity types given one
+ * reader context. Right now only supports listing all entity types within one
+ * YARN application.
+ */
+public final class EntityTypeReader extends AbstractTimelineStorageReader {
+
+  private static final Log LOG = LogFactory.getLog(EntityTypeReader.class);
+  private static final EntityTable ENTITY_TABLE = new EntityTable();
+
+  public EntityTypeReader(TimelineReaderContext context) {
+    super(context);
+  }
+
+  /**
+   * Reads a set of timeline entity types from the HBase storage for the given
+   * context.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @return the set of entity types found for the given context, each
+   *         represented as a string.
+   * @throws IOException if any exception is encountered while reading entities.
+   */
+  public Set<String> readEntityTypes(Configuration hbaseConf,
+      Connection conn) throws IOException {
+
+    validateParams();
+    augmentParams(hbaseConf, conn);
+
+    Set<String> types = new TreeSet<>();
+    TimelineReaderContext context = getContext();
+    EntityRowKeyPrefix prefix = new EntityRowKeyPrefix(context.getClusterId(),
+        context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+        context.getAppId());
+    byte[] currRowKey = prefix.getRowKeyPrefix();
+    byte[] nextRowKey = prefix.getRowKeyPrefix();
+    nextRowKey[nextRowKey.length - 1]++;
+
+    FilterList typeFilterList = new FilterList();
+    typeFilterList.addFilter(new FirstKeyOnlyFilter());
+    typeFilterList.addFilter(new KeyOnlyFilter());
+    typeFilterList.addFilter(new PageFilter(1));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("FilterList created for scan is - " + typeFilterList);
+    }
+
+    int counter = 0;
+    while (true) {
+      try (ResultScanner results =
+          getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey)) {
+        TimelineEntity entity = parseEntityForType(results.next());
+        if (entity == null) {
+          break;
+        }
+        ++counter;
+        if (!types.add(entity.getType())) {
+          LOG.warn("Failed to add type " + entity.getType()
+              + " to the result set because there is a duplicated copy. ");
+        }
+        String currType = entity.getType();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Current row key: " + Arrays.toString(currRowKey));
+          LOG.debug("New entity type discovered: " + currType);
+        }
+        currRowKey = getNextRowKey(prefix.getRowKeyPrefix(), currType);
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Scanned " + counter + "records for "
+          + types.size() + "types");
+    }
+    return types;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getAppId(),
+        "appId shouldn't be null");
+  }
+
+  /**
+   * Gets the row key prefix immediately following the given prefix and type.
+   *
+   * @param currRowKeyPrefix The current prefix that contains user, cluster,
+   *                         flow, run, and application id.
+   * @param entityType Current entity type.
+   * @return A prefix that sorts just after all row keys of the given type.
+   */
+  private static byte[] getNextRowKey(byte[] currRowKeyPrefix,
+      String entityType) {
+    if (currRowKeyPrefix == null || entityType == null) {
+      return null;
+    }
+
+    byte[] entityTypeEncoded = Separator.QUALIFIERS.join(
+        Separator.encode(entityType, Separator.SPACE, Separator.TAB,
+            Separator.QUALIFIERS),
+        Separator.EMPTY_BYTES);
+
+    byte[] currRowKey
+        = new byte[currRowKeyPrefix.length + entityTypeEncoded.length];
+    System.arraycopy(currRowKeyPrefix, 0, currRowKey, 0,
+        currRowKeyPrefix.length);
+    System.arraycopy(entityTypeEncoded, 0, currRowKey, currRowKeyPrefix.length,
+        entityTypeEncoded.length);
+
+    return HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+        currRowKey);
+  }
+
+  private ResultScanner getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList, byte[] startPrefix, byte[] endPrefix)
+      throws IOException {
+    Scan scan = new Scan(startPrefix, endPrefix);
+    scan.setFilter(filterList);
+    scan.setSmall(true);
+    return ENTITY_TABLE.getResultScanner(hbaseConf, conn, scan);
+  }
+
+  private TimelineEntity parseEntityForType(Result result)
+      throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    EntityRowKey newRowKey = EntityRowKey.parseRowKey(result.getRow());
+    entity.setType(newRowKey.getEntityType());
+    return entity;
+  }
+
+}
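
The type scan never walks every row: after seeing one entity of a type, it jumps to the smallest row key sorting after every key that shares the type prefix, which is what calculateTheClosestNextRowKeyForPrefix computes. A standalone sketch of that standard prefix-increment trick (not the patch's exact code):

    import java.util.Arrays;

    final class RowKeys {
      /**
       * Smallest byte[] sorting after every key that starts with the given
       * prefix: drop trailing 0xFF bytes, then increment the last one left.
       */
      static byte[] closestNextRowKeyForPrefix(byte[] prefix) {
        int offset = prefix.length;
        while (offset > 0 && prefix[offset - 1] == (byte) 0xFF) {
          offset--;
        }
        if (offset == 0) {
          // Prefix was all 0xFF bytes: nothing sorts after it; an empty
          // stop row tells HBase to scan to the end of the table.
          return new byte[0];
        }
        byte[] next = Arrays.copyOf(prefix, offset);
        next[offset - 1]++;
        return next;
      }
    }
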
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
index 9ba5e38..a1cdb29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
@@ -41,6 +42,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -60,7 +62,7 @@
 
   public FlowActivityEntityReader(TimelineReaderContext ctxt,
       TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve, true);
+    super(ctxt, entityFilters, toRetrieve);
   }
 
   public FlowActivityEntityReader(TimelineReaderContext ctxt,
@@ -110,11 +112,30 @@
       Connection conn, FilterList filterList) throws IOException {
     Scan scan = new Scan();
     String clusterId = getContext().getClusterId();
-    if (getFilters().getCreatedTimeBegin() == 0L &&
-        getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
+    if (getFilters().getFromId() == null
+        && getFilters().getCreatedTimeBegin() == 0L
+        && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
        // All records have to be chosen.
       scan.setRowPrefixFilter(new FlowActivityRowKeyPrefix(clusterId)
           .getRowKeyPrefix());
+    } else if (getFilters().getFromId() != null) {
+      FlowActivityRowKey key = null;
+      try {
+        key =
+            FlowActivityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!clusterId.equals(key.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + clusterId);
+      }
+      scan.setStartRow(key.getRowKey());
+      scan.setStopRow(
+          new FlowActivityRowKeyPrefix(clusterId,
+              (getFilters().getCreatedTimeBegin() <= 0 ? 0
+                  : (getFilters().getCreatedTimeBegin() - 1)))
+                      .getRowKeyPrefix());
     } else {
       scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId, getFilters()
           .getCreatedTimeEnd()).getRowKeyPrefix());
@@ -157,7 +178,8 @@
       flowRun.setId(flowRun.getId());
       flowActivity.addFlowRun(flowRun);
     }
-
+    flowActivity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return flowActivity;
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
index 986a28f..af043b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -39,10 +39,12 @@
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
@@ -63,7 +65,7 @@
 
   public FlowRunEntityReader(TimelineReaderContext ctxt,
       TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve, true);
+    super(ctxt, entityFilters, toRetrieve);
   }
 
   public FlowRunEntityReader(TimelineReaderContext ctxt,
@@ -210,10 +212,36 @@
       FilterList filterList) throws IOException {
     Scan scan = new Scan();
     TimelineReaderContext context = getContext();
-    RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix =
-        new FlowRunRowKeyPrefix(context.getClusterId(), context.getUserId(),
-            context.getFlowName());
-    scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
+    RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix = null;
+    if (getFilters().getFromId() == null) {
+      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName());
+      scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
+    } else {
+      FlowRunRowKey flowRunRowKey = null;
+      try {
+        flowRunRowKey =
+            FlowRunRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(flowRunRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+      // set start row
+      scan.setStartRow(flowRunRowKey.getRowKey());
+
+      // get the bytes for stop row
+      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName());
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              flowRunRowKeyPrefix.getRowKeyPrefix()));
+    }
+
     FilterList newList = new FilterList();
     newList.addFilter(new PageFilter(getFilters().getLimit()));
     if (filterList != null && !filterList.getFilters().isEmpty()) {
@@ -226,16 +254,11 @@
 
   @Override
   protected TimelineEntity parseEntity(Result result) throws IOException {
-    TimelineReaderContext context = getContext();
     FlowRunEntity flowRun = new FlowRunEntity();
-    flowRun.setUser(context.getUserId());
-    flowRun.setName(context.getFlowName());
-    if (isSingleEntityRead()) {
-      flowRun.setRunId(context.getFlowRunId());
-    } else {
-      FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
-      flowRun.setRunId(rowKey.getFlowRunId());
-    }
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
+    flowRun.setRunId(rowKey.getFlowRunId());
+    flowRun.setUser(rowKey.getUserId());
+    flowRun.setName(rowKey.getFlowName());
 
     // read the start time
     Long startTime = (Long) FlowRunColumn.MIN_START_TIME.readResult(result);
@@ -264,6 +287,8 @@
 
     // set the id
     flowRun.setId(flowRun.getId());
+    flowRun.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return flowRun;
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index 4e1ab8a..3a44445 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -19,12 +19,14 @@
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -32,20 +34,20 @@
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.FamilyFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
 import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -56,7 +58,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -68,20 +70,14 @@
   private static final EntityTable ENTITY_TABLE = new EntityTable();
 
   /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  /**
    * Used to convert strings key components to and from storage format.
    */
   private final KeyConverter<String> stringKeyConverter =
       new StringKeyConverter();
 
   public GenericEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve,
-      boolean sortedKeys) {
-    super(ctxt, entityFilters, toRetrieve, sortedKeys);
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
   }
 
   public GenericEntityReader(TimelineReaderContext ctxt,
@@ -139,7 +135,7 @@
    *
    * @return true if we need to fetch some of the columns, false otherwise.
    */
-  private boolean fetchPartialEventCols(TimelineFilterList eventFilters,
+  protected boolean fetchPartialEventCols(TimelineFilterList eventFilters,
       EnumSet<Field> fieldsToRetrieve) {
     return (eventFilters != null && !eventFilters.getFilterList().isEmpty() &&
         !hasField(fieldsToRetrieve, Field.EVENTS));
@@ -150,7 +146,7 @@
    *
    * @return true if we need to fetch some of the columns, false otherwise.
    */
-  private boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
+  protected boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
       EnumSet<Field> fieldsToRetrieve) {
     return (relatesTo != null && !relatesTo.getFilterList().isEmpty() &&
         !hasField(fieldsToRetrieve, Field.RELATES_TO));
@@ -400,60 +396,6 @@
     return listBasedOnFields;
   }
 
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-      Configuration hbaseConf, Connection conn) throws IOException {
-    byte[] rowKey = appToFlowRowKey.getRowKey();
-    Get get = new Get(rowKey);
-    Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-    if (result != null && !result.isEmpty()) {
-      return new FlowContext(AppToFlowColumn.USER_ID.readResult(result)
-          .toString(), AppToFlowColumn.FLOW_ID.readResult(result).toString(),
-          ((Number) AppToFlowColumn.FLOW_RUN_ID.readResult(result))
-          .longValue());
-    } else {
-      throw new NotFoundException(
-          "Unable to find the context flow ID and flow run ID for clusterId="
-              + appToFlowRowKey.getClusterId() + ", appId="
-              + appToFlowRowKey.getAppId());
-    }
-  }
-
-  /**
-   * Encapsulates flow context information.
-   */
-  protected static class FlowContext {
-    private final String userId;
-    private final String flowName;
-    private final Long flowRunId;
-
-    public FlowContext(String user, String flowName, Long flowRunId) {
-      this.userId = user;
-      this.flowName = flowName;
-      this.flowRunId = flowRunId;
-    }
-
-    protected String getUserId() {
-      return userId;
-    }
-
-    protected String getFlowName() {
-      return flowName;
-    }
-
-    protected Long getFlowRunId() {
-      return flowRunId;
-    }
-  }
-
   @Override
   protected void validateParams() {
     Preconditions.checkNotNull(getContext(), "context shouldn't be null");
@@ -474,19 +416,7 @@
   @Override
   protected void augmentParams(Configuration hbaseConf, Connection conn)
       throws IOException {
-    TimelineReaderContext context = getContext();
-    // In reality all three should be null or neither should be null
-    if (context.getFlowName() == null || context.getFlowRunId() == null
-        || context.getUserId() == null) {
-      // Get flow context information from AppToFlow table.
-      AppToFlowRowKey appToFlowRowKey =
-          new AppToFlowRowKey(context.getClusterId(), context.getAppId());
-      FlowContext flowContext =
-          lookupFlowContext(appToFlowRowKey, hbaseConf, conn);
-      context.setFlowName(flowContext.flowName);
-      context.setFlowRunId(flowContext.flowRunId);
-      context.setUserId(flowContext.userId);
-    }
+    defaultAugmentParams(hbaseConf, conn);
     // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
     // metricsToRetrieve are specified.
     getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
@@ -499,16 +429,52 @@
   protected Result getResult(Configuration hbaseConf, Connection conn,
       FilterList filterList) throws IOException {
     TimelineReaderContext context = getContext();
-    byte[] rowKey =
-        new EntityRowKey(context.getClusterId(), context.getUserId(),
-            context.getFlowName(), context.getFlowRunId(), context.getAppId(),
-            context.getEntityType(), context.getEntityId()).getRowKey();
-    Get get = new Get(rowKey);
-    get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      get.setFilter(filterList);
+    Result result = null;
+    if (context.getEntityIdPrefix() != null) {
+      byte[] rowKey = new EntityRowKey(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(),
+          context.getEntityIdPrefix(), context.getEntityId()).getRowKey();
+      Get get = new Get(rowKey);
+      setMetricsTimeRange(get);
+      get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+      if (filterList != null && !filterList.getFilters().isEmpty()) {
+        get.setFilter(filterList);
+      }
+      result = getTable().getResult(hbaseConf, conn, get);
+
+    } else {
+      // Prepare for range scan
+      // create single SingleColumnValueFilter and add to existing filters.
+      FilterList filter = new FilterList(Operator.MUST_PASS_ALL);
+      if (filterList != null && !filterList.getFilters().isEmpty()) {
+        filter.addFilter(filterList);
+      }
+      FilterList newFilter = new FilterList();
+      newFilter.addFilter(TimelineFilterUtils.createHBaseSingleColValueFilter(
+          EntityColumn.ID, context.getEntityId(), CompareOp.EQUAL));
+      newFilter.addFilter(new PageFilter(1));
+      filter.addFilter(newFilter);
+
+      ResultScanner results = getResults(hbaseConf, conn, filter);
+      try {
+        Iterator<Result> iterator = results.iterator();
+        if (iterator.hasNext()) {
+          result = iterator.next();
+        }
+      } finally {
+        results.close();
+      }
     }
-    return getTable().getResult(hbaseConf, conn, get);
+    return result;
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, EntityColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
   }
 
   @Override
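When the entityIdPrefix is known, the hunk above addresses the row directly with a Get; otherwise it falls back to a range scan that keeps only the row whose stored id column equals the requested entity id and stops after the first match. A minimal sketch of that fallback using raw HBase filters, assuming illustrative "i"/"id" family and qualifier names rather than the table's actual encoded ones:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.PageFilter;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Keep only rows whose stored "id" column equals the requested entity id.
    FilterList fallback = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    fallback.addFilter(new SingleColumnValueFilter(
        Bytes.toBytes("i"),              // assumed info column family name
        Bytes.toBytes("id"),             // assumed id column qualifier
        CompareOp.EQUAL,
        Bytes.toBytes("entity_0001")));  // requested entity id (illustrative)
    // Stop the scan after the first matching row.
    fallback.addFilter(new PageFilter(1));
    Scan scan = new Scan();
    scan.setFilter(fallback);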
@@ -518,11 +484,45 @@
     // and one type
     Scan scan = new Scan();
     TimelineReaderContext context = getContext();
-    RowKeyPrefix<EntityRowKey> entityRowKeyPrefix =
-        new EntityRowKeyPrefix(context.getClusterId(), context.getUserId(),
-            context.getFlowName(), context.getFlowRunId(), context.getAppId(),
-            context.getEntityType());
-    scan.setRowPrefixFilter(entityRowKeyPrefix.getRowKeyPrefix());
+    RowKeyPrefix<EntityRowKey> entityRowKeyPrefix = null;
+    // default mode, will always scan from the beginning of the entity type.
+    if (getFilters() == null || getFilters().getFromId() == null) {
+      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(), null, null);
+      scan.setRowPrefixFilter(entityRowKeyPrefix.getRowKeyPrefix());
+    } else { // pagination mode, will scan from given entityIdPrefix!entityId
+
+      EntityRowKey entityRowKey = null;
+      try {
+        entityRowKey =
+            EntityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(entityRowKey.getRowKey());
+
+      // get the bytes for stop row
+      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(), null, null);
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              entityRowKeyPrefix.getRowKeyPrefix()));
+
+      // set page filter to the limit. This filter has to be set only in
+      // pagination mode.
+      filterList.addFilter(new PageFilter(getFilters().getLimit()));
+    }
+    setMetricsTimeRange(scan);
     scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     if (filterList != null && !filterList.getFilters().isEmpty()) {
       scan.setFilter(filterList);
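The pagination branch above resumes the scan at the row encoded in the client-supplied fromid, which is the FROMID info value that parseEntity (next hunk) attaches to every returned entity. A hedged sketch of the round trip; lastEntity and scan stand in for objects obtained elsewhere:

    // Client side: remember the FROMID of the last entity on the current page.
    String fromId = lastEntity.getInfo()
        .get(TimelineReaderUtils.FROMID_KEY).toString();

    // Server side, next request: resume at that row key (inclusive).
    EntityRowKey resumeKey = EntityRowKey.parseRowKeyFromString(fromId);
    scan.setStartRow(resumeKey.getRowKey());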
@@ -536,10 +536,10 @@
       return null;
     }
     TimelineEntity entity = new TimelineEntity();
-    String entityType = EntityColumn.TYPE.readResult(result).toString();
-    entity.setType(entityType);
-    String entityId = EntityColumn.ID.readResult(result).toString();
-    entity.setId(entityId);
+    EntityRowKey parseRowKey = EntityRowKey.parseRowKey(result.getRow());
+    entity.setType(parseRowKey.getEntityType());
+    entity.setId(parseRowKey.getEntityId());
+    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
 
     TimelineEntityFilters filters = getFilters();
     // fetch created time
@@ -619,6 +619,9 @@
     if (hasField(fieldsToRetrieve, Field.METRICS)) {
       readMetrics(entity, result, EntityColumnPrefix.METRIC);
     }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
     return entity;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
new file mode 100644
index 0000000..e780dcc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
@@ -0,0 +1,488 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+class SubApplicationEntityReader extends GenericEntityReader {
+  private static final SubApplicationTable SUB_APPLICATION_TABLE =
+      new SubApplicationTable();
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link SubApplicationTable}.
+   */
+  @Override
+  protected BaseTable<?> getTable() {
+    return SUB_APPLICATION_TABLE;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    // Filters here cannot be null for multiple entity reads as they are set in
+    // augmentParams if null.
+    FilterList listBasedOnFilters = new FilterList();
+    TimelineEntityFilters filters = getFilters();
+    // Create filter list based on created time range and add it to
+    // listBasedOnFilters.
+    long createdTimeBegin = filters.getCreatedTimeBegin();
+    long createdTimeEnd = filters.getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createSingleColValueFiltersByRange(SubApplicationColumn.CREATED_TIME,
+              createdTimeBegin, createdTimeEnd));
+    }
+    // Create filter list based on metric filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList metricFilters = filters.getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.METRIC, metricFilters));
+    }
+    // Create filter list based on config filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList configFilters = filters.getConfigFilters();
+    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.CONFIG, configFilters));
+    }
+    // Create filter list based on info filters and add it to listBasedOnFilters
+    TimelineFilterList infoFilters = filters.getInfoFilters();
+    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createHBaseFilterList(SubApplicationColumnPrefix.INFO, infoFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Adds {@link QualifierFilter} filters to the filter list for each column
+   * of the sub application table.
+   *
+   * @param list filter list to which qualifier filters have to be added.
+   */
+  protected void updateFixedColumns(FilterList list) {
+    for (SubApplicationColumn column : SubApplicationColumn.values()) {
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryComparator(column.getColumnQualifierBytes())));
+    }
+  }
+
+  /**
+   * Creates a filter list which indicates that only some of the column
+   * qualifiers in the info column family will be returned in the result.
+   *
+   * @return filter list.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
+    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
+    // Add filters for each column in the sub application table.
+    updateFixedColumns(infoFamilyColsFilter);
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // If INFO field has to be retrieved, add a filter for fetching columns
+    // with INFO column prefix.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    TimelineFilterList relatesTo = getFilters().getRelatesTo();
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      // If RELATES_TO field has to be retrieved, add a filter for fetching
+      // columns with RELATES_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain RELATES_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // relatesTo filters are specified. relatesTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> relatesToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
+    }
+    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
+      // columns with IS_RELATED_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // isRelatedTo filters are specified. isRelatedTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> isRelatedToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
+    }
+    TimelineFilterList eventFilters = getFilters().getEventFilters();
+    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
+      // If EVENTS field has to be retrieved, add a filter for fetching columns
+      // with EVENT column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    } else if (eventFilters != null
+        && !eventFilters.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain EVENTS, we still need to
+      // have a filter to fetch some of the column qualifiers on the basis of
+      // event filters specified. Event filters will then be matched after
+      // fetching rows from HBase.
+      Set<String> eventCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.EVENT, eventCols));
+    }
+    return infoFamilyColsFilter;
+  }
+
+  /**
+   * Excludes, via filters, column prefixes which are not required (based on
+   * fields to retrieve) from the info column family. These filters are added
+   * to the filter list which selects the info column family.
+   *
+   * @param infoColFamilyList filter list for info column family.
+   */
+  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Events not required.
+    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    }
+    // Info not required.
+    if (!hasField(fieldsToRetrieve, Field.INFO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    // isRelatedTo not required.
+    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    }
+    // relatesTo not required.
+    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    }
+  }
+
+  /**
+   * Updates filter list based on fields for confs and metrics to retrieve.
+   *
+   * @param listBasedOnFields filter list based on fields.
+   * @throws IOException if any problem occurs while updating filter list.
+   */
+  private void updateFilterForConfsAndMetricsToRetrieve(
+      FilterList listBasedOnFields) throws IOException {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Please note that if confsToRetrieve is specified, we would have added
+    // CONFIGS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
+      // Create a filter list for configs.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getConfsToRetrieve(),
+              SubApplicationColumnFamily.CONFIGS,
+              SubApplicationColumnPrefix.CONFIG));
+    }
+
+    // Please note that if metricsToRetrieve is specified, we would have added
+    // METRICS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
+      // Create a filter list for metrics.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getMetricsToRetrieve(),
+              SubApplicationColumnFamily.METRICS,
+              SubApplicationColumnPrefix.METRIC));
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    if (!needCreateFilterListBasedOnFields()) {
+      // Fetch all the columns. No need for a filter.
+      return null;
+    }
+    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
+    FilterList infoColFamilyList = new FilterList();
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
+        new BinaryComparator(SubApplicationColumnFamily.INFO.getBytes()));
+    infoColFamilyList.addFilter(infoColumnFamily);
+    if (fetchPartialColsFromInfoFamily()) {
+      // We can fetch only some of the columns from info family.
+      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
+    } else {
+      // Exclude column prefixes in info column family which are not required
+      // based on fields to retrieve.
+      excludeFieldsFromInfoColFamily(infoColFamilyList);
+    }
+    listBasedOnFields.addFilter(infoColFamilyList);
+    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
+    return listBasedOnFields;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getDataToRetrieve(),
+        "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getDoAsUser(),
+        "DoAsUser shouldn't be null");
+    Preconditions.checkNotNull(getContext().getEntityType(),
+        "entityType shouldn't be null");
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    createFiltersIfNull();
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(query,
+        SubApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+
+    // Scan through part of the table to find the entities belonging to one
+    // app and one type.
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    if (context.getDoAsUser() == null) {
+      throw new BadRequestException("Invalid user!");
+    }
+
+    RowKeyPrefix<SubApplicationRowKey> subApplicationRowKeyPrefix = null;
+    // default mode, will always scan from the beginning of the entity type.
+    if (getFilters() == null || getFilters().getFromId() == null) {
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+      scan.setRowPrefixFilter(subApplicationRowKeyPrefix.getRowKeyPrefix());
+    } else { // pagination mode, will scan from given entityIdPrefix!entityId
+
+      SubApplicationRowKey entityRowKey = null;
+      try {
+        entityRowKey = SubApplicationRowKey
+            .parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(entityRowKey.getRowKey());
+
+      // get the bytes for stop row
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              subApplicationRowKeyPrefix.getRowKeyPrefix()));
+
+      // set page filter to the limit. This filter has to be set only in
+      // pagination mode.
+      filterList.addFilter(new PageFilter(getFilters().getLimit()));
+    }
+    setMetricsTimeRange(scan);
+    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      scan.setFilter(filterList);
+    }
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    throw new UnsupportedOperationException(
+        "we don't support a single entity query");
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    SubApplicationRowKey parseRowKey =
+        SubApplicationRowKey.parseRowKey(result.getRow());
+    entity.setType(parseRowKey.getEntityType());
+    entity.setId(parseRowKey.getEntityId());
+    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
+
+    TimelineEntityFilters filters = getFilters();
+    // fetch created time
+    Long createdTime =
+        (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
+    entity.setCreatedTime(createdTime);
+
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // fetch isRelatedTo entities and match the isRelatedTo filter. If the
+    // isRelatedTo filters do not match, the entity is dropped. We have to
+    // match filters locally as relevant HBase filters to filter out rows on
+    // the basis of isRelatedTo are not set in the HBase scan.
+    boolean checkIsRelatedTo =
+        filters.getIsRelatedTo() != null
+            && filters.getIsRelatedTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.IS_RELATED_TO,
+          true);
+      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
+          filters.getIsRelatedTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+        entity.getIsRelatedToEntities().clear();
+      }
+    }
+
+    // fetch relatesTo entities and match the relatesTo filter. If the
+    // relatesTo filters do not match, the entity is dropped. We have to match
+    // filters locally as relevant HBase filters to filter out rows on the
+    // basis of relatesTo are not set in the HBase scan.
+    boolean checkRelatesTo =
+        !isSingleEntityRead() && filters.getRelatesTo() != null
+            && filters.getRelatesTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO) || checkRelatesTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.RELATES_TO,
+          false);
+      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
+          filters.getRelatesTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+        entity.getRelatesToEntities().clear();
+      }
+    }
+
+    // fetch info if fieldsToRetrieve contains INFO or ALL.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.INFO, false);
+    }
+
+    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
+    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.CONFIG,
+          true);
+    }
+
+    // fetch events and match event filters if they exist. If the event
+    // filters do not match, the entity is dropped. We have to match filters
+    // locally as relevant HBase filters to filter out rows on the basis of
+    // events are not set in the HBase scan.
+    boolean checkEvents =
+        !isSingleEntityRead() && filters.getEventFilters() != null
+            && filters.getEventFilters().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
+      readEvents(entity, result, SubApplicationColumnPrefix.EVENT);
+      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
+          filters.getEventFilters())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+        entity.getEvents().clear();
+      }
+    }
+
+    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
+    if (hasField(fieldsToRetrieve, Field.METRICS)) {
+      readMetrics(entity, result, SubApplicationColumnPrefix.METRIC);
+    }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
+    return entity;
+  }
+
+}
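This reader scans rows keyed by the SubApplicationRowKey added later in this patch; putting the doAsUser first in the key is what makes the prefix scan above possible. A round-trip sketch with illustrative values:

    SubApplicationRowKey key = new SubApplicationRowKey(
        "someDoAsUser",  // subAppUserId: user the entities were written on behalf of
        "cluster1",      // clusterId
        "MY_TYPE",       // entityType (illustrative)
        0L,              // entityIdPrefix
        "entity_01",     // entityId (illustrative)
        "amUser");       // userId: the yarn user the AM runs as
    byte[] raw = key.getRowKey();
    SubApplicationRowKey parsed = SubApplicationRowKey.parseRowKey(raw);
    assert "entity_01".equals(parsed.getEntityId());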
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
index 424d141..07e8423 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
@@ -21,11 +21,10 @@
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.NavigableMap;
-import java.util.NavigableSet;
 import java.util.Set;
-import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
@@ -60,12 +59,12 @@
  * HBase storage. Different types can be defined for different types of the
  * entities that are being requested.
  */
-public abstract class TimelineEntityReader {
+public abstract class TimelineEntityReader extends
+    AbstractTimelineStorageReader {
   private static final Logger LOG =
       LoggerFactory.getLogger(TimelineEntityReader.class);
 
   private final boolean singleEntityRead;
-  private TimelineReaderContext context;
   private TimelineDataToRetrieve dataToRetrieve;
   // used only for multiple entity read mode
   private TimelineEntityFilters filters;
@@ -76,14 +75,6 @@
   private BaseTable<?> table;
 
   /**
-   * Specifies whether keys for this table are sorted in a manner where entities
-   * can be retrieved by created time. If true, it will be sufficient to collect
-   * the first results as specified by the limit. Otherwise all matched entities
-   * will be fetched and then limit applied.
-   */
-  private boolean sortedKeys = false;
-
-  /**
    * Used to convert strings key components to and from storage format.
    */
   private final KeyConverter<String> stringKeyConverter =
@@ -96,15 +87,11 @@
    *     made.
    * @param entityFilters Filters which limit the entities returned.
    * @param toRetrieve Data to retrieve for each entity.
-   * @param sortedKeys Specifies whether key for this table are sorted or not.
-   *     If sorted, entities can be retrieved by created time.
    */
   protected TimelineEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve,
-      boolean sortedKeys) {
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt);
     this.singleEntityRead = false;
-    this.sortedKeys = sortedKeys;
-    this.context = ctxt;
     this.dataToRetrieve = toRetrieve;
     this.filters = entityFilters;
 
@@ -120,8 +107,8 @@
    */
   protected TimelineEntityReader(TimelineReaderContext ctxt,
       TimelineDataToRetrieve toRetrieve) {
+    super(ctxt);
     this.singleEntityRead = true;
-    this.context = ctxt;
     this.dataToRetrieve = toRetrieve;
 
     this.setTable(getTable());
@@ -185,10 +172,6 @@
     return null;
   }
 
-  protected TimelineReaderContext getContext() {
-    return context;
-  }
-
   protected TimelineDataToRetrieve getDataToRetrieve() {
     return dataToRetrieve;
   }
@@ -203,7 +186,7 @@
    */
   protected void createFiltersIfNull() {
     if (filters == null) {
-      filters = new TimelineEntityFilters();
+      filters = new TimelineEntityFilters.Builder().build();
     }
   }
 
@@ -229,7 +212,7 @@
     if (result == null || result.isEmpty()) {
       // Could not find a matching row.
       LOG.info("Cannot find matching entity of type " +
-          context.getEntityType());
+          getContext().getEntityType());
       return null;
     }
     return parseEntity(result);
@@ -250,7 +233,7 @@
     validateParams();
     augmentParams(hbaseConf, conn);
 
-    NavigableSet<TimelineEntity> entities = new TreeSet<>();
+    Set<TimelineEntity> entities = new LinkedHashSet<>();
     FilterList filterList = createFilterList();
     if (LOG.isDebugEnabled() && filterList != null) {
       LOG.debug("FilterList created for scan is - " + filterList);
@@ -263,14 +246,8 @@
           continue;
         }
         entities.add(entity);
-        if (!sortedKeys) {
-          if (entities.size() > filters.getLimit()) {
-            entities.pollLast();
-          }
-        } else {
-          if (entities.size() == filters.getLimit()) {
-            break;
-          }
+        if (entities.size() == filters.getLimit()) {
+          break;
         }
       }
       return entities;
@@ -289,21 +266,6 @@
   }
 
   /**
-   * Validates the required parameters to read the entities.
-   */
-  protected abstract void validateParams();
-
-  /**
-   * Sets certain parameters to defaults if the values are not provided.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @throws IOException if any exception is encountered while setting params.
-   */
-  protected abstract void augmentParams(Configuration hbaseConf,
-      Connection conn) throws IOException;
-
-  /**
    * Fetches a {@link Result} instance for a single-entity read.
    *
    * @param hbaseConf HBase Configuration.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
index b2a9476..fa16077 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
@@ -82,8 +82,24 @@
         YARN_FLOW_RUN.matches(context.getEntityType())) {
       return new FlowRunEntityReader(context, filters, dataToRetrieve);
     } else {
+      if (context.getDoAsUser() != null) {
+        return new SubApplicationEntityReader(context, filters, dataToRetrieve);
+      }
       // assume we're dealing with a generic entity read
-      return new GenericEntityReader(context, filters, dataToRetrieve, false);
+      return new GenericEntityReader(context, filters, dataToRetrieve);
     }
   }
+
+  /**
+   * Creates a timeline entity type reader that will read all available entity
+   * types within the specified context.
+   *
+   * @param context Reader context which defines the scope in which the query
+   *                has to be made. Limited to application level only.
+   * @return an <cite>EntityTypeReader</cite> object
+   */
+  public static EntityTypeReader createEntityTypeReader(
+      TimelineReaderContext context) {
+    return new EntityTypeReader(context);
+  }
 }
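With this hunk, reader selection keys off the context: a non-null doAsUser routes a generic entity read to the new SubApplicationEntityReader, and entity-type enumeration gets its own factory method. A short sketch, assuming ctx is a TimelineReaderContext obtained elsewhere:

    // Enumerate available entity types within an application-scoped context.
    EntityTypeReader typeReader =
        TimelineEntityReaderFactory.createEntityTypeReader(ctx);

    // For entity reads, the sub-application path is taken exactly when the
    // caller supplied a doAsUser on the context.
    boolean usesSubApplicationReader = ctx.getDoAsUser() != null;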
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
new file mode 100644
index 0000000..46b0cc90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link SubApplicationTable}.
+ */
+public enum SubApplicationColumn implements Column<SubApplicationTable> {
+
+  /**
+   * Identifier for the sub application.
+   */
+  ID(SubApplicationColumnFamily.INFO, "id"),
+
+  /**
+   * The type of sub application.
+   */
+  TYPE(SubApplicationColumnFamily.INFO, "type"),
+
+  /**
+   * When the sub application was created.
+   */
+  CREATED_TIME(SubApplicationColumnFamily.INFO, "created_time",
+      new LongConverter()),
+
+  /**
+   * The version of the flow that this sub application belongs to.
+   */
+  FLOW_VERSION(SubApplicationColumnFamily.INFO, "flow_version");
+
+  private final ColumnHelper<SubApplicationTable> column;
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier) {
+    this(columnFamily, columnQualifier, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.column = new ColumnHelper<SubApplicationTable>(columnFamily,
+        converter);
+  }
+
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, Long timestamp,
+      Object inputValue, Attribute... attributes) throws IOException {
+    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
+        inputValue, attributes);
+  }
+
+  public Object readResult(Result result) throws IOException {
+    return column.readResult(result, columnQualifierBytes);
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return column.getValueConverter();
+  }
+
+}
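A hedged usage sketch for the enum above; rowKey, tableMutator and result stand in for objects created by the surrounding writer/reader code, and the timestamp handling (null delegating to the column helper) is an assumption:

    // Write the created-time cell and read it back from a query Result.
    SubApplicationColumn.CREATED_TIME.store(
        rowKey, tableMutator, null /* timestamp */, 1500000000000L);
    Long createdTime =
        (Long) SubApplicationColumn.CREATED_TIME.readResult(result);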
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
new file mode 100644
index 0000000..1d7f8fd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the sub application table column families.
+ */
+public enum SubApplicationColumnFamily
+    implements ColumnFamily<SubApplicationTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * column family filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons:
+   * a) the size of the config values can be very large and
+   * b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value
+   *          create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  SubApplicationColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
new file mode 100644
index 0000000..06ecced
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the sub app table.
+ */
+public enum SubApplicationColumnPrefix
+    implements ColumnPrefix<SubApplicationTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(SubApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(SubApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(SubApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an entity.
+   */
+  EVENT(SubApplicationColumnFamily.INFO, "e", true),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(SubApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(SubApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper<SubApplicationTable> column;
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix) {
+    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual) {
+    this(columnFamily, columnPrefix, compoundColQual,
+        GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, ValueConverter converter) {
+    this(columnFamily, columnPrefix, false, converter);
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param compoundColQual whether the column qualifier is compound.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
+    column = new ColumnHelper<SubApplicationTable>(columnFamily, converter);
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+  }
+
+  /**
+   * @return the column prefix value.
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return column.getValueConverter();
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #store(byte[],
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
+   */
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, String qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #store(byte[],
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
+   */
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, byte[] qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
+   */
+  public Object readResult(Result result, String qualifier) throws IOException {
+    byte[] columnQualifier =
+        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
+    return column.readResult(result, columnQualifier);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResults(org.apache.hadoop.hbase.client.Result,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
+   */
+  public <K> Map<K, Object> readResults(Result result,
+      KeyConverter<K> keyConverter) throws IOException {
+    return column.readResults(result, columnPrefixBytes, keyConverter);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
+   */
+  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
+      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
+      throws IOException {
+    return column.readResultsWithTimestamps(result, columnPrefixBytes,
+        keyConverter);
+  }
+
+}
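A hedged sketch of reading prefixed cells back with the enum above; result stands in for an HBase Result fetched elsewhere, and StringKeyConverter is the same converter the readers in this patch use for string key components:

    import java.util.Map;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;

    // All info key/value pairs stored under the "i" prefix, keyed by the
    // decoded info key.
    Map<String, Object> info = SubApplicationColumnPrefix.INFO
        .readResults(result, new StringKeyConverter());

    // A single config value by its exact qualifier (illustrative key).
    Object mapMemory = SubApplicationColumnPrefix.CONFIG
        .readResult(result, "mapreduce.map.memory.mb");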
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
new file mode 100644
index 0000000..fb1f774
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the sub app table.
+ */
+public class SubApplicationRowKey {
+  private final String subAppUserId;
+  private final String clusterId;
+  private final String entityType;
+  private final Long entityIdPrefix;
+  private final String entityId;
+  private final String userId;
+  private final SubApplicationRowKeyConverter subAppRowKeyConverter =
+      new SubApplicationRowKeyConverter();
+
+  public SubApplicationRowKey(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId, String userId) {
+    this.subAppUserId = subAppUserId;
+    this.clusterId = clusterId;
+    this.entityType = entityType;
+    this.entityIdPrefix = entityIdPrefix;
+    this.entityId = entityId;
+    this.userId = userId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getSubAppUserId() {
+    return subAppUserId;
+  }
+
+  public String getEntityType() {
+    return entityType;
+  }
+
+  public String getEntityId() {
+    return entityId;
+  }
+
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * {@code subAppUserId!clusterId!entityType!
+   * entityIdPrefix!entityId!userId}.
+   * Typically used while querying a specific sub app.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @return byte array with the row key.
+   */
+  public byte[] getRowKey() {
+    return subAppRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey byte representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
+    return new SubApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * <p>
+   * {@code subAppUserId!clusterId!
+   * entityType!entityIdPrefix!entityId!userId}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * </p>
+   *
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return subAppRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   *
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKeyFromString(
+      String encodedRowKey) {
+    return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes the row key for the sub app table.
+   * The row key is of the form:
+   * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   */
+  private static final class SubApplicationRowKeyConverter
+      implements KeyConverter<SubApplicationRowKey>,
+      KeyConverterToString<SubApplicationRowKey> {
+
+    private SubApplicationRowKeyConverter() {
+    }
+
+    /**
+     * The sub app row key is of the form
+     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId,
+     * with each segment separated by !.
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * The sizes below indicate the size of each of these segments
+     * in sequence. subAppUserId, clusterId, entityType, entityId and
+     * userId are strings; the entity id prefix is a long and hence
+     * 8 bytes in size. Strings are variable in size (i.e. they end
+     * whenever a separator is encountered). This is used while
+     * decoding and helps in determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE};
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes SubApplicationRowKey object into a byte array with each
+     * component/field in SubApplicationRowKey separated by
+     * Separator#QUALIFIERS.
+     * This leads to a sub app table row key of the form
+     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * If entityType in the passed SubApplicationRowKey object is null
+     * (and the fields preceding it, i.e. subAppUserId and clusterId, are
+     * not null), this returns a row key prefix of the form
+     * subAppUserId!clusterId!
+     * If entityIdPrefix in SubApplicationRowKey is null
+     * (other components are not null), this returns a row key prefix
+     * of the form subAppUserId!clusterId!entityType!
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(SubApplicationRowKey rowKey) {
+      byte[] subAppUser = Separator.encode(rowKey.getSubAppUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+      byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+      byte[] first = Separator.QUALIFIERS.join(subAppUser, cluster);
+      if (rowKey.getEntityType() == null) {
+        return first;
+      }
+      byte[] entityType = Separator.encode(rowKey.getEntityType(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      if (rowKey.getEntityIdPrefix() == null) {
+        return Separator.QUALIFIERS.join(first, entityType,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+
+      if (rowKey.getEntityId() == null) {
+        return Separator.QUALIFIERS.join(first, entityType, entityIdPrefix,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] userId = Separator.encode(rowKey.getUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] second = Separator.QUALIFIERS.join(entityType, entityIdPrefix,
+          entityId, userId);
+
+      return Separator.QUALIFIERS.join(first, second);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes a sub application row key of the form
+     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId,
+     * represented in byte format, and converts it into a
+     * SubApplicationRowKey object.
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public SubApplicationRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 6) {
+        throw new IllegalArgumentException(
+            "the row key is not valid for " + "a sub app");
+      }
+      String subAppUserId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String entityType = Separator.decode(Bytes.toString(rowKeyComponents[2]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      Long entityIdPrefix = Bytes.toLong(rowKeyComponents[3]);
+
+      String entityId = Separator.decode(Bytes.toString(rowKeyComponents[4]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[5]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      return new SubApplicationRowKey(subAppUserId, clusterId, entityType,
+          entityIdPrefix, entityId, userId);
+    }
+
+    @Override
+    public String encodeAsString(SubApplicationRowKey key) {
+      if (key.subAppUserId == null || key.clusterId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null || key.userId == null) {
+        throw new IllegalArgumentException();
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(
+          new String[] {key.subAppUserId, key.clusterId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId, key.userId});
+    }
+
+    @Override
+    public SubApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 6) {
+        throw new IllegalArgumentException(
+            "Invalid row key for sub app table.");
+      }
+      Long entityIdPrefix = Long.valueOf(split.get(3));
+      return new SubApplicationRowKey(split.get(0), split.get(1),
+          split.get(2), entityIdPrefix, split.get(4), split.get(5));
+    }
+  }
+}
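A round trip through the converter above can be sketched as follows (a minimal example using only the classes from this patch; all values are hypothetical):

    SubApplicationRowKey key = new SubApplicationRowKey(
        "doAsUser1",    // subAppUserId, usually the doAsUser
        "yarn-cluster", // clusterId
        "DAG",          // entityType
        54321L,         // entityIdPrefix, encoded as an 8-byte long
        "entity1",      // entityId
        "amUser");      // userId, the yarn user the AM runs as

    // Binary round trip.
    SubApplicationRowKey parsed =
        SubApplicationRowKey.parseRowKey(key.getRowKey());

    // String round trip, with delimiter/escape handling applied.
    SubApplicationRowKey fromString =
        SubApplicationRowKey.parseRowKeyFromString(key.getRowKeyAsString());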
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
new file mode 100644
index 0000000..0c04959
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial rowkey, without the entityId or without both the
+ * entityType and entityId, for the sub application table.
+ *
+ */
+public class SubApplicationRowKeyPrefix extends SubApplicationRowKey
+    implements RowKeyPrefix<SubApplicationRowKey> {
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the sub
+   * application table:
+   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @param subAppUserId
+   *          identifying the subApp User
+   * @param clusterId
+   *          identifying the cluster
+   * @param entityType
+   *          which entity type
+   * @param entityIdPrefix
+   *          for entityId
+   * @param entityId
+   *          for an entity
+   * @param userId
+   *          for the user who runs the AM
+   */
+  public SubApplicationRowKeyPrefix(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId,
+      String userId) {
+    super(subAppUserId, clusterId, entityType, entityIdPrefix, entityId,
+        userId);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}
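A prefix built this way is typically fed to an HBase scan. A hedged sketch (Scan#setRowPrefixFilter is standard HBase client API; the values are hypothetical):

    // Scan all sub app entities of one type for a (doAsUser, cluster) pair
    // by leaving the trailing row key fields null.
    SubApplicationRowKeyPrefix prefix = new SubApplicationRowKeyPrefix(
        "doAsUser1", "yarn-cluster", "DAG", null, null, null);
    Scan scan = new Scan();
    scan.setRowPrefixFilter(prefix.getRowKeyPrefix());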
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
new file mode 100644
index 0000000..334bab6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+
+/**
+ * The sub application table has three column families:
+ * info, config and metrics.
+ * Info stores information about a timeline entity object;
+ * config stores configuration data of a timeline entity object;
+ * metrics stores the metrics of a timeline entity object.
+ *
+ * Example sub application table record:
+ *
+ * <pre>
+ * |-------------------------------------------------------------------------|
+ * |  Row          | Column Family             | Column Family| Column Family|
+ * |  key          | info                      | metrics      | config       |
+ * |-------------------------------------------------------------------------|
+ * | subAppUserId! | id:entityId               | metricId1:   | configKey1:  |
+ * | clusterId!    | type:entityType           | metricValue1 | configValue1 |
+ * | entityType!   |                           | @timestamp1  |              |
+ * | idPrefix!     |                           |              | configKey2:  |
+ * | entityId!     | created_time:             | metricId1:   | configValue2 |
+ * | userId        | 1392993084018             | metricValue2 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | i!infoKey:                |              |              |
+ * |               | infoValue                 | metricId1:   |              |
+ * |               |                           | metricValue1 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | e!eventId=timestamp=      |              |              |
+ * |               | infoKey:                  |              |              |
+ * |               | eventInfoValue            |              |              |
+ * |               |                           |              |              |
+ * |               | r!relatesToKey:           |              |              |
+ * |               | id3=id4=id5               |              |              |
+ * |               |                           |              |              |
+ * |               | s!isRelatedToKey          |              |              |
+ * |               | id7=id9=id6               |              |              |
+ * |               |                           |              |              |
+ * |               | flowVersion:              |              |              |
+ * |               | versionValue              |              |              |
+ * |-------------------------------------------------------------------------|
+ * </pre>
+ */
+public class SubApplicationTable extends BaseTable<SubApplicationTable> {
+  /** sub app prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "subapplication";
+
+  /** config param name that specifies the subapplication table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /**
+   * config param name that specifies the TTL for metrics column family in
+   * subapplication table.
+   */
+  private static final String METRICS_TTL_CONF_NAME = PREFIX
+      + ".table.metrics.ttl";
+
+  /**
+   * config param name that specifies max-versions for
+   * metrics column family in subapplication table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+      PREFIX + ".table.metrics.max-versions";
+
+  /** default value for subapplication table name. */
+  public static final String DEFAULT_TABLE_NAME =
+      "timelineservice.subapplication";
+
+  /** default TTL is 30 days for metrics timeseries. */
+  private static final int DEFAULT_METRICS_TTL = 2592000;
+
+  /** default max number of versions. */
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
+
+  private static final Log LOG = LogFactory.getLog(
+      SubApplicationTable.class);
+
+  public SubApplicationTable() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
+   * (org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor subAppTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    subAppTableDescp.addFamily(infoCF);
+
+    HColumnDescriptor configCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.CONFIGS.getBytes());
+    configCF.setBloomFilterType(BloomType.ROWCOL);
+    configCF.setBlockCacheEnabled(true);
+    subAppTableDescp.addFamily(configCF);
+
+    HColumnDescriptor metricsCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.METRICS.getBytes());
+    subAppTableDescp.addFamily(metricsCF);
+    metricsCF.setBlockCacheEnabled(true);
+    // always keep 1 version (the latest)
+    metricsCF.setMinVersions(1);
+    metricsCF.setMaxVersions(
+        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
+    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
+        DEFAULT_METRICS_TTL));
+    subAppTableDescp.setRegionSplitPolicyClassName(
+        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    subAppTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(subAppTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+
+  /**
+   * @param metricsTTL time to live parameter for the metrics in this table.
+   * @param hbaseConf configuration in which to set the metrics TTL config
+   *          variable.
+   */
+  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
+    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
+  }
+
+}
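The TTL and max-versions knobs are ordinary Configuration entries, and setMetricsTTL offers a typed setter for the former. A minimal sketch of overriding the defaults before creating the schema (admin is assumed to be an open HBase Admin handle):

    Configuration hbaseConf = HBaseConfiguration.create();
    SubApplicationTable table = new SubApplicationTable();
    // Keep metrics for 7 days instead of the 30-day default.
    table.setMetricsTTL(7 * 24 * 60 * 60, hbaseConf);
    table.createTable(admin, hbaseConf);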
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
similarity index 65%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
index 5d14c6f..52cc399 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,10 +16,13 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication
+ * contains classes related to implementation for subapplication table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
similarity index 66%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
index 5d14c6f..402a89b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
@@ -16,10 +16,18 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+import org.junit.Test;
+
+/**
+ * Unit tests for HBaseTimelineStorageUtils static methods.
+ */
+public class TestHBaseTimelineStorageUtils {
+
+  @Test(expected=NullPointerException.class)
+  public void testGetTimelineServiceHBaseConfNullArgument() throws Exception {
+    HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
index 58df970..1bd363f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
@@ -26,6 +26,10 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.junit.Test;
 
+/**
+ * Unit tests for key converters for various tables' row keys.
+ *
+ */
 public class TestKeyConverters {
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
index 5beb189..4770238 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
@@ -22,6 +22,7 @@
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
@@ -30,9 +31,14 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
 import org.junit.Test;
 
 
+/**
+ * Class to test the row key structures for various tables.
+ *
+ */
 public class TestRowKeys {
 
   private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
@@ -40,6 +46,7 @@
       .toBytes(QUALIFIER_SEP);
   private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
   private final static String USER = QUALIFIER_SEP + "user";
+  private final static String SUB_APP_USER = QUALIFIER_SEP + "subAppUser";
   private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
       + QUALIFIER_SEP;
   private final static Long FLOW_RUN_ID;
@@ -126,43 +133,46 @@
    */
   @Test
   public void testAppToFlowRowKey() {
-    byte[] byteRowKey = new AppToFlowRowKey(CLUSTER,
-        APPLICATION_ID).getRowKey();
+    byte[] byteRowKey = new AppToFlowRowKey(APPLICATION_ID).getRowKey();
     AppToFlowRowKey rowKey = AppToFlowRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
     assertEquals(APPLICATION_ID, rowKey.getAppId());
   }
 
   @Test
   public void testEntityRowKey() {
-    String entityId = "!ent!ity!!id!";
-    String entityType = "entity!Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId("!ent!ity!!id!");
+    entity.setType("entity!Type");
+    entity.setIdPrefix(54321);
+
     byte[] byteRowKey =
         new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
-            entityType, entityId).getRowKey();
+            entity.getType(), entity.getIdPrefix(),
+            entity.getId()).getRowKey();
     EntityRowKey rowKey = EntityRowKey.parseRowKey(byteRowKey);
     assertEquals(CLUSTER, rowKey.getClusterId());
     assertEquals(USER, rowKey.getUserId());
     assertEquals(FLOW_NAME, rowKey.getFlowName());
     assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
     assertEquals(APPLICATION_ID, rowKey.getAppId());
-    assertEquals(entityType, rowKey.getEntityType());
-    assertEquals(entityId, rowKey.getEntityId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
 
     byte[] byteRowKeyPrefix =
         new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-            APPLICATION_ID, entityType).getRowKeyPrefix();
+            APPLICATION_ID, entity.getType(), null, null)
+                .getRowKeyPrefix();
     byte[][] splits =
         Separator.QUALIFIERS.split(
             byteRowKeyPrefix,
             new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
                 Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
                 AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
-                Separator.VARIABLE_SIZE});
+                Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE });
     assertEquals(7, splits.length);
-    assertEquals(0, splits[6].length);
     assertEquals(APPLICATION_ID, new AppIdKeyConverter().decode(splits[4]));
-    assertEquals(entityType,
+    assertEquals(entity.getType(),
         Separator.QUALIFIERS.decode(Bytes.toString(splits[5])));
     verifyRowPrefixBytes(byteRowKeyPrefix);
 
@@ -243,4 +253,24 @@
     verifyRowPrefixBytes(byteRowKeyPrefix);
   }
 
+  @Test
+  public void testSubAppRowKey() {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId("entity1");
+    entity.setType("DAG");
+    entity.setIdPrefix(54321);
+
+    byte[] byteRowKey =
+        new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+            entity.getType(), entity.getIdPrefix(),
+            entity.getId(), USER).getRowKey();
+    SubApplicationRowKey rowKey = SubApplicationRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
+
 }
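The split that this test performs by hand mirrors what the row key converters do internally: string segments are variable-length and end at the next separator, while the idPrefix segment is always the 8 raw bytes of a long and is skipped over rather than scanned, which is why it never needs escaping. A standalone illustration (hypothetical, not part of the patch):

    int[] sizes = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
        Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE};
    byte[][] parts = Separator.QUALIFIERS.split(rowKeyBytes, sizes);
    long idPrefix = Bytes.toLong(parts[3]); // fixed 8 bytes, no decoding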
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
new file mode 100644
index 0000000..148cf56
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.junit.Test;
+
+/**
+ * Test for row key as string.
+ */
+public class TestRowKeysAsString {
+
+  private final static String CLUSTER =
+      "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+  private final static String USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
+  private final static String SUB_APP_USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "subAppUser";
+
+  private final static String FLOW_NAME =
+      "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
+          + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+  private final static Long FLOW_RUN_ID = System.currentTimeMillis();
+  private final static String APPLICATION_ID =
+      ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
+
+  @Test(timeout = 10000)
+  public void testApplicationRow() {
+    String rowKeyAsString = new ApplicationRowKey(CLUSTER, USER, FLOW_NAME,
+        FLOW_RUN_ID, APPLICATION_ID).getRowKeyAsString();
+    ApplicationRowKey rowKey =
+        ApplicationRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+  }
+
+  @Test(timeout = 10000)
+  public void testEntityRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString =
+        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
+            entity.getType(), entity.getIdPrefix(), entity.getId())
+                .getRowKeyAsString();
+    EntityRowKey rowKey = EntityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowActivityRowKey() {
+    Long ts = 1459900830000L;
+    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+    String rowKeyAsString = new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME)
+        .getRowKeyAsString();
+    FlowActivityRowKey rowKey =
+        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowRunRowKey() {
+    String rowKeyAsString =
+        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
+            .getRowKeyAsString();
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+  }
+
+  @Test(timeout = 10000)
+  public void testSubApplicationRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString = new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+        entity.getType(), entity.getIdPrefix(), entity.getId(), USER)
+            .getRowKeyAsString();
+    SubApplicationRowKey rowKey = SubApplicationRowKey
+        .parseRowKeyFromString(rowKeyAsString);
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
+}
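The escaping these tests bake into CLUSTER, USER and the entity ids is what makes the string form safe: joinAndEscapeStrings escapes any delimiter or escape characters inside a component before joining, and split reverses it. A compact round-trip sketch (hypothetical values):

    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
    String[] parts = {"cl" + del + "uster", "us" + esc + "er"};
    String joined = TimelineReaderUtils.joinAndEscapeStrings(parts);
    List<String> back = TimelineReaderUtils.split(joined);
    assert back.get(0).equals(parts[0]) && back.get(1).equals(parts[1]);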
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index 56f7b2b..38221fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -18,27 +18,22 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.util.concurrent.Future;
+
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
 /**
  * Service that handles writes to the timeline service and writes them to the
  * backing storage for a given YARN application.
@@ -51,30 +46,68 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TimelineCollector.class);
 
-  private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1;
-  private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15;
-  private static Set<String> entityTypesSkipAggregation
-      = initializeSkipSet();
-
   private final ApplicationId appId;
+  private final String appUser;
   private final TimelineCollectorContext context;
-  private ScheduledThreadPoolExecutor appAggregationExecutor;
-  private AppLevelAggregator appAggregator;
   private UserGroupInformation currentUser;
+  private Token<TimelineDelegationTokenIdentifier> delegationTokenForApp;
+  private long tokenMaxDate = 0;
+  private String tokenRenewer;
+  private Future<?> renewalOrRegenerationFuture;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
+    this(appId, null);
+  }
+
+  public AppLevelTimelineCollector(ApplicationId appId, String user) {
     super(AppLevelTimelineCollector.class.getName() + " - " + appId.toString());
     Preconditions.checkNotNull(appId, "AppId shouldn't be null");
     this.appId = appId;
+    this.appUser = user;
     context = new TimelineCollectorContext();
   }
 
-  private static Set<String> initializeSkipSet() {
-    Set<String> result = new HashSet<>();
-    result.add(TimelineEntityType.YARN_APPLICATION.toString());
-    result.add(TimelineEntityType.YARN_FLOW_RUN.toString());
-    result.add(TimelineEntityType.YARN_FLOW_ACTIVITY.toString());
-    return result;
+  public UserGroupInformation getCurrentUser() {
+    return currentUser;
+  }
+
+  public String getAppUser() {
+    return appUser;
+  }
+
+  void setDelegationTokenAndFutureForApp(
+      Token<TimelineDelegationTokenIdentifier> token,
+      Future<?> appRenewalOrRegenerationFuture, long tknMaxDate,
+      String renewer) {
+    this.delegationTokenForApp = token;
+    this.tokenMaxDate = tknMaxDate;
+    this.tokenRenewer = renewer;
+    this.renewalOrRegenerationFuture = appRenewalOrRegenerationFuture;
+  }
+
+  void setRenewalOrRegenerationFutureForApp(
+      Future<?> appRenewalOrRegenerationFuture) {
+    this.renewalOrRegenerationFuture = appRenewalOrRegenerationFuture;
+  }
+
+  void cancelRenewalOrRegenerationFutureForApp() {
+    if (renewalOrRegenerationFuture != null &&
+        !renewalOrRegenerationFuture.isDone()) {
+      renewalOrRegenerationFuture.cancel(true);
+    }
+  }
+
+  long getAppDelegationTokenMaxDate() {
+    return tokenMaxDate;
+  }
+
+  String getAppDelegationTokenRenewer() {
+    return tokenRenewer;
+  }
+
+  @VisibleForTesting
+  public Token<TimelineDelegationTokenIdentifier> getDelegationTokenForApp() {
+    return this.delegationTokenForApp;
   }
 
   @Override
@@ -92,29 +125,12 @@
 
   @Override
   protected void serviceStart() throws Exception {
-    // Launch the aggregation thread
-    appAggregationExecutor = new ScheduledThreadPoolExecutor(
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_NUM_THREADS,
-        new ThreadFactoryBuilder()
-            .setNameFormat("TimelineCollector Aggregation thread #%d")
-            .build());
-    appAggregator = new AppLevelAggregator();
-    appAggregationExecutor.scheduleAtFixedRate(appAggregator,
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
-        TimeUnit.SECONDS);
     super.serviceStart();
   }
 
   @Override
   protected void serviceStop() throws Exception {
-    appAggregationExecutor.shutdown();
-    if (!appAggregationExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
-      LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
-      appAggregationExecutor.shutdownNow();
-    }
-    // Perform one round of aggregation after the aggregation executor is done.
-    appAggregator.aggregate();
+    cancelRenewalOrRegenerationFutureForApp();
     super.serviceStop();
   }
 
@@ -122,49 +138,4 @@
   public TimelineCollectorContext getTimelineEntityContext() {
     return context;
   }
-
-  @Override
-  protected Set<String> getEntityTypesSkipAggregation() {
-    return entityTypesSkipAggregation;
-  }
-
-  private class AppLevelAggregator implements Runnable {
-
-    private void aggregate() {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("App-level real-time aggregating");
-      }
-      if (!isReadyToAggregate()) {
-        LOG.warn("App-level collector is not ready, skip aggregation. ");
-        return;
-      }
-      try {
-        TimelineCollectorContext currContext = getTimelineEntityContext();
-        Map<String, AggregationStatusTable> aggregationGroups
-            = getAggregationGroups();
-        if (aggregationGroups == null
-            || aggregationGroups.isEmpty()) {
-          LOG.debug("App-level collector is empty, skip aggregation. ");
-          return;
-        }
-        TimelineEntity resultEntity = TimelineCollector.aggregateWithoutGroupId(
-            aggregationGroups, currContext.getAppId(),
-            TimelineEntityType.YARN_APPLICATION.toString());
-        TimelineEntities entities = new TimelineEntities();
-        entities.addEntity(resultEntity);
-        putEntitiesAsync(entities, currentUser);
-      } catch (Exception e) {
-        LOG.error("Error aggregating timeline metrics", e);
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("App-level real-time aggregation complete");
-      }
-    }
-
-    @Override
-    public void run() {
-      aggregate();
-    }
-  }
-
 }
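The new token fields are populated by the collector manager rather than by the collector itself. A hedged sketch of the expected call pattern, with the executor and task names purely illustrative:

    // The manager schedules renewal/regeneration and parks the Future on
    // the collector so serviceStop() can cancel any pending work.
    Future<?> future = tokenRenewalExecutor.schedule(
        renewalTask, renewIntervalMs, TimeUnit.MILLISECONDS);
    collector.setDelegationTokenAndFutureForApp(
        token, future, tokenMaxDate, renewer);
    // Later, on collector shutdown:
    collector.cancelRenewalOrRegenerationFutureForApp();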
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
new file mode 100644
index 0000000..6c0d693
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.collector;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Service that handles aggregations for applications and makes use of
+ * the {@link AppLevelTimelineCollector} class for writes to the
+ * Timeline Service.
+ *
+ * App-related lifecycle management is handled by this service.
+ */
+@Private
+@Unstable
+public class AppLevelTimelineCollectorWithAgg
+    extends AppLevelTimelineCollector {
+  private static final Log LOG = LogFactory.getLog(TimelineCollector.class);
+
+  private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1;
+  private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15;
+  private static Set<String> entityTypesSkipAggregation
+      = initializeSkipSet();
+
+  private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
+
+  public AppLevelTimelineCollectorWithAgg(ApplicationId appId, String user) {
+    super(appId, user);
+  }
+
+  private static Set<String> initializeSkipSet() {
+    Set<String> result = new HashSet<>();
+    result.add(TimelineEntityType.YARN_APPLICATION.toString());
+    result.add(TimelineEntityType.YARN_FLOW_RUN.toString());
+    result.add(TimelineEntityType.YARN_FLOW_ACTIVITY.toString());
+    return result;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    // Launch the aggregation thread
+    appAggregationExecutor = new ScheduledThreadPoolExecutor(
+        AppLevelTimelineCollectorWithAgg.AGGREGATION_EXECUTOR_NUM_THREADS,
+        new ThreadFactoryBuilder()
+            .setNameFormat("TimelineCollector Aggregation thread #%d")
+            .build());
+    appAggregator = new AppLevelAggregator();
+    appAggregationExecutor.scheduleAtFixedRate(appAggregator,
+        AppLevelTimelineCollectorWithAgg.
+          AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
+        AppLevelTimelineCollectorWithAgg.
+          AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
+        TimeUnit.SECONDS);
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    appAggregationExecutor.shutdown();
+    if (!appAggregationExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
+      LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
+      appAggregationExecutor.shutdownNow();
+    }
+    // Perform one round of aggregation after the aggregation executor is done.
+    appAggregator.aggregate();
+    super.serviceStop();
+  }
+
+  @Override
+  protected Set<String> getEntityTypesSkipAggregation() {
+    return entityTypesSkipAggregation;
+  }
+
+  private class AppLevelAggregator implements Runnable {
+
+    private void aggregate() {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("App-level real-time aggregating");
+      }
+      if (!isReadyToAggregate()) {
+        LOG.warn("App-level collector is not ready, skip aggregation. ");
+        return;
+      }
+      try {
+        TimelineCollectorContext currContext = getTimelineEntityContext();
+        Map<String, AggregationStatusTable> aggregationGroups
+            = getAggregationGroups();
+        if (aggregationGroups == null
+            || aggregationGroups.isEmpty()) {
+          LOG.debug("App-level collector is empty, skip aggregation. ");
+          return;
+        }
+        TimelineEntity resultEntity = TimelineCollector.aggregateWithoutGroupId(
+            aggregationGroups, currContext.getAppId(),
+            TimelineEntityType.YARN_APPLICATION.toString());
+        TimelineEntities entities = new TimelineEntities();
+        entities.addEntity(resultEntity);
+        putEntitiesAsync(entities, getCurrentUser());
+      } catch (Exception e) {
+        LOG.error("Error aggregating timeline metrics", e);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("App-level real-time aggregation complete");
+      }
+    }
+
+    @Override
+    public void run() {
+      aggregate();
+    }
+  }
+
+}
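With aggregation factored into the subclass, callers choose a flavour per application. A hypothetical wiring sketch (the actual selection logic lives outside this hunk):

    AppLevelTimelineCollector collector = aggregationEnabled
        ? new AppLevelTimelineCollectorWithAgg(appId, user)
        : new AppLevelTimelineCollector(appId, user);
    collectorManager.putIfAbsent(appId, collector);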
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 1719782..68a68f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -18,33 +18,43 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
+import org.apache.hadoop.yarn.server.timelineservice.security.TimelineV2DelegationTokenSecretManagerService;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -65,17 +75,82 @@
 
   private volatile CollectorNodemanagerProtocol nmCollectorService;
 
+  private TimelineV2DelegationTokenSecretManagerService tokenMgrService;
+
+  private final boolean runningAsAuxService;
+
+  private UserGroupInformation loginUGI;
+
+  private ScheduledThreadPoolExecutor tokenRenewalExecutor;
+
+  private long tokenRenewInterval;
+
+  private static final long TIME_BEFORE_RENEW_DATE = 10 * 1000; // 10 seconds.
+
+  private static final long TIME_BEFORE_EXPIRY = 5 * 60 * 1000; // 5 minutes.
+
   static final String COLLECTOR_MANAGER_ATTR_KEY = "collector.manager";
 
   @VisibleForTesting
   protected NodeTimelineCollectorManager() {
+    this(true);
+  }
+
+  protected NodeTimelineCollectorManager(boolean asAuxService) {
     super(NodeTimelineCollectorManager.class.getName());
+    this.runningAsAuxService = asAuxService;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    tokenMgrService = createTokenManagerService();
+    addService(tokenMgrService);
+    this.loginUGI = UserGroupInformation.getCurrentUser();
+    tokenRenewInterval = conf.getLong(
+        YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
+        YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
+    super.serviceInit(conf);
   }
 
   @Override
   protected void serviceStart() throws Exception {
-    startWebApp();
+    if (UserGroupInformation.isSecurityEnabled()) {
+      // Do a security login for the case where the collector runs outside
+      // the NM.
+      if (!runningAsAuxService) {
+        try {
+          doSecureLogin();
+        } catch (IOException ie) {
+          throw new YarnRuntimeException("Failed to login", ie);
+        }
+      }
+      this.loginUGI = UserGroupInformation.getLoginUser();
+    }
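+    // A single renewal thread suffices: each task is a short, infrequent
+    // per-app timer event.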
+    tokenRenewalExecutor = new ScheduledThreadPoolExecutor(
+        1, new ThreadFactoryBuilder().setNameFormat(
+            "App Collector Token Renewal thread").build());
     super.serviceStart();
+    startWebApp();
+  }
+
+  protected TimelineV2DelegationTokenSecretManagerService
+      createTokenManagerService() {
+    return new TimelineV2DelegationTokenSecretManagerService();
+  }
+
+  @VisibleForTesting
+  public TimelineV2DelegationTokenSecretManagerService
+      getTokenManagerService() {
+    return tokenMgrService;
+  }
+
+  private void doSecureLogin() throws IOException {
+    Configuration conf = getConfig();
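+    // Resolve the configured bind host up front so that a _HOST placeholder
+    // in the Kerberos principal is substituted with the actual hostname.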
+    InetSocketAddress addr = NetUtils.createSocketAddr(conf.getTrimmed(
+        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST), 0,
+                YarnConfiguration.TIMELINE_SERVICE_BIND_HOST);
+    SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+        YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, addr.getHostName());
   }
 
   @Override
@@ -83,16 +158,95 @@
     if (timelineRestServer != null) {
       timelineRestServer.stop();
     }
+    if (tokenRenewalExecutor != null) {
+      tokenRenewalExecutor.shutdownNow();
+    }
     super.serviceStop();
   }
 
+  @VisibleForTesting
+  public Token<TimelineDelegationTokenIdentifier> generateTokenForAppCollector(
+      String user) {
+    Token<TimelineDelegationTokenIdentifier> token  = tokenMgrService.
+        generateToken(UserGroupInformation.createRemoteUser(user),
+            loginUGI.getShortUserName());
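+    // Stamp the collector's REST address into the token's service field so
+    // consumers can tell which collector issued it.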
+    token.setService(new Text(timelineRestServerBindAddress));
+    return token;
+  }
+
+  @VisibleForTesting
+  public long renewTokenForAppCollector(
+      AppLevelTimelineCollector appCollector) throws IOException {
+    if (appCollector.getDelegationTokenForApp() != null) {
+      return tokenMgrService.renewToken(appCollector.getDelegationTokenForApp(),
+          appCollector.getAppDelegationTokenRenewer());
+    } else {
+      LOG.info("Delegation token not available for renewal for app " +
+          appCollector.getTimelineEntityContext().getAppId());
+      return -1;
+    }
+  }
+
+  @VisibleForTesting
+  public void cancelTokenForAppCollector(
+      AppLevelTimelineCollector appCollector) throws IOException {
+    if (appCollector.getDelegationTokenForApp() != null) {
+      tokenMgrService.cancelToken(appCollector.getDelegationTokenForApp(),
+          appCollector.getAppUser());
+    }
+  }
+
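+  // Fire the renewal timer TIME_BEFORE_RENEW_DATE ahead of the renew
+  // interval so a renewed token is in place before the old one expires; for
+  // intervals shorter than that margin, wait out the full interval.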
+  private long getRenewalDelay(long renewInterval) {
+    return ((renewInterval > TIME_BEFORE_RENEW_DATE) ?
+        renewInterval - TIME_BEFORE_RENEW_DATE : renewInterval);
+  }
+
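+  // Aim regeneration TIME_BEFORE_EXPIRY ahead of the token's max date so a
+  // fresh token can be issued and reported to the NM before the old one
+  // becomes unrenewable.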
+  private long getRegenerationDelay(long tokenMaxDate) {
+    long regenerateTime = tokenMaxDate - Time.now();
+    return ((regenerateTime > TIME_BEFORE_EXPIRY) ?
+        regenerateTime - TIME_BEFORE_EXPIRY : regenerateTime);
+  }
+
+  private org.apache.hadoop.yarn.api.records.Token generateTokenAndSetTimer(
+      ApplicationId appId, AppLevelTimelineCollector appCollector)
+      throws IOException {
+    Token<TimelineDelegationTokenIdentifier> timelineToken =
+        generateTokenForAppCollector(appCollector.getAppUser());
+    TimelineDelegationTokenIdentifier tokenId =
+        timelineToken.decodeIdentifier();
+    long renewalDelay = getRenewalDelay(tokenRenewInterval);
+    long regenerationDelay = getRegenerationDelay(tokenId.getMaxDate());
+    if (renewalDelay > 0 || regenerationDelay > 0) {
+      boolean isTimerForRenewal = renewalDelay < regenerationDelay;
+      Future<?> renewalOrRegenerationFuture = tokenRenewalExecutor.schedule(
+          new CollectorTokenRenewer(appId, isTimerForRenewal),
+          isTimerForRenewal ? renewalDelay : regenerationDelay,
+          TimeUnit.MILLISECONDS);
+      appCollector.setDelegationTokenAndFutureForApp(timelineToken,
+          renewalOrRegenerationFuture, tokenId.getMaxDate(),
+          tokenId.getRenewer().toString());
+    }
+    LOG.info("Generated a new token " + timelineToken + " for app " + appId);
+    return org.apache.hadoop.yarn.api.records.Token.newInstance(
+        timelineToken.getIdentifier(), timelineToken.getKind().toString(),
+        timelineToken.getPassword(), timelineToken.getService().toString());
+  }
+
   @Override
   protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
     try {
       // Get context info from NM
       updateTimelineCollectorContext(appId, collector);
+      // Generate token for app collector.
+      org.apache.hadoop.yarn.api.records.Token token = null;
+      if (UserGroupInformation.isSecurityEnabled() &&
+          collector instanceof AppLevelTimelineCollector) {
+        AppLevelTimelineCollector appCollector =
+            (AppLevelTimelineCollector) collector;
+        token = generateTokenAndSetTimer(appId, appCollector);
+      }
       // Report to NM if a new collector is added.
-      reportNewCollectorToNM(appId);
+      reportNewCollectorInfoToNM(appId, token);
     } catch (YarnException | IOException e) {
       // throw exception here as it cannot be used if failed communicate with NM
       LOG.error("Failed to communicate with NM Collector Service for " + appId);
@@ -100,11 +254,29 @@
     }
   }
 
+  @Override
+  protected void postRemove(ApplicationId appId, TimelineCollector collector) {
+    if (collector instanceof AppLevelTimelineCollector) {
+      try {
+        cancelTokenForAppCollector((AppLevelTimelineCollector) collector);
+      } catch (IOException e) {
+        LOG.warn("Failed to cancel token for app collector with appId " +
+            appId, e);
+      }
+    }
+  }
+
   /**
    * Launch the REST web server for this collector manager.
    */
   private void startWebApp() {
     Configuration conf = getConfig();
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    TimelineServerUtils.addTimelineAuthFilter(
+        initializers, defaultInitializers, tokenMgrService);
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
     String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
         YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
     try {
@@ -114,16 +286,10 @@
           .addEndpoint(URI.create(
               (YarnConfiguration.useHttps(conf) ? "https://" : "http://") +
                   bindAddress));
+      if (YarnConfiguration.useHttps(conf)) {
+        builder = WebAppUtils.loadSslConfiguration(builder, conf);
+      }
       timelineRestServer = builder.build();
-      // TODO: replace this by an authentication filter in future.
-      HashMap<String, String> options = new HashMap<>();
-      String username = conf.get(HADOOP_HTTP_STATIC_USER,
-          DEFAULT_HADOOP_HTTP_STATIC_USER);
-      options.put(HADOOP_HTTP_STATIC_USER, username);
-      HttpServer2.defineFilter(timelineRestServer.getWebAppContext(),
-          "static_user_filter_timeline",
-          StaticUserWebFilter.StaticUserFilter.class.getName(),
-          options, new String[] {"/*"});
 
       timelineRestServer.addJerseyResourcePackage(
           TimelineCollectorWebService.class.getPackage().getName() + ";"
@@ -144,11 +310,12 @@
         timelineRestServerBindAddress);
   }
 
-  private void reportNewCollectorToNM(ApplicationId appId)
+  private void reportNewCollectorInfoToNM(ApplicationId appId,
+      org.apache.hadoop.yarn.api.records.Token token)
       throws YarnException, IOException {
     ReportNewCollectorInfoRequest request =
         ReportNewCollectorInfoRequest.newInstance(appId,
-            this.timelineRestServerBindAddress);
+            this.timelineRestServerBindAddress, token);
     LOG.info("Report a new collector for application: " + appId +
         " to the NM Collector Service.");
     getNMCollectorService().reportNewCollectorInfo(request);
@@ -220,4 +387,76 @@
   public String getRestServerBindAddress() {
     return timelineRestServerBindAddress;
   }
+
+  private final class CollectorTokenRenewer implements Runnable {
+    private ApplicationId appId;
+    // Indicates whether the timer is for renewal or regeneration of the
+    // token.
+    private boolean timerForRenewal = true;
+    private CollectorTokenRenewer(ApplicationId applicationId,
+        boolean forRenewal) {
+      appId = applicationId;
+      timerForRenewal = forRenewal;
+    }
+
+    private void renewToken(AppLevelTimelineCollector appCollector)
+        throws IOException {
+      long newExpirationTime = renewTokenForAppCollector(appCollector);
+      // Set renewal or regeneration timer based on delay.
+      long renewalDelay = 0;
+      if (newExpirationTime > 0) {
+        LOG.info("Renewed token for " + appId + " with new expiration " +
+            "timestamp = " + newExpirationTime);
+        renewalDelay = getRenewalDelay(newExpirationTime - Time.now());
+      }
+      long regenerationDelay =
+          getRegenerationDelay(appCollector.getAppDelegationTokenMaxDate());
+      if (renewalDelay > 0 || regenerationDelay > 0) {
+        this.timerForRenewal = renewalDelay < regenerationDelay;
+        Future<?> renewalOrRegenerationFuture = tokenRenewalExecutor.schedule(
+            this, timerForRenewal ? renewalDelay : regenerationDelay,
+            TimeUnit.MILLISECONDS);
+        appCollector.setRenewalOrRegenerationFutureForApp(
+            renewalOrRegenerationFuture);
+      }
+    }
+
+    private void regenerateToken(AppLevelTimelineCollector appCollector)
+        throws IOException {
+      org.apache.hadoop.yarn.api.records.Token token =
+          generateTokenAndSetTimer(appId, appCollector);
+      // Report the regenerated token to the NM.
+      try {
+        reportNewCollectorInfoToNM(appId, token);
+      } catch (YarnException e) {
+        LOG.warn("Unable to report regenerated token to NM for " + appId);
+      }
+    }
+
+    @Override
+    public void run() {
+      TimelineCollector collector = get(appId);
+      if (collector == null) {
+        LOG.info("Cannot find active collector while " + (timerForRenewal ?
+            "renewing" : "regenerating") + " token for " + appId);
+        return;
+      }
+      AppLevelTimelineCollector appCollector =
+          (AppLevelTimelineCollector) collector;
+
+      synchronized (collector) {
+        if (!collector.isStopped()) {
+          try {
+            if (timerForRenewal) {
+              renewToken(appCollector);
+            } else {
+              regenerateToken(appCollector);
+            }
+          } catch (Exception e) {
+            LOG.warn("Unable to " + (timerForRenewal ? "renew" : "regenerate") +
+                " token for " + appId, e);
+          }
+        }
+      }
+    }
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index e4e6421..66f9aab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -61,7 +61,7 @@
   private ScheduledExecutorService scheduler;
 
   public PerNodeTimelineCollectorsAuxService() {
-    this(new NodeTimelineCollectorManager());
+    this(new NodeTimelineCollectorManager(true));
   }
 
   @VisibleForTesting PerNodeTimelineCollectorsAuxService(
@@ -114,11 +114,12 @@
    * exists, no new service is created.
    *
    * @param appId Application Id to be added.
+   * @param user Application Master container user.
    * @return whether it was added successfully
    */
-  public boolean addApplication(ApplicationId appId) {
+  public boolean addApplication(ApplicationId appId, String user) {
     AppLevelTimelineCollector collector =
-        new AppLevelTimelineCollector(appId);
+        new AppLevelTimelineCollectorWithAgg(appId, user);
     return (collectorManager.putIfAbsent(appId, collector)
         == collector);
   }
@@ -147,7 +148,7 @@
     if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
       ApplicationId appId = context.getContainerId().
           getApplicationAttemptId().getApplicationId();
-      addApplication(appId);
+      addApplication(appId, context.getUser());
     }
   }
 
@@ -202,7 +203,8 @@
     PerNodeTimelineCollectorsAuxService auxService = null;
     try {
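+      // Launched standalone via main(), the collector runs outside the NM
+      // and must do its own secure login, hence the standalone path passes
+      // asAuxService = false.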
       auxService = collectorManager == null ?
-          new PerNodeTimelineCollectorsAuxService() :
+          new PerNodeTimelineCollectorsAuxService(
+              new NodeTimelineCollectorManager(false)) :
           new PerNodeTimelineCollectorsAuxService(collectorManager);
       ShutdownHookManager.get().addShutdownHook(new ShutdownHook(auxService),
           SHUTDOWN_HOOK_PRIORITY);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 37387f1..8202431 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -63,6 +63,8 @@
 
   private volatile boolean readyToAggregate = false;
 
+  private volatile boolean isStopped = false;
+
   public TimelineCollector(String name) {
     super(name);
   }
@@ -79,9 +81,14 @@
 
   @Override
   protected void serviceStop() throws Exception {
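+    // Flip the flag before tearing down; the token renewal timer checks
+    // isStopped() under the collector lock and bails out once it is set.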
+    isStopped = true;
     super.serviceStop();
   }
 
+  boolean isStopped() {
+    return isStopped;
+  }
+
   protected void setWriter(TimelineWriter w) {
     this.writer = w;
   }
@@ -139,7 +146,7 @@
     // flush the writer buffer concurrently and swallow any exception
     // caused by the timeline entities that are being put here.
     synchronized (writer) {
-      response = writeTimelineEntities(entities);
+      response = writeTimelineEntities(entities, callerUgi);
       flushBufferedTimelineEntities();
     }
 
@@ -147,15 +154,14 @@
   }
 
   private TimelineWriteResponse writeTimelineEntities(
-      TimelineEntities entities) throws IOException {
+      TimelineEntities entities, UserGroupInformation callerUgi)
+      throws IOException {
     // Update application metrics for aggregation
     updateAggregateStatus(entities, aggregationGroups,
         getEntityTypesSkipAggregation());
 
     final TimelineCollectorContext context = getTimelineEntityContext();
-    return writer.write(context.getClusterId(), context.getUserId(),
-        context.getFlowName(), context.getFlowVersion(),
-        context.getFlowRunId(), context.getAppId(), entities);
+    return writer.write(context, entities, callerUgi);
   }
 
   /**
@@ -187,7 +193,7 @@
           callerUgi + ")");
     }
 
-    writeTimelineEntities(entities);
+    writeTimelineEntities(entities, callerUgi);
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 94b95ad..7909a2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -47,7 +47,7 @@
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class TimelineCollectorManager extends AbstractService {
+public class TimelineCollectorManager extends CompositeService {
   private static final Logger LOG =
       LoggerFactory.getLogger(TimelineCollectorManager.class);
 
@@ -57,7 +57,7 @@
   private boolean writerFlusherRunning;
 
   @Override
-  public void serviceInit(Configuration conf) throws Exception {
+  protected void serviceInit(Configuration conf) throws Exception {
     writer = createTimelineWriter(conf);
     writer.init(conf);
     // create a single dedicated thread for flushing the writer on a periodic
@@ -184,9 +184,11 @@
     if (collector == null) {
       LOG.error("the collector for " + appId + " does not exist!");
     } else {
-      postRemove(appId, collector);
-      // stop the service to do clean up
-      collector.stop();
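+      // Lock the collector so removal cannot race with the token renewal
+      // timer, which synchronizes on the same collector and checks
+      // isStopped() before acting.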
+      synchronized (collector) {
+        postRemove(appId, collector);
+        // stop the service to do clean up
+        collector.stop();
+      }
       LOG.info("The collector service for " + appId + " was removed");
     }
     return collector != null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
index 325050a..8d09c00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
@@ -57,6 +57,11 @@
  * metricsToRetrieve is specified, this parameter sets an upper bound on the
  * number of metrics to return. This parameter is ignored if METRICS are not to
  * be fetched.</li>
+ * <li><b>metricsTimeStart</b> - Metric values before this timestamp would not
+ * be retrieved. If null or {@literal < 0}, defaults to 0.</li>
+ * <li><b>metricsTimeEnd</b> - Metric values after this timestamp would not
+ * be retrieved. If null or {@literal < 0}, defaults to {@link Long#MAX_VALUE}.
+ * </li>
  * </ul>
  */
 @Private
@@ -66,6 +71,10 @@
   private TimelineFilterList metricsToRetrieve;
   private EnumSet<Field> fieldsToRetrieve;
   private Integer metricsLimit;
+  private Long metricsTimeBegin;
+  private Long metricsTimeEnd;
+  private static final long DEFAULT_METRICS_BEGIN_TIME = 0L;
+  private static final long DEFAULT_METRICS_END_TIME = Long.MAX_VALUE;
 
   /**
    * Default limit of number of metrics to return.
@@ -73,12 +82,12 @@
   public static final Integer DEFAULT_METRICS_LIMIT = 1;
 
   public TimelineDataToRetrieve() {
-    this(null, null, null, null);
+    this(null, null, null, null, null, null);
   }
 
   public TimelineDataToRetrieve(TimelineFilterList confs,
       TimelineFilterList metrics, EnumSet<Field> fields,
-      Integer limitForMetrics) {
+      Integer limitForMetrics, Long metricTimeBegin, Long metricTimeEnd) {
     this.confsToRetrieve = confs;
     this.metricsToRetrieve = metrics;
     this.fieldsToRetrieve = fields;
@@ -91,6 +100,20 @@
     if (this.fieldsToRetrieve == null) {
       this.fieldsToRetrieve = EnumSet.noneOf(Field.class);
     }
+    if (metricTimeBegin == null || metricTimeBegin < 0) {
+      this.metricsTimeBegin = DEFAULT_METRICS_BEGIN_TIME;
+    } else {
+      this.metricsTimeBegin = metricTimeBegin;
+    }
+    if (metricTimeEnd == null || metricTimeEnd < 0) {
+      this.metricsTimeEnd = DEFAULT_METRICS_END_TIME;
+    } else {
+      this.metricsTimeEnd = metricTimeEnd;
+    }
+    if (this.metricsTimeBegin > this.metricsTimeEnd) {
+      throw new IllegalArgumentException("metricstimestart should not be " +
+          "greater than metricstimeend");
+    }
   }
 
   public TimelineFilterList getConfsToRetrieve() {
@@ -137,6 +160,14 @@
     return metricsLimit;
   }
 
+  public Long getMetricsTimeBegin() {
+    return this.metricsTimeBegin;
+  }
+
+  public Long getMetricsTimeEnd() {
+    return metricsTimeEnd;
+  }
+
   public void setMetricsLimit(Integer limit) {
     if (limit == null || limit < 1) {
       this.metricsLimit = DEFAULT_METRICS_LIMIT;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
index 8f2b725..a415d34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
@@ -35,10 +35,10 @@
  * <li><b>limit</b> - A limit on the number of entities to return. If null or
  * {@literal < 0}, defaults to {@link #DEFAULT_LIMIT}. The maximum possible
  * value for limit can be {@link Long#MAX_VALUE}.</li>
- * <li><b>createdTimeBegin</b> - Matched entities should not be created
- * before this timestamp. If null or {@literal <=0}, defaults to 0.</li>
- * <li><b>createdTimeEnd</b> - Matched entities should not be created after
- * this timestamp. If null or {@literal <=0}, defaults to
+ * <li><b>createdTimeBegin</b> - Matched entities should not be created before
+ * this timestamp. If null or {@literal <=0}, defaults to 0.</li>
+ * <li><b>createdTimeEnd</b> - Matched entities should not be created after this
+ * timestamp. If null or {@literal <=0}, defaults to
  * {@link Long#MAX_VALUE}.</li>
  * <li><b>relatesTo</b> - Matched entities should or should not relate to given
  * entities depending on what's specified in the filter. The entities in
@@ -99,40 +99,42 @@
  * filter list, event filters can be evaluated with logical AND/OR and we can
  * create a hierarchy of these {@link TimelineExistsFilter} objects. If null or
  * empty, the filter is not applied.</li>
+ * <li><b>fromId</b> - If specified, retrieve the next set of entities from the
+ * given fromId. The set of entities retrieved is inclusive of the specified
+ * fromId. fromId should be taken from the value associated with the FROM_ID
+ * info key in the entity response sent earlier.</li>
  * </ul>
  */
 @Private
 @Unstable
-public class TimelineEntityFilters {
-  private long limit;
+public final class TimelineEntityFilters {
+  private final long limit;
   private long createdTimeBegin;
   private long createdTimeEnd;
-  private TimelineFilterList relatesTo;
-  private TimelineFilterList isRelatedTo;
-  private TimelineFilterList infoFilters;
-  private TimelineFilterList configFilters;
-  private TimelineFilterList metricFilters;
-  private TimelineFilterList eventFilters;
+  private final TimelineFilterList relatesTo;
+  private final TimelineFilterList isRelatedTo;
+  private final TimelineFilterList infoFilters;
+  private final TimelineFilterList configFilters;
+  private final TimelineFilterList metricFilters;
+  private final TimelineFilterList eventFilters;
+  private final String fromId;
   private static final long DEFAULT_BEGIN_TIME = 0L;
   private static final long DEFAULT_END_TIME = Long.MAX_VALUE;
 
   /**
    * Default limit of number of entities to return for getEntities API.
    */
   public static final long DEFAULT_LIMIT = 100;
 
-  public TimelineEntityFilters() {
-    this(null, null, null, null, null, null, null, null, null);
-  }
-
-  public TimelineEntityFilters(
+  private TimelineEntityFilters(
       Long entityLimit, Long timeBegin, Long timeEnd,
       TimelineFilterList entityRelatesTo,
       TimelineFilterList entityIsRelatedTo,
       TimelineFilterList entityInfoFilters,
       TimelineFilterList entityConfigFilters,
       TimelineFilterList  entityMetricFilters,
-      TimelineFilterList entityEventFilters) {
+      TimelineFilterList entityEventFilters, String fromId) {
     if (entityLimit == null || entityLimit < 0) {
       this.limit = DEFAULT_LIMIT;
     } else {
@@ -154,89 +156,119 @@
     this.configFilters = entityConfigFilters;
     this.metricFilters = entityMetricFilters;
     this.eventFilters = entityEventFilters;
+    this.fromId = fromId;
   }
 
   public long getLimit() {
     return limit;
   }
 
-  public void setLimit(Long entityLimit) {
-    if (entityLimit == null || entityLimit < 0) {
-      this.limit = DEFAULT_LIMIT;
-    } else {
-      this.limit = entityLimit;
-    }
-  }
-
   public long getCreatedTimeBegin() {
     return createdTimeBegin;
   }
 
-  public void setCreatedTimeBegin(Long timeBegin) {
-    if (timeBegin == null || timeBegin < 0) {
-      this.createdTimeBegin = DEFAULT_BEGIN_TIME;
-    } else {
-      this.createdTimeBegin = timeBegin;
-    }
-  }
-
   public long getCreatedTimeEnd() {
     return createdTimeEnd;
   }
 
-  public void setCreatedTimeEnd(Long timeEnd) {
-    if (timeEnd == null || timeEnd < 0) {
-      this.createdTimeEnd = DEFAULT_END_TIME;
-    } else {
-      this.createdTimeEnd = timeEnd;
-    }
-  }
-
   public TimelineFilterList getRelatesTo() {
     return relatesTo;
   }
 
-  public void setRelatesTo(TimelineFilterList relations) {
-    this.relatesTo = relations;
-  }
-
   public TimelineFilterList getIsRelatedTo() {
     return isRelatedTo;
   }
 
-  public void setIsRelatedTo(TimelineFilterList relations) {
-    this.isRelatedTo = relations;
-  }
-
   public TimelineFilterList getInfoFilters() {
     return infoFilters;
   }
 
-  public void setInfoFilters(TimelineFilterList filters) {
-    this.infoFilters = filters;
-  }
-
   public TimelineFilterList getConfigFilters() {
     return configFilters;
   }
 
-  public void setConfigFilters(TimelineFilterList filters) {
-    this.configFilters = filters;
-  }
-
   public TimelineFilterList getMetricFilters() {
     return metricFilters;
   }
 
-  public void setMetricFilters(TimelineFilterList filters) {
-    this.metricFilters = filters;
-  }
-
   public TimelineFilterList getEventFilters() {
     return eventFilters;
   }
 
-  public void setEventFilters(TimelineFilterList filters) {
-    this.eventFilters = filters;
+  public String getFromId() {
+    return fromId;
+  }
+
+  /**
+   * A builder class to build an instance of TimelineEntityFilters.
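+   * A minimal usage sketch; {@code prevFromId} is a placeholder for the
+   * FROM_ID value returned in an earlier response:
+   * <pre>
+   *   TimelineEntityFilters filters = new TimelineEntityFilters.Builder()
+   *       .entityLimit(50L)
+   *       .createdTimeBegin(0L)
+   *       .fromId(prevFromId)
+   *       .build();
+   * </pre>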
+   */
+  public static class Builder {
+    private Long entityLimit;
+    private Long createdTimeBegin;
+    private Long createdTimeEnd;
+    private TimelineFilterList relatesToFilters;
+    private TimelineFilterList isRelatedToFilters;
+    private TimelineFilterList entityInfoFilters;
+    private TimelineFilterList entityConfigFilters;
+    private TimelineFilterList entityMetricFilters;
+    private TimelineFilterList entityEventFilters;
+    private String entityFromId;
+
+    public Builder entityLimit(Long limit) {
+      this.entityLimit = limit;
+      return this;
+    }
+
+    public Builder createdTimeBegin(Long timeBegin) {
+      this.createdTimeBegin = timeBegin;
+      return this;
+    }
+
+    public Builder createTimeEnd(Long timeEnd) {
+      this.createdTimeEnd = timeEnd;
+      return this;
+    }
+
+    public Builder relatesTo(TimelineFilterList relatesTo) {
+      this.relatesToFilters = relatesTo;
+      return this;
+    }
+
+    public Builder isRelatedTo(TimelineFilterList isRelatedTo) {
+      this.isRelatedToFilters = isRelatedTo;
+      return this;
+    }
+
+    public Builder infoFilters(TimelineFilterList infoFilters) {
+      this.entityInfoFilters = infoFilters;
+      return this;
+    }
+
+    public Builder configFilters(TimelineFilterList configFilters) {
+      this.entityConfigFilters = configFilters;
+      return this;
+    }
+
+    public Builder metricFilters(TimelineFilterList metricFilters) {
+      this.entityMetricFilters = metricFilters;
+      return this;
+    }
+
+    public Builder eventFilters(TimelineFilterList eventFilters) {
+      this.entityEventFilters = eventFilters;
+      return this;
+    }
+
+    public Builder fromId(String fromId) {
+      this.entityFromId = fromId;
+      return this;
+    }
+
+    public TimelineEntityFilters build() {
+      return new TimelineEntityFilters(entityLimit, createdTimeBegin,
+          createdTimeEnd, relatesToFilters, isRelatedToFilters,
+          entityInfoFilters, entityConfigFilters, entityMetricFilters,
+          entityEventFilters, entityFromId);
+    }
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
index 633bb23..67c3d29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
@@ -31,6 +31,8 @@
 
   private String entityType;
   private String entityId;
+  private Long entityIdPrefix;
+  private String doAsUser;
   public TimelineReaderContext(String clusterId, String userId, String flowName,
       Long flowRunId, String appId, String entityType, String entityId) {
     super(clusterId, userId, flowName, flowRunId, appId);
@@ -38,16 +40,33 @@
     this.entityId = entityId;
   }
 
+  public TimelineReaderContext(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId) {
+    this(clusterId, userId, flowName, flowRunId, appId, entityType, entityId);
+    this.entityIdPrefix = entityIdPrefix;
+  }
+
+  public TimelineReaderContext(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId, String doAsUser) {
+    this(clusterId, userId, flowName, flowRunId, appId, entityType, entityId);
+    this.entityIdPrefix = entityIdPrefix;
+    this.doAsUser = doAsUser;
+  }
+
   public TimelineReaderContext(TimelineReaderContext other) {
     this(other.getClusterId(), other.getUserId(), other.getFlowName(),
         other.getFlowRunId(), other.getAppId(), other.getEntityType(),
-        other.getEntityId());
+        other.getEntityIdPrefix(), other.getEntityId(), other.getDoAsUser());
   }
 
   @Override
   public int hashCode() {
     final int prime = 31;
     int result = super.hashCode();
+    result = prime * result
+        + ((entityIdPrefix == null) ? 0 : entityIdPrefix.hashCode());
     result = prime * result + ((entityId == null) ? 0 : entityId.hashCode());
     result =
         prime * result + ((entityType == null) ? 0 : entityType.hashCode());
@@ -95,4 +114,20 @@
   public void setEntityId(String id) {
     this.entityId = id;
   }
+
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
+  public void setEntityIdPrefix(Long entityIdPrefix) {
+    this.entityIdPrefix = entityIdPrefix;
+  }
+
+  public String getDoAsUser() {
+    return doAsUser;
+  }
+
+  public void setDoAsUser(String doAsUser) {
+    this.doAsUser = doAsUser;
+  }
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
index 4cff3bc..67e5849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
@@ -32,8 +32,6 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
 * This class wraps the timeline reader store implementation. It does some
 * non-trivial manipulation of the timeline data before or after getting
@@ -43,8 +41,6 @@
 @Unstable
 public class TimelineReaderManager extends AbstractService {
 
-  @VisibleForTesting
-  public static final String UID_KEY = "UID";
   private TimelineReader reader;
 
   public TimelineReaderManager(TimelineReader timelineReader) {
@@ -94,18 +90,18 @@
         FlowActivityEntity activityEntity = (FlowActivityEntity)entity;
         context.setUserId(activityEntity.getUser());
         context.setFlowName(activityEntity.getFlowName());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.FLOW_UID.encodeUID(context));
         return;
       case YARN_FLOW_RUN:
         FlowRunEntity runEntity = (FlowRunEntity)entity;
         context.setFlowRunId(runEntity.getRunId());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.FLOWRUN_UID.encodeUID(context));
         return;
       case YARN_APPLICATION:
         context.setAppId(entity.getId());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.APPLICATION_UID.encodeUID(context));
         return;
       default:
@@ -113,9 +109,15 @@
       }
     }
     context.setEntityType(entity.getType());
+    context.setEntityIdPrefix(entity.getIdPrefix());
     context.setEntityId(entity.getId());
-    entity.setUID(UID_KEY,
-        TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context));
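+    // A non-null doAsUser marks a sub-application entity, which gets its own
+    // UID encoding.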
+    if (context.getDoAsUser() != null) {
+      entity.setUID(TimelineReaderUtils.UID_KEY,
+          TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.encodeUID(context));
+    } else {
+      entity.setUID(TimelineReaderUtils.UID_KEY,
+          TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context));
+    }
   }
 
   /**
@@ -176,4 +178,24 @@
     }
     return entity;
   }
+
+  /**
+   * Gets the set of available timeline entity types for an application by
+   * querying the backend storage implementation. The detailed meaning of each
+   * argument is the same as for {@link TimelineReader#getEntity}. If the
+   * client has not supplied a cluster ID, it is filled in from the
+   * configuration before the backend storage is queried.
+   *
+   * @param context Timeline context within the scope of which entity types
+   *                have to be fetched. The entity type field of this context
+   *                should be null.
+   * @return A set containing the available timeline entity types as strings;
+   * empty if none are found.
+   * @throws IOException if any problem occurs while getting entity types.
+   */
+  public Set<String> getEntityTypes(TimelineReaderContext context)
+      throws IOException {
+    context.setClusterId(getClusterID(context.getClusterId(), getConfig()));
+    return reader.getEntityTypes(new TimelineReaderContext(context));
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index d7eff32..5c049ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -18,19 +18,18 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
+import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.lib.StaticUserWebFilter;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -40,7 +39,10 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderWhitelistAuthorizationFilterInitializer;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -71,6 +73,17 @@
     if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
       throw new YarnException("timeline service v.2 is not enabled");
     }
+    InetSocketAddress bindAddr = conf.getSocketAddr(
+        YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
+                YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+    // Login from keytab if security is enabled.
+    try {
+      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, bindAddr.getHostName());
+    } catch (IOException e) {
+      throw new YarnRuntimeException("Failed to login from keytab", e);
+    }
 
     TimelineReader timelineReaderStore = createTimelineReaderStore(conf);
     timelineReaderStore.init(conf);
@@ -130,29 +143,43 @@
     super.serviceStop();
   }
 
-  private void startTimelineReaderWebApp() {
-    Configuration conf = getConfig();
-    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        WebAppUtils.getTimelineReaderWebAppURL(conf));
-    LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
+  protected void addFilters(Configuration conf) {
     boolean enableCorsFilter = conf.getBoolean(
         YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
         YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
-    // setup CORS
+    // Setup CORS
     if (enableCorsFilter) {
       conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
           + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
     }
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    if (!initializers.contains(
+        TimelineReaderAuthenticationFilterInitializer.class.getName())) {
+      defaultInitializers.add(
+          TimelineReaderAuthenticationFilterInitializer.class.getName());
+    }
+
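+    // Authorization (the whitelist filter) is layered on after
+    // authentication.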
+    defaultInitializers.add(
+        TimelineReaderWhitelistAuthorizationFilterInitializer.class.getName());
+
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
+  }
+
+  private void startTimelineReaderWebApp() {
+    Configuration conf = getConfig();
+    addFilters(conf);
+    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
+        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+        WebAppUtils.getTimelineReaderWebAppURL(conf));
+    LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()
             .setName("timeline")
             .setConf(conf)
             .addEndpoint(URI.create("http://" + bindAddress));
       readerWebServer = builder.build();
-
-      setupOptions(conf);
-
       readerWebServer.addJerseyResourcePackage(
           TimelineReaderWebServices.class.getPackage().getName() + ";"
               + GenericExceptionHandler.class.getPackage().getName() + ";"
@@ -168,24 +195,8 @@
     }
   }
 
-  /**
-   * Sets up some options and filters.
-   *
-   * @param conf Configuration
-   */
-  protected void setupOptions(Configuration conf) {
-    Map<String, String> options = new HashMap<>();
-    String username = conf.get(HADOOP_HTTP_STATIC_USER,
-        DEFAULT_HADOOP_HTTP_STATIC_USER);
-    options.put(HADOOP_HTTP_STATIC_USER, username);
-    HttpServer2.defineFilter(readerWebServer.getWebAppContext(),
-        "static_user_filter_timeline",
-        StaticUserWebFilter.StaticUserFilter.class.getName(),
-        options, new String[] {"/*"});
-  }
-
   @VisibleForTesting
-  int getWebServerPort() {
+  public int getWebServerPort() {
     return readerWebServer.getConnectorAddress(0).getPort();
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
index c93c631..4fd8468 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
@@ -24,14 +24,33 @@
 
 import org.apache.commons.lang.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Set of utility methods to be used across timeline reader.
  */
-final class TimelineReaderUtils {
+public final class TimelineReaderUtils {
   private TimelineReaderUtils() {
   }
 
   /**
+   * Default delimiter for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_DELIMITER_CHAR = '!';
+
+  /**
+   * Default escape character used for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_ESCAPE_CHAR = '*';
+
+  public static final String FROMID_KEY = "FROM_ID";
+
+  @VisibleForTesting
+  public static final String UID_KEY = "UID";
+
+  /**
    * Split the passed string along the passed delimiter character while looking
    * for escape char to interpret the splitted parts correctly. For delimiter or
    * escape character to be interpreted as part of the string, they have to be
@@ -168,4 +187,14 @@
     // Join the strings after they have been escaped.
     return StringUtils.join(strs, delimiterChar);
   }
+
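+  /**
+   * Splits the passed string with the default '!' delimiter and '*' escape
+   * character. For example, "foo!bar" splits into ["foo", "bar"], whereas an
+   * escaped delimiter, as in "foo*!bar", yields the single part "foo!bar".
+   */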
+  public static List<String> split(final String str)
+      throws IllegalArgumentException {
+    return split(str, DEFAULT_DELIMITER_CHAR, DEFAULT_ESCAPE_CHAR);
+  }
+
+  public static String joinAndEscapeStrings(final String[] strs) {
+    return joinAndEscapeStrings(strs, DEFAULT_DELIMITER_CHAR,
+        DEFAULT_ESCAPE_CHAR);
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index b3e3cdc..dfe04f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -265,6 +265,15 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp (Optional
+   *     query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities
+   *     would not contain metric values after this timestamp (Optional
+   *     query param).
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the entity response sent earlier.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -295,7 +304,10 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -318,9 +330,11 @@
       entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
-          infofilters, conffilters, metricfilters, eventfilters),
+              infofilters, conffilters, metricfilters, eventfilters,
+              fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -401,6 +415,15 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp (Optional
+   *     query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities
+   *     would not contain metric values after this timestamp (Optional
+   *     query param).
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the entity response sent earlier.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -436,11 +459,15 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, appId, entityType, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -511,6 +538,15 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp (Optional
+   *     query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities
+   *     would not contain metric values after this timestamp (Optional
+   *     query param).
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the entity response sent earlier.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -547,7 +583,10 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -560,14 +599,17 @@
     TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
     Set<TimelineEntity> entities = null;
     try {
-      entities = timelineReaderManager.getEntities(
-          TimelineReaderWebServicesUtils.createTimelineReaderContext(
-          clusterId, userId, flowName, flowRunId, appId, entityType, null),
+      TimelineReaderContext context = TimelineReaderWebServicesUtils
+          .createTimelineReaderContext(clusterId, userId, flowName, flowRunId,
+              appId, entityType, null, null);
+      entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
-          infofilters, conffilters, metricfilters, eventfilters),
+              infofilters, conffilters, metricfilters, eventfilters,
+              fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -609,6 +651,10 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity
+   *     would not contain metric values before this timestamp (Optional
+   *     query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity
+   *     would not contain metric values after this timestamp (Optional
+   *     query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -630,7 +676,9 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -650,7 +698,8 @@
       }
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -704,6 +753,12 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *     If specified, entity retrieval will be faster (see the prefix-lookup
+   *     sketch below).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
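
The claim that a known entityidprefix makes retrieval faster follows from the storage layout: entity rows are stored under sorted keys that begin with the prefix, so a supplied prefix allows a direct keyed read instead of a scan over every id of the type. The key format below is purely illustrative; the real storage schema differs.

import java.util.TreeMap;

public class PrefixLookup {
  public static void main(String[] args) {
    // Illustrative sorted store keyed by "<idPrefix>!<id>".
    TreeMap<String, String> store = new TreeMap<>();
    store.put("00012!entity-a", "a");
    store.put("00042!entity-b", "b");
    store.put("00099!entity-c", "c");

    // With the prefix known, retrieval is a direct keyed get...
    System.out.println(store.get("00042!entity-b"));

    // ...without it, the reader must scan all keys for a matching id.
    for (String key : store.keySet()) {
      if (key.endsWith("!entity-b")) {
        System.out.println(store.get(key));
      }
    }
  }
}
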
@@ -730,10 +785,13 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, null, appId, entityType, entityId, userId,
         flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -775,6 +833,12 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *     If specified, entity retrieval will be faster.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -802,7 +866,10 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -817,9 +884,11 @@
     try {
       entity = timelineReaderManager.getEntity(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
-          clusterId, userId, flowName, flowRunId, appId, entityType, entityId),
+              clusterId, userId, flowName, flowRunId, appId, entityType,
+              entityIdPrefix, entityId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -886,7 +955,7 @@
       context.setEntityType(TimelineEntityType.YARN_FLOW_RUN.toString());
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, null, null));
+          null, metricsToRetrieve, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -993,9 +1062,9 @@
       entity = timelineReaderManager.getEntity(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
           clusterId, userId, flowName, flowRunId, null,
-          TimelineEntityType.YARN_FLOW_RUN.toString(), null),
+              TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, null, null));
+          null, metricsToRetrieve, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1039,6 +1108,10 @@
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive of
+   *     the specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1060,7 +1133,8 @@
       @QueryParam("createdtimestart") String createdTimeStart,
       @QueryParam("createdtimeend") String createdTimeEnd,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
-      @QueryParam("fields") String fields) {
+      @QueryParam("fields") String fields,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1082,11 +1156,12 @@
       entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, null, null, null,
-          null, null, null),
+              null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, fields, null));
+          null, metricsToRetrieve, fields, null, null, null));
     } catch (Exception e) {
-      handleException(e, url, startTime, "createdTime start/end or limit");
+      handleException(e, url, startTime,
+          "createdTime start/end or limit or fromId");
     }
     long endTime = Time.monotonicNow();
     if (entities == null) {
@@ -1124,6 +1199,10 @@
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive of
+   *     the specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1146,9 +1225,10 @@
       @QueryParam("createdtimestart") String createdTimeStart,
       @QueryParam("createdtimeend") String createdTimeEnd,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
-      @QueryParam("fields") String fields) {
+      @QueryParam("fields") String fields,
+      @QueryParam("fromid") String fromId) {
     return getFlowRuns(req, res, null, userId, flowName, limit,
-        createdTimeStart, createdTimeEnd, metricsToRetrieve, fields);
+        createdTimeStart, createdTimeEnd, metricsToRetrieve, fields, fromId);
   }
 
   /**
@@ -1179,6 +1259,10 @@
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive of
+   *     the specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1202,7 +1286,8 @@
       @QueryParam("createdtimestart") String createdTimeStart,
       @QueryParam("createdtimeend") String createdTimeEnd,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
-      @QueryParam("fields") String fields) {
+      @QueryParam("fields") String fields,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1218,14 +1303,15 @@
       entities = timelineReaderManager.getEntities(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
           clusterId, userId, flowName, null, null,
-          TimelineEntityType.YARN_FLOW_RUN.toString(), null),
+              TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, null, null, null,
-          null, null, null),
+              null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, fields, null));
+          null, metricsToRetrieve, fields, null, null, null));
     } catch (Exception e) {
-      handleException(e, url, startTime, "createdTime start/end or limit");
+      handleException(e, url, startTime,
+          "createdTime start/end or limit or fromId");
     }
     long endTime = Time.monotonicNow();
     if (entities == null) {
@@ -1261,6 +1347,10 @@
    *     2 dates.
    *     "daterange=20150711-" returns flows active on and after 20150711.
    *     "daterange=-20150711" returns flows active on and before 20150711.
+   * @param fromId If specified, retrieve the next set of flows from the given
+   *     fromId. The set of flows retrieved is inclusive of the specified
+   *     fromId. fromId should be taken from the value associated with the
+   *     FROM_ID info key in an earlier flow entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowActivityEntity</cite> instances are returned.<br>
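
The four daterange forms documented above parse mechanically; the standalone sketch below converts them into an optional start/end pair (epoch millis at UTC midnight). It approximates, and is not, the reader's actual parseDateRange, which may differ in detail.

import java.text.SimpleDateFormat;
import java.util.TimeZone;

public class DateRangeDemo {
  public static void main(String[] args) throws Exception {
    // Accepted forms per the javadoc: "20150711", "20150711-20150714",
    // "20150711-" and "-20150711".
    String dateRange = "20150711-20150714";
    SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd");
    fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
    Long start = null;
    Long end = null;
    int sep = dateRange.indexOf('-');
    if (sep < 0) {
      // Single day: flows active on that day only.
      start = fmt.parse(dateRange).getTime();
      end = start;
    } else {
      String from = dateRange.substring(0, sep);
      String to = dateRange.substring(sep + 1);
      if (!from.isEmpty()) {
        start = fmt.parse(from).getTime();  // open-ended if omitted
      }
      if (!to.isEmpty()) {
        end = fmt.parse(to).getTime();      // open-ended if omitted
      }
    }
    System.out.println(start + " .. " + end);
  }
}
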
@@ -1277,8 +1367,9 @@
       @Context HttpServletRequest req,
       @Context HttpServletResponse res,
       @QueryParam("limit") String limit,
-      @QueryParam("daterange") String dateRange) {
-    return getFlows(req, res, null, limit, dateRange);
+      @QueryParam("daterange") String dateRange,
+      @QueryParam("fromid") String fromId) {
+    return getFlows(req, res, null, limit, dateRange, fromId);
   }
 
   /**
@@ -1307,6 +1398,10 @@
    *     2 dates.
    *     "daterange=20150711-" returns flows active on and after 20150711.
    *     "daterange=-20150711" returns flows active on and before 20150711.
+   * @param fromId If specified, retrieve the next set of flows from the given
+   *     fromId. The set of flows retrieved is inclusive of the specified
+   *     fromId. fromId should be taken from the value associated with the
+   *     FROM_ID info key in an earlier flow entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowActivityEntity</cite> instances are returned.<br>
@@ -1324,7 +1419,8 @@
       @Context HttpServletResponse res,
       @PathParam("clusterid") String clusterId,
       @QueryParam("limit") String limit,
-      @QueryParam("daterange") String dateRange) {
+      @QueryParam("daterange") String dateRange,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1340,15 +1436,14 @@
       DateRange range = parseDateRange(dateRange);
       TimelineEntityFilters entityFilters =
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
-          limit, null, null, null, null, null, null, null, null);
-      entityFilters.setCreatedTimeBegin(range.dateStart);
-      entityFilters.setCreatedTimeEnd(range.dateEnd);
+              limit, range.dateStart, range.dateEnd,
+              null, null, null, null, null, null, fromId);
       entities = timelineReaderManager.getEntities(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
           clusterId, null, null, null, null,
-          TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
+              TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null),
           entityFilters, TimelineReaderWebServicesUtils.
-          createTimelineDataToRetrieve(null, null, null, null));
+              createTimelineDataToRetrieve(null, null, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "limit");
     }
@@ -1389,6 +1484,10 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1410,7 +1509,9 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1431,7 +1532,8 @@
       context.setEntityType(TimelineEntityType.YARN_APPLICATION.toString());
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1480,6 +1582,10 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the app would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app would
+   *     not contain metric values after this timestamp (Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1504,9 +1610,12 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     return getApp(req, res, null, appId, flowName, flowRunId, userId,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd);
   }
 
   /**
@@ -1544,6 +1653,10 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the app would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app would
+   *     not contain metric values after this timestamp (Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1569,7 +1682,9 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1585,9 +1700,10 @@
       entity = timelineReaderManager.getEntity(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
           clusterId, userId, flowName, flowRunId, appId,
-          TimelineEntityType.YARN_APPLICATION.toString(), null),
+              TimelineEntityType.YARN_APPLICATION.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1660,6 +1776,14 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -1689,7 +1813,10 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1711,9 +1838,11 @@
       entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
-          infofilters, conffilters, metricfilters, eventfilters),
+              infofilters, conffilters, metricfilters, eventfilters,
+              fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -1787,6 +1916,14 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -1818,12 +1955,16 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -1887,6 +2028,14 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -1920,12 +2069,16 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getEntities(req, res, clusterId, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -1986,6 +2139,14 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -2016,12 +2177,16 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2083,6 +2248,14 @@
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp (Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp (Optional query param).
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -2114,12 +2287,16 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getEntities(req, res, clusterId, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2190,6 +2367,17 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempts
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempts
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param fromId If specified, retrieve the next set of application-attempt
+   *         entities from the given fromId. The set of application-attempt
+   *         entities retrieved is inclusive of the specified fromId. fromId
+   *         should be taken from the value associated with the FROM_ID info
+   *         key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the app-attempt
@@ -2222,12 +2410,16 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
 
     return getAppAttempts(req, res, null, appId, userId, flowName, flowRunId,
         limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters, confsToRetrieve,
-        metricsToRetrieve, fields, metricsLimit);
+        metricsToRetrieve, fields, metricsLimit, metricsTimeStart,
+        metricsTimeEnd, fromId);
   }
 
   /**
@@ -2299,6 +2491,17 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempts
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempts
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param fromId If specified, retrieve the next set of application-attempt
+   *         entities from the given fromId. The set of application-attempt
+   *         entities retrieved is inclusive of the specified fromId. fromId
+   *         should be taken from the value associated with the FROM_ID info
+   *         key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the app-attempts
@@ -2332,13 +2535,17 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
 
     return getEntities(req, res, clusterId, appId,
         TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), userId,
         flowName, flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2381,6 +2588,14 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempt
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempt
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *          If specified, entity retrieval will be faster.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         <cite>TimelineEntity</cite> instance is returned.<br>
@@ -2405,9 +2620,13 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     return getAppAttempt(req, res, null, appId, appAttemptId, userId, flowName,
-        flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -2450,6 +2669,14 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempt
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempt
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *          If specified, entity retrieval will be faster.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         <cite>TimelineEntity</cite> instance is returned.<br>
@@ -2476,11 +2703,14 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, clusterId, appId,
         TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), appAttemptId,
         userId, flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -2553,6 +2783,17 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the containers
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the containers
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param fromId If specified, retrieve the next set of container
+   *         entities from the given fromId. The set of container
+   *         entities retrieved is inclusive of the specified fromId. fromId
+   *         should be taken from the value associated with the FROM_ID info
+   *         key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the containers
@@ -2586,11 +2827,15 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
     return getContainers(req, res, null, appId, appattemptId, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2664,6 +2909,17 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the containers
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the containers
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param fromId If specified, retrieve the next set of container
+   *         entities from the given fromId. The set of container
+   *         entities retrieved is inclusive of the specified fromId. fromId
+   *         should be taken from the value associated with the FROM_ID info
+   *         key in an earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the containers
@@ -2699,7 +2955,10 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
 
     String entityType = TimelineEntityType.YARN_CONTAINER.toString();
     String parentEntityType =
@@ -2717,7 +2976,8 @@
     return getEntities(req, res, clusterId, appId, entityType, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilter, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2759,6 +3019,14 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the container
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the container
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *          If specified, entity retrieval will be faster.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *         <cite>TimelineEntity</cite> instance is returned.<br>
@@ -2783,9 +3051,13 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     return getContainer(req, res, null, appId, containerId, userId, flowName,
-        flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -2828,6 +3100,14 @@
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the container
+   *          would not contain metric values before this timestamp (Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the container
+   *          would not contain metric values after this timestamp (Optional
+   *          query param).
+   * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
+   *          If specified, entity retrieval will be faster.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         <cite>TimelineEntity</cite> instance is returned.<br>
@@ -2854,10 +3134,273 @@
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, clusterId, appId,
         TimelineEntityType.YARN_CONTAINER.toString(), containerId, userId,
         flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
-}
\ No newline at end of file
+
+  /**
+   * Returns a set of available entity types for a given app id. Cluster ID is
+   * not provided by the client, so the default cluster ID is assumed. If the
+   * optional query parameters userid, flowname and flowrunid are not
+   * specified, they are looked up, based on the app id and cluster id, from
+   * the flow context information stored in the underlying storage
+   * implementation.
+   *
+   * @param req Servlet request.
+   * @param res Servlet response.
+   * @param appId Application id to be queried (Mandatory path param).
+   * @param flowName Flow name which should match for the app (Optional query
+   *     param).
+   * @param flowRunId Run id which should match for the app (Optional query
+   *     param).
+   * @param userId User id which should match for the app (Optional query
+   *     param).
+   *
+   * @return If successful, a HTTP 200(OK) response having a JSON representing a
+   *     list containing all timeline entity types is returned.<br>
+   *     On failures,<br>
+   *     If any problem occurs in parsing request, HTTP 400(Bad Request) is
+   *     returned.<br>
+   *     If flow context information cannot be retrieved or the app for the
+   *     given app id cannot be found, HTTP 404(Not Found) is returned.<br>
+   *     For all other errors while retrieving data, HTTP 500(Internal Server
+   *     Error) is returned.
+   */
+  @GET
+  @Path("/apps/{appid}/entity-types")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Set<String> getEntityTypes(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("appid") String appId,
+      @QueryParam("flowname") String flowName,
+      @QueryParam("flowrunid") String flowRunId,
+      @QueryParam("userid") String userId) {
+    return getEntityTypes(req, res, null, appId, flowName, flowRunId, userId);
+  }
+
+  /**
+   * Returns a set of available entity types for a given app id. If the
+   * optional query parameters userid, flowname and flowrunid are not
+   * specified, they are looked up, based on the app id and cluster id, from
+   * the flow context information stored in the underlying storage
+   * implementation.
+   *
+   * @param req Servlet request.
+   * @param res Servlet response.
+   * @param clusterId Cluster id to which the app to be queried belongs
+   *     (Mandatory path param).
+   * @param appId Application id to be queried (Mandatory path param).
+   * @param flowName Flow name which should match for the app (Optional query
+   *     param).
+   * @param flowRunId Run id which should match for the app (Optional query
+   *     param).
+   * @param userId User id which should match for the app (Optional query
+   *     param).
+   *
+   * @return If successful, a HTTP 200(OK) response having a JSON representing a
+   *     list containing all timeline entity types is returned.<br>
+   *     On failures,<br>
+   *     If any problem occurs in parsing request, HTTP 400(Bad Request) is
+   *     returned.<br>
+   *     If flow context information cannot be retrieved or the app for the
+   *     given app id cannot be found, HTTP 404(Not Found) is returned.<br>
+   *     For all other errors while retrieving data, HTTP 500(Internal Server
+   *     Error) is returned.
+   */
+  @GET
+  @Path("/clusters/{clusterid}/apps/{appid}/entity-types")
+  @Produces(MediaType.APPLICATION_JSON)
+  public Set<String> getEntityTypes(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("clusterid") String clusterId,
+      @PathParam("appid") String appId,
+      @QueryParam("flowname") String flowName,
+      @QueryParam("flowrunid") String flowRunId,
+      @QueryParam("userid") String userId) {
+    String url = req.getRequestURI() +
+        (req.getQueryString() == null ? "" :
+            QUERY_STRING_SEP + req.getQueryString());
+    UserGroupInformation callerUGI =
+        TimelineReaderWebServicesUtils.getUser(req);
+    LOG.info("Received URL " + url + " from user " +
+        TimelineReaderWebServicesUtils.getUserName(callerUGI));
+    long startTime = Time.monotonicNow();
+    init(res);
+    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
+    Set<String> results = null;
+    try {
+      results = timelineReaderManager.getEntityTypes(
+          TimelineReaderWebServicesUtils.createTimelineReaderContext(
+          clusterId, userId, flowName, flowRunId, appId,
+              null, null, null));
+    } catch (Exception e) {
+      handleException(e, url, startTime, "flowrunid");
+    }
+    long endTime = Time.monotonicNow();
+    LOG.info("Processed URL " + url +
+        " (Took " + (endTime - startTime) + " ms.)");
+    return results;
+  }
+
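
A hypothetical request against the new entity-types endpoint; the host, port, and ids are placeholders, not part of the patch. The method returns a Set<String>, so the response body is a JSON array of the entity type names stored for the app.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class EntityTypesQuery {
  public static void main(String[] args) throws Exception {
    // Placeholder reader address and ids; the optional flow context params
    // can be omitted and will then be looked up from storage.
    URL url = new URL("http://localhost:8188/ws/v2/timeline/apps/"
        + "application_1465246237936_0001/entity-types"
        + "?userid=user1&flowname=flow1&flowrunid=1");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      for (String line; (line = in.readLine()) != null;) {
        System.out.println(line);  // e.g. ["YARN_APPLICATION_ATTEMPT", ...]
      }
    }
  }
}
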
+  @GET
+  @Path("/users/{userid}/entities/{entitytype}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @QueryParam("limit") String limit,
+      @QueryParam("createdtimestart") String createdTimeStart,
+      @QueryParam("createdtimeend") String createdTimeEnd,
+      @QueryParam("relatesto") String relatesTo,
+      @QueryParam("isrelatedto") String isRelatedTo,
+      @QueryParam("infofilters") String infofilters,
+      @QueryParam("conffilters") String conffilters,
+      @QueryParam("metricfilters") String metricfilters,
+      @QueryParam("eventfilters") String eventfilters,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
+    return getSubAppEntities(req, res, null, userId, entityType, limit,
+        createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo, infofilters,
+        conffilters, metricfilters, eventfilters, confsToRetrieve,
+        metricsToRetrieve, fields, metricsLimit, metricsTimeStart,
+        metricsTimeEnd, fromId);
+  }
+
+  @GET
+  @Path("/clusters/{clusterid}/users/{userid}/entities/{entitytype}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("clusterid") String clusterId,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @QueryParam("limit") String limit,
+      @QueryParam("createdtimestart") String createdTimeStart,
+      @QueryParam("createdtimeend") String createdTimeEnd,
+      @QueryParam("relatesto") String relatesTo,
+      @QueryParam("isrelatedto") String isRelatedTo,
+      @QueryParam("infofilters") String infofilters,
+      @QueryParam("conffilters") String conffilters,
+      @QueryParam("metricfilters") String metricfilters,
+      @QueryParam("eventfilters") String eventfilters,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
+    String url = req.getRequestURI() +
+        (req.getQueryString() == null ? "" :
+            QUERY_STRING_SEP + req.getQueryString());
+    UserGroupInformation callerUGI =
+        TimelineReaderWebServicesUtils.getUser(req);
+    LOG.info("Received URL " + url + " from user " +
+        TimelineReaderWebServicesUtils.getUserName(callerUGI));
+    long startTime = Time.monotonicNow();
+    init(res);
+    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
+    Set<TimelineEntity> entities = null;
+    try {
+      TimelineReaderContext context =
+          TimelineReaderWebServicesUtils.createTimelineReaderContext(clusterId,
+              null, null, null, null, entityType, null, null, userId);
+      entities = timelineReaderManager.getEntities(context,
+          TimelineReaderWebServicesUtils.createTimelineEntityFilters(
+          limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
+              infofilters, conffilters, metricfilters, eventfilters,
+              fromId),
+          TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
+    } catch (Exception e) {
+      handleException(e, url, startTime,
+          "createdTime start/end or limit");
+    }
+    long endTime = Time.monotonicNow();
+    if (entities == null) {
+      entities = Collections.emptySet();
+    }
+    LOG.info("Processed URL " + url +
+        " (Took " + (endTime - startTime) + " ms.)");
+    return entities;
+  }
+
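
The sub-app endpoints above key the read by user (note the userId threaded into createTimelineReaderContext as the doAsUser argument) rather than by flow/app context. Below is a sketch of the kind of URL a client would issue, combining the new query parameters; every value is a placeholder, and fromid must be the opaque FROM_ID value echoed by an earlier response.

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class SubAppEntityQuery {
  public static void main(String[] args) throws UnsupportedEncodingException {
    // URL-encode the cursor since FROM_ID values may contain separators.
    String fromId = URLEncoder.encode("<FROM_ID from previous page>", "UTF-8");
    String url = "http://localhost:8188/ws/v2/timeline"
        + "/clusters/test-cluster/users/user1/entities/TEZ_TASK"
        + "?limit=50"
        + "&metricstimestart=1425016501030"  // epoch millis
        + "&metricstimeend=1425016502030"
        + "&fromid=" + fromId;
    System.out.println(url);
  }
}
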
+  @GET
+  @Path("/users/{userid}/entities/{entitytype}/{entityid}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @PathParam("entityid") String entityId,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
+    return getSubAppEntities(req, res, null, userId, entityType, entityId,
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, entityIdPrefix);
+  }
+
+  @GET
+  @Path("/clusters/{clusterid}/users/{userid}/entities/{entitytype}/{entityid}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(@Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("clusterid") String clusterId,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @PathParam("entityid") String entityId,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
+    String url = req.getRequestURI() + (req.getQueryString() == null ? ""
+        : QUERY_STRING_SEP + req.getQueryString());
+    UserGroupInformation callerUGI =
+        TimelineReaderWebServicesUtils.getUser(req);
+    LOG.info("Received URL " + url + " from user "
+        + TimelineReaderWebServicesUtils.getUserName(callerUGI));
+    long startTime = Time.monotonicNow();
+    init(res);
+    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
+    Set<TimelineEntity> entities = null;
+    try {
+      TimelineReaderContext context = TimelineReaderWebServicesUtils
+          .createTimelineReaderContext(clusterId, null, null, null, null,
+              entityType, entityIdPrefix, entityId, userId);
+      entities = timelineReaderManager.getEntities(context,
+          new TimelineEntityFilters.Builder().build(),
+          TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
+              confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+              metricsTimeStart, metricsTimeEnd));
+    } catch (Exception e) {
+      handleException(e, url, startTime, "");
+    }
+    long endTime = Time.monotonicNow();
+    if (entities == null) {
+      entities = Collections.emptySet();
+    }
+    LOG.info(
+        "Processed URL " + url + " (Took " + (endTime - startTime) + " ms.)");
+    return entities;
+  }
+}
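
The two endpoints above expose individual sub-application entities at
/users/{userid}/entities/{entitytype}/{entityid} (plus a cluster-qualified
variant). A minimal client sketch; host, port, ids and the /ws/v2/timeline
base path are illustrative assumptions, not something this patch pins down:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class SubAppEntityFetch {
      public static void main(String[] args) throws Exception {
        // Illustrative reader address and ids; adjust to the deployment.
        URL url = new URL("http://localhost:8188/ws/v2/timeline/users/user1"
            + "/entities/YARN_CONTAINER/container_e01_1_0001_01_000001"
            + "?fields=ALL");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
            conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON-serialized TimelineEntity set
          }
        }
      }
    }
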
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 7fc8cb8..f83c1ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -18,11 +18,13 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
+import java.security.Principal;
 import java.util.EnumSet;
 
 import javax.servlet.http.HttpServletRequest;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
@@ -30,7 +32,7 @@
 /**
  * Set of utility methods to be used by timeline reader web services.
  */
-final class TimelineReaderWebServicesUtils {
+public final class TimelineReaderWebServicesUtils {
 
   private TimelineReaderWebServicesUtils() {
   }
@@ -49,10 +51,20 @@
    */
   static TimelineReaderContext createTimelineReaderContext(String clusterId,
       String userId, String flowName, String flowRunId, String appId,
-      String entityType, String entityId) {
+      String entityType, String entityIdPrefix, String entityId) {
     return new TimelineReaderContext(parseStr(clusterId), parseStr(userId),
         parseStr(flowName), parseLongStr(flowRunId), parseStr(appId),
-        parseStr(entityType), parseStr(entityId));
+        parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId));
+  }
+
+  static TimelineReaderContext createTimelineReaderContext(String clusterId,
+      String userId, String flowName, String flowRunId, String appId,
+      String entityType, String entityIdPrefix, String entityId,
+      String doAsUser) {
+    return new TimelineReaderContext(parseStr(clusterId), parseStr(userId),
+        parseStr(flowName), parseLongStr(flowRunId), parseStr(appId),
+        parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId),
+        parseStr(doAsUser));
   }
 
   /**
@@ -73,12 +85,46 @@
   static TimelineEntityFilters createTimelineEntityFilters(String limit,
       String createdTimeStart, String createdTimeEnd, String relatesTo,
       String isRelatedTo, String infofilters, String conffilters,
-      String metricfilters, String eventfilters) throws TimelineParseException {
-    return new TimelineEntityFilters(parseLongStr(limit),
-        parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
-        parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
-        parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
-        parseMetricFilters(metricfilters), parseEventFilters(eventfilters));
+      String metricfilters, String eventfilters,
+      String fromid) throws TimelineParseException {
+    return createTimelineEntityFilters(
+        limit, parseLongStr(createdTimeStart),
+        parseLongStr(createdTimeEnd),
+        relatesTo, isRelatedTo, infofilters,
+        conffilters, metricfilters, eventfilters, fromid);
+  }
+
+  /**
+   * Parse the passed filters represented as strings and convert them into a
+   * {@link TimelineEntityFilters} object.
+   * @param limit Limit to number of entities to return.
+   * @param createdTimeStart Created time start for the entities to return.
+   * @param createdTimeEnd Created time end for the entities to return.
+   * @param relatesTo Entities to return must match relatesTo.
+   * @param isRelatedTo Entities to return must match isRelatedTo.
+   * @param infofilters Entities to return must match these info filters.
+   * @param conffilters Entities to return must match these config filters.
+   * @param metricfilters Entities to return must match these metric filters.
+   * @param eventfilters Entities to return must match these event filters.
+   * @param fromid Entity id from which to continue retrieval, for paginating
+   *          results.
+   * @return a {@link TimelineEntityFilters} object.
+   * @throws TimelineParseException if any problem occurs during parsing.
+   */
+  static TimelineEntityFilters createTimelineEntityFilters(String limit,
+      Long createdTimeStart, Long createdTimeEnd, String relatesTo,
+      String isRelatedTo, String infofilters, String conffilters,
+      String metricfilters, String eventfilters,
+      String fromid) throws TimelineParseException {
+    return new TimelineEntityFilters.Builder()
+        .entityLimit(parseLongStr(limit))
+        .createdTimeBegin(createdTimeStart)
+        .createTimeEnd(createdTimeEnd)
+        .relatesTo(parseRelationFilters(relatesTo))
+        .isRelatedTo(parseRelationFilters(isRelatedTo))
+        .infoFilters(parseKVFilters(infofilters, false))
+        .configFilters(parseKVFilters(conffilters, true))
+        .metricFilters(parseMetricFilters(metricfilters))
+        .eventFilters(parseEventFilters(eventfilters))
+        .fromId(parseStr(fromid)).build();
   }
 
   /**
@@ -92,11 +138,13 @@
    * @throws TimelineParseException if any problem occurs during parsing.
    */
   static TimelineDataToRetrieve createTimelineDataToRetrieve(String confs,
-      String metrics, String fields, String metricsLimit)
+      String metrics, String fields, String metricsLimit,
+      String metricsTimeBegin, String metricsTimeEnd)
       throws TimelineParseException {
     return new TimelineDataToRetrieve(parseDataToRetrieve(confs),
         parseDataToRetrieve(metrics), parseFieldsStr(fields,
-        TimelineParseConstants.COMMA_DELIMITER), parseIntStr(metricsLimit));
+        TimelineParseConstants.COMMA_DELIMITER), parseIntStr(metricsLimit),
+        parseLongStr(metricsTimeBegin), parseLongStr(metricsTimeEnd));
   }
 
   /**
@@ -207,20 +255,40 @@
    * @return trimmed string if string is not null, null otherwise.
    */
   static String parseStr(String str) {
-    return str == null ? null : str.trim();
+    return StringUtils.trimToNull(str);
   }
 
   /**
-   * Get UGI from HTTP request.
+   * Get UGI based on the remote user in the HTTP request.
+   *
    * @param req HTTP request.
    * @return UGI.
    */
-  static UserGroupInformation getUser(HttpServletRequest req) {
-    String remoteUser = req.getRemoteUser();
+  public static UserGroupInformation getUser(HttpServletRequest req) {
+    return getCallerUserGroupInformation(req, false);
+  }
+
+  /**
+   * Get UGI from the HTTP request.
+   *
+   * @param hsr HTTP request.
+   * @param usePrincipal if true, use principal name else use remote user name
+   * @return UGI.
+   */
+  public static UserGroupInformation getCallerUserGroupInformation(
+      HttpServletRequest hsr, boolean usePrincipal) {
+
+    String remoteUser = hsr.getRemoteUser();
+    if (usePrincipal) {
+      Principal princ = hsr.getUserPrincipal();
+      remoteUser = princ == null ? null : princ.getName();
+    }
+
     UserGroupInformation callerUGI = null;
     if (remoteUser != null) {
       callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
     }
+
     return callerUGI;
   }
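
createTimelineEntityFilters now funnels everything through
TimelineEntityFilters.Builder. For illustration, a direct use of the same
builder API shown above (all filter values are made up; note the builder
really does pair createdTimeBegin with createTimeEnd):

    TimelineEntityFilters filters = new TimelineEntityFilters.Builder()
        .entityLimit(100L)                     // cap on entities returned
        .createdTimeBegin(1500000000000L)      // created at/after this time
        .createTimeEnd(1500000100000L)         // created at/before this time
        .fromId("container_e01_1_0001_01_000001") // pagination start point
        .build();
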
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
index 08e5405..b875828 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
@@ -137,6 +137,41 @@
     }
   },
 
+  // Sub Application Entity UID should contain cluster, user, entity type,
+  // entity id prefix and entity id
+  SUB_APPLICATION_ENTITY_UID {
+    @Override
+    String encodeUID(TimelineReaderContext context) {
+      if (context == null) {
+        return null;
+      }
+      if (context.getClusterId() == null || context.getDoAsUser() == null
+          || context.getEntityType() == null
+          || context.getEntityIdPrefix() == null
+          || context.getEntityId() == null) {
+        return null;
+      }
+      String[] entityTupleArr = {context.getClusterId(), context.getDoAsUser(),
+          context.getEntityType(), context.getEntityIdPrefix().toString(),
+          context.getEntityId()};
+      return joinAndEscapeUIDParts(entityTupleArr);
+    }
+
+    @Override
+    TimelineReaderContext decodeUID(String uId) throws Exception {
+      if (uId == null) {
+        return null;
+      }
+      List<String> entityTupleList = splitUID(uId);
+      if (entityTupleList.size() == 5) {
+        // All five parts of a sub-application UID exist.
+        return new TimelineReaderContext(entityTupleList.get(0), null, null,
+            null, null, entityTupleList.get(2),
+            Long.parseLong(entityTupleList.get(3)), entityTupleList.get(4),
+            entityTupleList.get(1));
+      }
+      return null;
+    }
+  },
+
   // Generic Entity UID should contain cluster, user, flow name, flowrun id,
   // app id, entity type and entity id OR should contain cluster, appid, entity
   // type and entity id(i.e.without flow context info).
@@ -155,12 +190,14 @@
         // Flow information exists.
         String[] entityTupleArr = {context.getClusterId(), context.getUserId(),
             context.getFlowName(), context.getFlowRunId().toString(),
-            context.getAppId(), context.getEntityType(), context.getEntityId()};
+            context.getAppId(), context.getEntityType(),
+            context.getEntityIdPrefix().toString(), context.getEntityId() };
         return joinAndEscapeUIDParts(entityTupleArr);
       } else {
         // Only entity and app information exists. Flow info does not exist.
         String[] entityTupleArr = {context.getClusterId(), context.getAppId(),
-            context.getEntityType(), context.getEntityId()};
+            context.getEntityType(), context.getEntityIdPrefix().toString(),
+            context.getEntityId() };
         return joinAndEscapeUIDParts(entityTupleArr);
       }
     }
@@ -171,20 +208,21 @@
         return null;
       }
       List<String> entityTupleList = splitUID(uId);
-      // Should have 7 parts i.e. cluster, user, flow name, flowrun id, app id,
-      // entity type and entity id OR should have 4 parts i.e. cluster, app id,
+      // Should have 8 parts i.e. cluster, user, flow name, flowrun id, app id,
+      // entity type and entity id OR should have 5 parts i.e. cluster, app id,
       // entity type and entity id.
-      if (entityTupleList.size() == 7) {
+      if (entityTupleList.size() == 8) {
         // Flow information exists.
         return new TimelineReaderContext(entityTupleList.get(0),
             entityTupleList.get(1), entityTupleList.get(2),
             Long.parseLong(entityTupleList.get(3)), entityTupleList.get(4),
-            entityTupleList.get(5), entityTupleList.get(6));
-      } else if (entityTupleList.size() == 4) {
+            entityTupleList.get(5), Long.parseLong(entityTupleList.get(6)),
+            entityTupleList.get(7));
+      } else if (entityTupleList.size() == 5) {
         // Flow information does not exist.
         return new TimelineReaderContext(entityTupleList.get(0), null, null,
             null, entityTupleList.get(1), entityTupleList.get(2),
-            entityTupleList.get(3));
+            Long.parseLong(entityTupleList.get(3)), entityTupleList.get(4));
       } else {
         return null;
       }
@@ -192,39 +230,29 @@
   };
 
   /**
-   * Delimiter used for UID.
-   */
-  public static final char UID_DELIMITER_CHAR = '!';
-
-  /**
-   * Escape Character used if delimiter or escape character itself is part of
-   * different components of UID.
-   */
-  public static final char UID_ESCAPE_CHAR = '*';
-
-  /**
-   * Split UID using {@link #UID_DELIMITER_CHAR} and {@link #UID_ESCAPE_CHAR}.
+   * Split UID using {@link TimelineReaderUtils#DEFAULT_DELIMITER_CHAR} and
+   * {@link TimelineReaderUtils#DEFAULT_ESCAPE_CHAR}.
   * @param uid UID to be split.
    * @return a list of different parts of UID split across delimiter.
    * @throws IllegalArgumentException if UID is not properly escaped.
    */
   private static List<String> splitUID(String uid)
       throws IllegalArgumentException {
-    return TimelineReaderUtils.split(uid, UID_DELIMITER_CHAR, UID_ESCAPE_CHAR);
+    return TimelineReaderUtils.split(uid);
   }
 
   /**
-   * Join different parts of UID delimited by {@link #UID_DELIMITER_CHAR} with
-   * delimiter and escape character escaped using {@link #UID_ESCAPE_CHAR} if
-   * UID parts contain them.
+   * Join different parts of UID delimited by
+   * {@link TimelineReaderUtils#DEFAULT_DELIMITER_CHAR} with delimiter and
+   * escape character escaped using
+   * {@link TimelineReaderUtils#DEFAULT_ESCAPE_CHAR} if UID parts contain them.
    * @param parts an array of UID parts to be joined.
    * @return a string joined using the delimiter with escape and delimiter
-   *     characters escaped if they are part of the string parts to be joined.
-   *     Returns null if one of the parts is null.
+   *         characters escaped if they are part of the string parts to be
+   *         joined. Returns null if one of the parts is null.
    */
   private static String joinAndEscapeUIDParts(String[] parts) {
-    return TimelineReaderUtils.joinAndEscapeStrings(parts, UID_DELIMITER_CHAR,
-        UID_ESCAPE_CHAR);
+    return TimelineReaderUtils.joinAndEscapeStrings(parts);
   }
 
   /**
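
With the entity id prefix added, a generic entity UID with flow context now
carries eight parts, and the new sub-application UID carries five, joined
with the `!` delimiter and `*` escape that used to be defined here and are
now supplied by the TimelineReaderUtils defaults. A sketch of the round
trip, callable from within the reader package since encodeUID/decodeUID are
package-private (all values illustrative, inside a method declared
`throws Exception` because decodeUID may throw):

    TimelineReaderContext ctx = new TimelineReaderContext("cluster1", null,
        null, null, null, "YARN_CONTAINER", 0L,
        "container_e01_1_0001_01_000001", "user1");
    String uid =
        TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.encodeUID(ctx);
    // uid -> "cluster1!user1!YARN_CONTAINER!0!container_e01_1_0001_01_000001"
    TimelineReaderContext decoded =
        TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.decodeUID(uid);
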
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
new file mode 100644
index 0000000..e0e1f4d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+
+/**
+ * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * for ATSv2 timeline reader server with timeline service specific
+ * configurations.
+ */
+public class TimelineReaderAuthenticationFilterInitializer extends
+    TimelineAuthenticationFilterInitializer {
+
+  /**
+   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * <p>
+   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * YARN configuration properties prefixed with
+   * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
+   *
+   * @param container
+   *          The filter container
+   * @param conf
+   *          Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    setAuthFilterConfig(conf);
+    container.addGlobalFilter("Timeline Reader Authentication Filter",
+        AuthenticationWithProxyUserFilter.class.getName(),
+        getFilterConfig());
+  }
+}
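
How this initializer gets picked up is deployment-specific; one plausible
wiring, assuming the standard hadoop.http.filter.initializers hook (an
assumption on my part, not something this patch shows):

    Configuration conf = new YarnConfiguration();
    conf.set("hadoop.http.filter.initializers",
        TimelineReaderAuthenticationFilterInitializer.class.getName());
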
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
new file mode 100644
index 0000000..b22ea3fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import java.io.IOException;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
+
+/**
+ * Filter to check if a particular user is allowed to read ATSv2 data.
+ */
+public class TimelineReaderWhitelistAuthorizationFilter implements Filter {
+
+  public static final String EMPTY_STRING = "";
+
+  private static final Log LOG =
+      LogFactory.getLog(TimelineReaderWhitelistAuthorizationFilter.class);
+
+  private boolean isWhitelistReadAuthEnabled = false;
+
+  private AccessControlList allowedUsersAclList;
+  private AccessControlList adminAclList;
+
+  @Override
+  public void destroy() {
+    // NOTHING
+  }
+
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response,
+      FilterChain chain) throws IOException, ServletException {
+    if (isWhitelistReadAuthEnabled) {
+      UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
+          .getCallerUserGroupInformation((HttpServletRequest) request, true);
+      if (callerUGI == null) {
+        String msg = "Unable to obtain user name, user not authenticated";
+        throw new AuthorizationException(msg);
+      }
+      if (!(adminAclList.isUserAllowed(callerUGI)
+          || allowedUsersAclList.isUserAllowed(callerUGI))) {
+        String userName = callerUGI.getShortUserName();
+        String msg = "User " + userName
+            + " is not allowed to read TimelineService V2 data.";
+        // ForbiddenException surfaces as a 403 Forbidden response.
+        throw new ForbiddenException(msg);
+      }
+    }
+    if (chain != null) {
+      chain.doFilter(request, response);
+    }
+  }
+
+  @Override
+  public void init(FilterConfig conf) throws ServletException {
+    String isWhitelistReadAuthEnabledStr = conf
+        .getInitParameter(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED);
+    if (isWhitelistReadAuthEnabledStr == null) {
+      isWhitelistReadAuthEnabled =
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED;
+    } else {
+      isWhitelistReadAuthEnabled =
+          Boolean.valueOf(isWhitelistReadAuthEnabledStr);
+    }
+
+    if (isWhitelistReadAuthEnabled) {
+      String listAllowedUsers = conf.getInitParameter(
+          YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS);
+      if (StringUtils.isEmpty(listAllowedUsers)) {
+        listAllowedUsers =
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS;
+      }
+      LOG.info("listAllowedUsers=" + listAllowedUsers);
+      allowedUsersAclList = new AccessControlList(listAllowedUsers);
+      LOG.info("allowedUsersAclList=" + allowedUsersAclList.getUsers());
+      // also allow admins
+      String adminAclListStr =
+          conf.getInitParameter(YarnConfiguration.YARN_ADMIN_ACL);
+      if (StringUtils.isEmpty(adminAclListStr)) {
+        adminAclListStr =
+            TimelineReaderWhitelistAuthorizationFilter.EMPTY_STRING;
+        LOG.info("adminAclList not set, hence setting it to \"\"");
+      }
+      adminAclList = new AccessControlList(adminAclListStr);
+      LOG.info("adminAclList=" + adminAclList.getUsers());
+    }
+  }
+}
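
The whitelist decision above reduces to two AccessControlList lookups. A
standalone sketch of the same check; the user and ACL strings are
illustrative, and "user1,user2 group1" follows the usual Hadoop ACL syntax
of comma-separated users, a space, then comma-separated groups:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class WhitelistCheckSketch {
      public static void main(String[] args) {
        AccessControlList allowed = new AccessControlList("user1,user2 group1");
        AccessControlList admins = new AccessControlList("yarnadmin");
        UserGroupInformation caller =
            UserGroupInformation.createRemoteUser("user1");
        boolean canRead = admins.isUserAllowed(caller)
            || allowed.isUserAllowed(caller);
        System.out.println("read allowed: " + canRead); // true for user1
      }
    }
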
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java
new file mode 100644
index 0000000..a970731
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Filter initializer to initialize
+ * {@link TimelineReaderWhitelistAuthorizationFilter} for ATSv2 timeline reader
+ * with timeline service specific configurations.
+ */
+public class TimelineReaderWhitelistAuthorizationFilterInitializer
+    extends FilterInitializer {
+
+  /**
+   * Initializes {@link TimelineReaderWhitelistAuthorizationFilter}.
+   *
+   * @param container The filter container
+   * @param conf Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    Map<String, String> params = new HashMap<String, String>();
+    String isWhitelistReadAuthEnabled = Boolean.toString(
+        conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED));
+    params.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
+        isWhitelistReadAuthEnabled);
+    params.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        conf.get(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS));
+
+    params.put(YarnConfiguration.YARN_ADMIN_ACL,
+        conf.get(YarnConfiguration.YARN_ADMIN_ACL,
+            // Use a default of "" instead of DEFAULT_YARN_ADMIN_ACL:
+            // DEFAULT_YARN_ADMIN_ACL allows all users, and we do not wish to
+            // allow everyone by default if read auth is enabled and
+            // YARN_ADMIN_ACL is unset.
+            TimelineReaderWhitelistAuthorizationFilter.EMPTY_STRING));
+    container.addGlobalFilter("Timeline Reader Whitelist Authorization Filter",
+        TimelineReaderWhitelistAuthorizationFilter.class.getName(), params);
+  }
+}
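
A sketch of the configuration this initializer reads, using the
YarnConfiguration constants referenced above (the user names are
illustrative):

    Configuration conf = new YarnConfiguration();
    // Turn on whitelist-based read authorization for the timeline reader.
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
        true);
    // Users allowed to read ATSv2 data; YARN_ADMIN_ACL members also pass.
    conf.set(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
        "user1,user2");
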
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
similarity index 71%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
index 5d14c6f..5888c98 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
@@ -16,10 +16,10 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.reader.security
+ * contains classes to be used to support SPNEGO authentication for the
+ * timeline reader.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java
new file mode 100644
index 0000000..0eb5ee5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java
@@ -0,0 +1,69 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocolPB;
+
+/**
+ * SecurityInfo implementation for CollectorNodemanager protocol.
+ */
+@Public
+@Evolving
+public class CollectorNodemanagerSecurityInfo extends SecurityInfo {
+
+  @Override
+  public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+    if (!protocol.equals(CollectorNodemanagerProtocolPB.class)) {
+      return null;
+    }
+    return new KerberosInfo() {
+
+      @Override
+      public Class<? extends Annotation> annotationType() {
+        return null;
+      }
+
+      @Override
+      public String serverPrincipal() {
+        return YarnConfiguration.NM_PRINCIPAL;
+      }
+
+      @Override
+      public String clientPrincipal() {
+        return null;
+      }
+    };
+  }
+
+  @Override
+  public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+    return null;
+  }
+}
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
new file mode 100644
index 0000000..de5ccdc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
+
+/**
+ * The service wrapper of {@link TimelineV2DelegationTokenSecretManager}.
+ */
+public class TimelineV2DelegationTokenSecretManagerService extends
+    TimelineDelgationTokenSecretManagerService {
+
+  public TimelineV2DelegationTokenSecretManagerService() {
+    super(TimelineV2DelegationTokenSecretManagerService.class.getName());
+  }
+
+  @Override
+  protected AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+      createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval) {
+    return new TimelineV2DelegationTokenSecretManager(secretKeyInterval,
+        tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval);
+  }
+
+  public Token<TimelineDelegationTokenIdentifier> generateToken(
+      UserGroupInformation ugi, String renewer) {
+    return ((TimelineV2DelegationTokenSecretManager)
+        getTimelineDelegationTokenSecretManager()).generateToken(ugi, renewer);
+  }
+
+  public long renewToken(Token<TimelineDelegationTokenIdentifier> token,
+      String renewer) throws IOException {
+    return getTimelineDelegationTokenSecretManager().renewToken(token, renewer);
+  }
+
+  public void cancelToken(Token<TimelineDelegationTokenIdentifier> token,
+      String canceller) throws IOException {
+    getTimelineDelegationTokenSecretManager().cancelToken(token, canceller);
+  }
+
+  /**
+   * Delegation token secret manager for ATSv2.
+   */
+  @Private
+  @Unstable
+  public static class TimelineV2DelegationTokenSecretManager extends
+      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
+
+    private static final Log LOG =
+        LogFactory.getLog(TimelineV2DelegationTokenSecretManager.class);
+
+    /**
+     * Create a timeline v2 secret manager.
+     * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+     *        new secret keys.
+     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+     *        tokens in milliseconds
+     * @param delegationTokenRenewInterval how often the tokens must be renewed
+     *        in milliseconds
+     * @param delegationTokenRemoverScanInterval how often the tokens are
+     *        scanned for expired tokens in milliseconds
+     */
+    public TimelineV2DelegationTokenSecretManager(
+        long delegationKeyUpdateInterval, long delegationTokenMaxLifetime,
+        long delegationTokenRenewInterval,
+        long delegationTokenRemoverScanInterval) {
+      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+    }
+
+    public Token<TimelineDelegationTokenIdentifier> generateToken(
+        UserGroupInformation ugi, String renewer) {
+      Text realUser = null;
+      if (ugi.getRealUser() != null) {
+        realUser = new Text(ugi.getRealUser().getUserName());
+      }
+      TimelineDelegationTokenIdentifier identifier = createIdentifier();
+      identifier.setOwner(new Text(ugi.getUserName()));
+      identifier.setRenewer(new Text(renewer));
+      identifier.setRealUser(realUser);
+      byte[] password = createPassword(identifier);
+      return new Token<TimelineDelegationTokenIdentifier>(identifier.getBytes(),
+          password, identifier.getKind(), null);
+    }
+
+    @Override
+    public TimelineDelegationTokenIdentifier createIdentifier() {
+      return new TimelineDelegationTokenIdentifier();
+    }
+
+    @Override
+    protected void logExpireToken(TimelineDelegationTokenIdentifier ident)
+        throws IOException {
+      LOG.info("Token " + ident + " expired.");
+    }
+  }
+}
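
A sketch of driving the new service wrapper once wired up; the lifecycle
calls follow the standard Hadoop Service pattern, and "yarn" as the renewer
name is illustrative:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

    public class TokenServiceSketch {
      public static void main(String[] args) throws Exception {
        TimelineV2DelegationTokenSecretManagerService svc =
            new TimelineV2DelegationTokenSecretManagerService();
        svc.init(new YarnConfiguration());
        svc.start();
        try {
          UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
          Token<TimelineDelegationTokenIdentifier> token =
              svc.generateToken(ugi, "yarn");
          long newExpiry = svc.renewToken(token, "yarn");
          System.out.println("token renewed until " + newExpiry);
          svc.cancelToken(token, ugi.getShortUserName());
        } finally {
          svc.stop();
        }
      }
    }
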
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
similarity index 72%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
index 5d14c6f..8250092 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
@@ -16,10 +16,10 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.security contains
+ * classes to be used to generate delegation tokens for ATSv2.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timelineservice.security;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
index b4e792b..adb8821 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
@@ -32,6 +32,7 @@
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
 import com.fasterxml.jackson.core.JsonGenerationException;
 import com.fasterxml.jackson.databind.JsonMappingException;
@@ -408,4 +409,24 @@
             context.getEntityType());
     return getEntities(dir, context.getEntityType(), filters, dataToRetrieve);
   }
+
+  @Override
+  public Set<String> getEntityTypes(TimelineReaderContext context)
+      throws IOException {
+    Set<String> result = new TreeSet<>();
+    String flowRunPath = getFlowRunPath(context.getUserId(),
+        context.getClusterId(), context.getFlowName(), context.getFlowRunId(),
+        context.getAppId());
+    File dir = new File(new File(rootPath, ENTITIES_DIR),
+        context.getClusterId() + File.separator + flowRunPath
+            + File.separator + context.getAppId());
+    File[] fileList = dir.listFiles();
+    if (fileList != null) {
+      for (File f : fileList) {
+        if (f.isDirectory()) {
+          result.add(f.getName());
+        }
+      }
+    }
+    return result;
+  }
 }
\ No newline at end of file
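
getEntityTypes above treats each sub-directory of the application directory
as one entity type. A minimal usage sketch, assuming a started
FileSystemTimelineReaderImpl named "reader" and a TimelineReaderContext
"context" carrying at least the cluster id and application id the lookup
path needs:

    Set<String> entityTypes = reader.getEntityTypes(context);
    for (String entityType : entityTypes) {
      System.out.println(entityType); // one entry per entity-type directory
    }
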
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index 1f527f2..ee41970 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -28,12 +28,14 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -68,10 +70,17 @@
   }
 
   @Override
-  public TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities entities) throws IOException {
+  public TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities entities, UserGroupInformation callerUgi)
+      throws IOException {
     TimelineWriteResponse response = new TimelineWriteResponse();
+    String clusterId = context.getClusterId();
+    String userId = context.getUserId();
+    String flowName = context.getFlowName();
+    String flowVersion = context.getFlowVersion();
+    long flowRunId = context.getFlowRunId();
+    String appId = context.getAppId();
+
     for (TimelineEntity entity : entities.getEntities()) {
       write(clusterId, userId, flowName, flowVersion, flowRunId, appId, entity,
           response);
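
Call sites now pass a single TimelineCollectorContext plus the caller's UGI
instead of six loose parameters. A sketch of the new invocation, assuming
TimelineCollectorContext exposes a full-argument constructor in
(cluster, user, flowName, flowVersion, flowRunId, appId) order; all values
are illustrative:

    TimelineCollectorContext context = new TimelineCollectorContext(
        "cluster1", "user1", "flow1", "v1", 1L,
        "application_1111111111_0001");
    TimelineEntities entities = new TimelineEntities();
    TimelineWriteResponse response = writer.write(context, entities,
        UserGroupInformation.createRemoteUser("user1"));
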
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
index e8eabf1..16d623a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
@@ -125,8 +125,8 @@
    *    <li><b>flowRunId</b> - Context flow run id.</li>
    *    <li><b>appId</b> - Context app id.</li>
    *    </ul>
-   *    Although entityId is also part of context, it has no meaning for
-   *    getEntities.<br>
+   *    Although entityIdPrefix and entityId are also part of context,
+   *    they have no meaning for getEntities.<br>
    *    Fields in context which are mandatory depends on entity type. Entity
    *    type is always mandatory. In addition to entity type, below is the list
    *    of context fields which are mandatory, based on entity type.<br>
@@ -161,8 +161,10 @@
    *    {@link TimelineDataToRetrieve} for details.
    * @return A set of <cite>TimelineEntity</cite> instances of the given entity
    *    type in the given context scope which matches the given predicates
-   *    ordered by created time, descending. Each entity will only contain the
-   *    metadata(id, type and created time) plus the given fields to retrieve.
+   *    ordered by entityIdPrefix (for generic entities only).
+   *    Each entity will only contain the metadata (id, type, idPrefix and
+   *    created time) plus the given fields to retrieve.
    *    <br>
    *    If entityType is YARN_FLOW_ACTIVITY, entities returned are of type
    *    <cite>FlowActivityEntity</cite>.<br>
@@ -177,4 +179,17 @@
       TimelineReaderContext context,
       TimelineEntityFilters filters,
       TimelineDataToRetrieve dataToRetrieve) throws IOException;
+
+  /**
+   * The API to list all available entity types of the given context.
+   *
+   * @param context A context defines the scope of this query. The incoming
+   * context should contain at least the cluster id and application id.
+   *
+   * @return A set of entity types available in the given context.
+   *
+   * @throws IOException if an exception occurred while listing from backend
+   * storage.
+   */
+  Set<String> getEntityTypes(TimelineReaderContext context) throws IOException;
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
index 663a18a..12bc1cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
@@ -21,10 +21,12 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 
 /**
  * This interface is for storing application timeline information.
@@ -34,25 +36,19 @@
 public interface TimelineWriter extends Service {
 
   /**
-   * Stores the entire information in {@link TimelineEntities} to the
-   * timeline store. Any errors occurring for individual write request objects
-   * will be reported in the response.
+   * Stores the entire information in {@link TimelineEntities} to the timeline
+   * store. Any errors occurring for individual write request objects will be
+   * reported in the response.
    *
-   * @param clusterId context cluster ID
-   * @param userId context user ID
-   * @param flowName context flow name
-   * @param flowVersion context flow version
-   * @param flowRunId run id for the flow.
-   * @param appId context app ID.
-   * @param data
-   *          a {@link TimelineEntities} object.
+   * @param context a {@link TimelineCollectorContext}
+   * @param data a {@link TimelineEntities} object.
+   * @param callerUgi {@link UserGroupInformation}.
    * @return a {@link TimelineWriteResponse} object.
-   * @throws IOException if there is any exception encountered while storing
-   *     or writing entities to the backend storage.
+   * @throws IOException if there is any exception encountered while storing or
+   *           writing entities to the back end storage.
    */
-  TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities data) throws IOException;
+  TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities data, UserGroupInformation callerUgi) throws IOException;
 
   /**
    * Aggregates the entity information to the timeline store based on which
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
index 7f7d640..203d950 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java
@@ -371,5 +371,4 @@
     return (obj instanceof Short) || (obj instanceof Integer) ||
         (obj instanceof Long);
   }
-
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..4389219
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.yarn.server.timelineservice.security.CollectorNodemanagerSecurityInfo
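
This registration is what makes the new SecurityInfo discoverable: Hadoop's
security layer locates SecurityInfo providers through the JDK ServiceLoader
mechanism. Conceptually (a sketch, assuming a Configuration "conf" is in
scope):

    for (SecurityInfo info : ServiceLoader.load(SecurityInfo.class)) {
      KerberosInfo ki =
          info.getKerberosInfo(CollectorNodemanagerProtocolPB.class, conf);
      if (ki != null) {
        // CollectorNodemanagerSecurityInfo answered for this protocol.
      }
    }
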
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
index 7bc89c5..af9acce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
@@ -95,7 +95,7 @@
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollector(appId);
+              new AppLevelTimelineCollectorWithAgg(appId, "user");
           return (collectorManager.putIfAbsent(appId, collector) == collector);
         }
       };
@@ -126,7 +126,7 @@
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollector(appId);
+              new AppLevelTimelineCollectorWithAgg(appId, "user");
           boolean successPut =
               (collectorManager.putIfAbsent(appId, collector) == collector);
           return successPut && collectorManager.remove(appId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
index 0f17553..ec45428 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
@@ -41,8 +41,6 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -156,9 +154,8 @@
     collector.putEntities(
         entities, UserGroupInformation.createRemoteUser("test-user"));
 
-    verify(writer, times(1)).write(
-        anyString(), anyString(), anyString(), anyString(), anyLong(),
-        anyString(), any(TimelineEntities.class));
+    verify(writer, times(1)).write(any(TimelineCollectorContext.class),
+        any(TimelineEntities.class), any(UserGroupInformation.class));
     verify(writer, times(1)).flush();
   }
 
@@ -175,9 +172,8 @@
     collector.putEntitiesAsync(
         entities, UserGroupInformation.createRemoteUser("test-user"));
 
-    verify(writer, times(1)).write(
-        anyString(), anyString(), anyString(), anyString(), anyLong(),
-        anyString(), any(TimelineEntities.class));
+    verify(writer, times(1)).write(any(TimelineCollectorContext.class),
+        any(TimelineEntities.class), any(UserGroupInformation.class));
     verify(writer, never()).flush();
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
index 4bd37f8..f760834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
@@ -238,7 +238,7 @@
       assertEquals(3, entity.getConfigs().size());
       assertEquals(3, entity.getMetrics().size());
       assertTrue("UID should be present",
-          entity.getInfo().containsKey(TimelineReaderManager.UID_KEY));
+          entity.getInfo().containsKey(TimelineReaderUtils.UID_KEY));
       // Includes UID.
       assertEquals(3, entity.getInfo().size());
       // No events will be returned as events are not part of fields.
@@ -265,7 +265,7 @@
       assertEquals(3, entity.getConfigs().size());
       assertEquals(3, entity.getMetrics().size());
       assertTrue("UID should be present",
-          entity.getInfo().containsKey(TimelineReaderManager.UID_KEY));
+          entity.getInfo().containsKey(TimelineReaderUtils.UID_KEY));
       // Includes UID.
       assertEquals(3, entity.getInfo().size());
       assertEquals(2, entity.getEvents().size());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
new file mode 100644
index 0000000..bd4f0c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
@@ -0,0 +1,380 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Principal;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderWhitelistAuthorizationFilter;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Unit tests for {@link TimelineReaderWhitelistAuthorizationFilter}.
+ */
+public class TestTimelineReaderWhitelistAuthorizationFilter {
+
+  private static final String GROUP1_NAME = "group1";
+  private static final String GROUP2_NAME = "group2";
+  private static final String GROUP3_NAME = "group3";
+  private static final String[] GROUP_NAMES =
+      new String[] {GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
+
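+  // A minimal FilterConfig backed by a map, used to pass init parameters
+  // to the filter under test.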
+  private static class DummyFilterConfig implements FilterConfig {
+    private final Map<String, String> map;
+
+    DummyFilterConfig(Map<String, String> map) {
+      this.map = map;
+    }
+
+    @Override
+    public String getFilterName() {
+      return "dummy";
+    }
+
+    @Override
+    public String getInitParameter(String arg0) {
+      return map.get(arg0);
+    }
+
+    @Override
+    public Enumeration<String> getInitParameterNames() {
+      return Collections.enumeration(map.keySet());
+    }
+
+    @Override
+    public ServletContext getServletContext() {
+      return null;
+    }
+  }
+
+  @Test
+  public void checkFilterAllowedUser() throws ServletException, IOException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user1,user2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user1";
+      }
+    });
+
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    f.doFilter(mockHsr, r, null);
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterNotAllowedUser() throws ServletException, IOException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user1,user2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "testuser1";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    f.doFilter(mockHsr, r, null);
+  }
+
+  @Test
+  public void checkFilterAllowedUserGroup()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user2 group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user1";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user1", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterNotAllowedGroup()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        " group5,group6");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user200";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user200", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterAllowAdmins()
+      throws ServletException, IOException, InterruptedException {
+    // check that users in admin acl list are allowed to read
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user3 group5,group6");
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, " group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user90";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterAllowAdminsWhenNoUsersSet()
+      throws ServletException, IOException, InterruptedException {
+    // check that users in admin acl list are allowed to read
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, " group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user90";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterAllowNoOneWhenAdminAclsEmptyAndUserAclsEmpty()
+      throws ServletException, IOException, InterruptedException {
+    // check that no one can read when neither admin acls nor allowed
+    // read users are set
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user88";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user88", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterReadAuthDisabledNoAclSettings()
+      throws ServletException, IOException, InterruptedException {
+    // Default settings for Read Auth Enabled (false)
+    // No values in admin acls or allowed read user list
+    Map<String, String> map = new HashMap<String, String>();
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user437";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user437", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterReadAuthDisabledButAclSettingsPopulated()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    // Default settings for Read Auth Enabled (false)
+    // But some values in admin acls and allowed read user list
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, "user1,user2 group9,group21");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user27,user36 group5,group6");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+
+    HttpServletRequest mockHsr = mock(HttpServletRequest.class);
+    when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user37";
+      }
+    });
+
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        // both username and group name are not part of admin and
+        // read allowed users
+        // but read auth is turned off
+        UserGroupInformation.createUserForTesting("user37", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+
+    // test with username in read allowed users
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user27";
+      }
+    });
+    ServletResponse r2 = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user2 =
+        UserGroupInformation.createUserForTesting("user27", GROUP_NAMES);
+    user2.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r2, null);
+        return null;
+      }
+    });
+
+    // test with username in admin users
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user2";
+      }
+    });
+    ServletResponse r3 = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user3 =
+        UserGroupInformation.createUserForTesting("user2", GROUP_NAMES);
+    user3.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r3, null);
+        return null;
+      }
+    });
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
index d5e791b..12b3fc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
@@ -53,21 +53,30 @@
     assertEquals(context, TimelineUIDConverter.APPLICATION_UID.decodeUID(uid));
 
     context = new TimelineReaderContext("yarn_cluster", "root", "hive_join",
-        1234L, "application_1111111111_1111", "YARN_CONTAINER",
+        1234L, "application_1111111111_1111", "YARN_CONTAINER", 12345L,
         "container_1111111111_1111_01_000001");
     uid = TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context);
     assertEquals("yarn_cluster!root!hive_join!1234!application_1111111111_1111!"
-        + "YARN_CONTAINER!container_1111111111_1111_01_000001", uid);
+        + "YARN_CONTAINER!12345!container_1111111111_1111_01_000001", uid);
     assertEquals(
         context, TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uid));
     context = new TimelineReaderContext("yarn_cluster", null, null, null,
-        "application_1111111111_1111", "YARN_CONTAINER",
+        "application_1111111111_1111", "YARN_CONTAINER", 54321L,
         "container_1111111111_1111_01_000001");
     uid = TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context);
     assertEquals("yarn_cluster!application_1111111111_1111!YARN_CONTAINER!" +
-        "container_1111111111_1111_01_000001", uid);
+        "54321!container_1111111111_1111_01_000001", uid);
     assertEquals(
         context, TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uid));
+
+    context = new TimelineReaderContext("yarn_cluster", null, null, null, null,
+        "YARN_CONTAINER", 54321L, "container_1111111111_1111_01_000001",
+        "user1");
+    uid = TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.encodeUID(context);
+    assertEquals("yarn_cluster!user1!YARN_CONTAINER!"
+        + "54321!container_1111111111_1111_01_000001", uid);
+    assertEquals(context,
+        TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.decodeUID(uid));
   }
 
   @Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
index 35af169..46873ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
@@ -319,7 +319,7 @@
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -334,7 +334,7 @@
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", null, null, null, "app1", "app",
         "id_1"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -351,7 +351,7 @@
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", null, null, null, "app2",
         "app", "id_5"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_5")).toString(),
         result.getIdentifier().toString());
@@ -365,7 +365,8 @@
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
         new TimelineDataToRetrieve(null, null,
-        EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS), null));
+        EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS), null, null,
+        null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -383,7 +384,8 @@
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -398,8 +400,9 @@
   public void testGetAllEntities() throws Exception {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
-        "app", null), new TimelineEntityFilters(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        "app", null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     // All 4 entities will be returned
     Assert.assertEquals(4, result.size());
   }
@@ -409,8 +412,8 @@
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(2L, null, null, null, null, null, null,
-        null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().entityLimit(2L).build(),
+        new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Needs to be rewritten once hashcode and equals for
     // TimelineEntity is implemented
@@ -424,8 +427,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(3L, null, null, null, null, null, null,
-        null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().entityLimit(3L).build(),
+        new TimelineDataToRetrieve());
     // Even though 2 entities out of 4 have same created time, one entity
     // is left out due to limit
     Assert.assertEquals(3, result.size());
@@ -437,8 +440,8 @@
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, 1425016502030L, 1425016502060L, null,
-        null, null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502030L)
+            .createTimeEnd(1425016502060L).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_4 should be returned.
@@ -452,9 +455,9 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
             "app", null),
-            new TimelineEntityFilters(null, null, 1425016502010L, null, null,
-            null, null, null, null),
-            new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502010L)
+            .build(),
+        new TimelineDataToRetrieve());
     Assert.assertEquals(3, result.size());
     for (TimelineEntity entity : result) {
       if (entity.getId().equals("id_4")) {
@@ -466,8 +469,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, 1425016502010L, null, null, null,
-        null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502010L)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -486,8 +489,7 @@
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_3 should be returned.
@@ -506,8 +508,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -525,8 +527,7 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, eventFilters),
+        new TimelineEntityFilters.Builder().eventFilters(eventFilters).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -542,8 +543,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_1 and id_2 should be returned.
@@ -569,8 +570,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -592,8 +593,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -610,8 +611,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for(TimelineEntity entity : result) {
@@ -628,8 +629,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList4, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -641,8 +642,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -665,8 +666,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_2 and id_3 should be returned.
@@ -684,8 +685,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -702,8 +703,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -715,8 +716,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -731,8 +732,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -749,8 +750,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -762,8 +763,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -780,8 +781,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -793,8 +794,8 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -815,8 +816,7 @@
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, relatesTo, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_1 should be returned.
@@ -835,8 +835,7 @@
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, isRelatedTo, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(isRelatedTo).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_1 and id_3 should be returned.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
index 4f12c57..bb9f54f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
@@ -30,11 +30,13 @@
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.Rule;
 import org.junit.Test;
@@ -89,8 +91,10 @@
           outputRoot);
       fsi.init(conf);
       fsi.start();
-      fsi.write("cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
-          "app_id", te);
+      fsi.write(
+          new TimelineCollectorContext("cluster_id", "user_id", "flow_name",
+              "flow_version", 12345678L, "app_id"),
+          te, UserGroupInformation.createRemoteUser("user_id"));
 
       String fileName = fsi.getOutputRoot() + File.separator + "entities" +
           File.separator + "cluster_id" + File.separator + "user_id" +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
index 63c2cf3..07478ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilterInitializer.java
@@ -22,20 +22,23 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import junit.framework.TestCase;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.junit.Test;
 
-public class TestAmFilterInitializer extends TestCase {
+/**
+ * Test class for {@link AmFilterInitializer}.
+ */
+public class TestAmFilterInitializer {
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
+  @Before
+  public void setUp() throws Exception {
     NetUtils.addStaticResolution("host1", "172.0.0.1");
     NetUtils.addStaticResolution("host2", "172.0.0.1");
     NetUtils.addStaticResolution("host3", "172.0.0.1");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index 3e3580c..8a6c137 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -129,6 +129,7 @@
 
 
 The figure shows a sequence diagram for the following job execution flow:
+
 1. The Router receives an application submission request that is compliant with the YARN Application Client Protocol.
 2. The router interrogates a routing table / policy to choose the “home RM” for the job (the policy configuration is received from the state-store on heartbeat).
 3. The router queries the membership state to determine the endpoint of the home RM.
@@ -160,15 +161,50 @@
 | Property | Example | Description |
 |:---- |:---- |
 |`yarn.federation.enabled` | `true` | Whether federation is enabled or not |
+|`yarn.resourcemanager.cluster-id` | `<unique-subcluster-id>` | The unique subcluster identifier for this RM (same as the one used for HA). |
+
+#### State-Store:
+
+Currently, we support ZooKeeper- and SQL-based implementations of the state-store.
+
+**Note:** The state-store implementation must always be overridden with one of the options below.
+
+ZooKeeper: one must set the ZooKeeper settings for Hadoop:
+
+| Property | Example | Description |
+|:---- |:---- |
+|`yarn.federation.state-store.class` | `org.apache.hadoop.yarn.server.federation.store.impl.ZookeeperFederationStateStore` | The type of state-store to use. |
+|`hadoop.zk.address` | `host:port` | The address for the ZooKeeper ensemble. |
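+
+For example, a minimal `conf/yarn-site.xml` fragment for the ZooKeeper state-store (the
+ensemble address is illustrative):
+
+```
+<property>
+  <name>yarn.federation.state-store.class</name>
+  <value>org.apache.hadoop.yarn.server.federation.store.impl.ZookeeperFederationStateStore</value>
+</property>
+<property>
+  <name>hadoop.zk.address</name>
+  <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
+</property>
+```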
+
+SQL: one must set up the following parameters:
+
+| Property | Example | Description |
+|:---- |:---- |
 |`yarn.federation.state-store.class` | `org.apache.hadoop.yarn.server.federation.store.impl.SQLFederationStateStore` | The type of state-store to use. |
 |`yarn.federation.state-store.sql.url` | `jdbc:mysql://<host>:<port>/FederationStateStore` | For SQLFederationStateStore the name of the DB where the state is stored. |
 |`yarn.federation.state-store.sql.jdbc-class` | `com.mysql.jdbc.jdbc2.optional.MysqlDataSource` | For SQLFederationStateStore the jdbc class to use. |
 |`yarn.federation.state-store.sql.username` | `<dbuser>` | For SQLFederationStateStore the username for the DB connection. |
 |`yarn.federation.state-store.sql.password` | `<dbpass>` | For SQLFederationStateStore the password for the DB connection. |
-|`yarn.resourcemanager.cluster-id` | `<unique-subcluster-id>` | The unique subcluster identifier for this RM (same as the one used for HA). |
+
+We provide scripts for MySQL and Microsoft SQL Server.
+
+For MySQL, one must download the latest jar version 5.x from [MVN Repository](https://mvnrepository.com/artifact/mysql/mysql-connector-java) and add it to the CLASSPATH.
+Then the DB schema is created by executing the following SQL scripts in the database:
+
+1. **sbin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql**.
+2. **sbin/FederationStateStore/MySQL/FederationStateStoreUser.sql**.
+3. **sbin/FederationStateStore/MySQL/FederationStateStoreTables.sql**.
+4. **sbin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql**.
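+
+For example, the scripts might be applied with the `mysql` command-line client roughly as
+follows (connection details are illustrative):
+
+```
+mysql -u root -p < sbin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql
+mysql -u root -p < sbin/FederationStateStore/MySQL/FederationStateStoreUser.sql
+mysql -u root -p < sbin/FederationStateStore/MySQL/FederationStateStoreTables.sql
+mysql -u root -p < sbin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql
+```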
+
+In the same directory we provide scripts to drop the Stored Procedures, the Tables, the User and the Database.
+
+**Note:** FederationStateStoreUser.sql defines a default user/password for the DB; you are **highly encouraged** to change this to a proper strong password.
+
+For SQL-Server, the process is similar, but the JDBC driver is already included.
+SQL-Server scripts are located in **sbin/FederationStateStore/SQLServer/**.
 
 
-Optional:
+#### Optional:
 
 | Property | Example | Description |
 |:---- |:---- |
@@ -236,22 +272,6 @@
 |`yarn.federation.statestore.max-connections` | `1` | The maximum number of parallel connections from each AMRMProxy to the state-store. This value is typically lower than the router one, since we have many AMRMProxy that could burn-through many DB connections quickly. |
 |`yarn.federation.cache-ttl.secs` | `300` | The time to live for the AMRMProxy cache. Typically larger than at the router, as the number of AMRMProxy is large, and we want to limit the load to the centralized state-store. |
 
-###State-Store:
-
-Currently, we support only SQL based implementation of state-store (ZooKeeper is in the works), i.e. either MySQL or Microsoft SQL Server.
-
-For MySQL, one must download the latest jar version 5.x from [MVN Repository](https://mvnrepository.com/artifact/mysql/mysql-connector-java) and add it to the CLASSPATH.
-Then the DB schema is created by executing the following SQL scripts in the database:
-1. **sbin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql**.
-2. **sbin/FederationStateStore/MySQL/FederationStateStoreUser.sql**.
-3. **sbin/FederationStateStore/MySQL/FederationStateStoreTables.sql**.
-4. **sbin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql**.
-In the same directory we provide scripts to drop the Stored Procedures, the Tables, the User and the Database.
-**Note:** the FederationStateStoreUser.sql defines a default user/password for the DB that you are **highly encouraged** to set this to a proper strong password.
-
-For SQL-Server, the process is similar, but the jdbc driver is already included in the pom (license allows it).
-SQL-Server scripts are located in **sbin/FederationStateStore/SQLServer/**.
-
 Running a Sample Job
 --------------------
 In order to submit jobs to a Federation cluster one must create a separate set of configs for the client from which jobs will be submitted. In these, the **conf/yarn-site.xml** should have the following additional configurations:
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 04822c9..2de305d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -73,12 +73,8 @@
 
 ### <a name="Current_Status"></a>Current Status and Future Plans
 
-YARN Timeline Service v.2 is currently in alpha ("alpha 1"). It is very much work in progress, and
-many things can and will change rapidly. Users must enable Timeline Service v.2 only on a test or
-experimental cluster to test the feature.
-
-Most importantly, **security is not enabled**. Do not set up or use Timeline Service v.2 until
-security is implemented if security is a requirement.
+YARN Timeline Service v.2 is currently in alpha ("alpha 2"). It is a work in progress, and
+many things can and will change rapidly.
 
 A complete end-to-end flow of writes and reads is functional, with Apache HBase as the backend.
 You should be able to start generating data. When enabled, all YARN-generic events are
@@ -95,16 +91,19 @@
 instance. Currently, it is not possible to write to Timeline Service outside the context of a YARN
 application (i.e. no off-cluster client).
 
+Starting from alpha2, Timeline Service v.2 supports simple authorization in terms of a
+configurable whitelist of users and groups who can read timeline data. Cluster admins are
+allowed by default to read timeline data.
+
 When YARN Timeline Service v.2 is disabled, one can expect no functional or performance impact
 on any other existing functionality.
 
 The work to make it truly production-ready continues. Some key items include
 
 * More robust storage fault tolerance
-* Security
 * Support for off-cluster clients
-* More complete and integrated web UI
 * Better support for long-running apps
+* Support for ACLs
 * Offline (time-based periodic) aggregation for flows, users, and queues for reporting and
 analysis
 * Timeline collectors as separate instances from node managers
@@ -127,6 +126,7 @@
 | **`yarn.timeline-service.writer.class`** | The class for the backend storage writer. Defaults to HBase storage writer. |
 | **`yarn.timeline-service.reader.class`** | The class for the backend storage reader. Defaults to HBase storage reader. |
 | **`yarn.system-metrics-publisher.enabled`** | The setting that controls whether yarn system metrics is published on the Timeline service or not by RM and NM. Defaults to `false`. |
+| **`yarn.timeline-service.schema.prefix`** | The schema prefix for HBase tables. Defaults to "prod.". |
 
 #### Advanced configuration
 
@@ -136,12 +136,34 @@
 | `yarn.timeline-service.address` | Address for the Timeline server to start the RPC server. Defaults to `${yarn.timeline-service.hostname}:10200`. |
 | `yarn.timeline-service.webapp.address` | The http address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8188`. |
 | `yarn.timeline-service.webapp.https.address` | The https address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8190`. |
+| **`yarn.timeline-service.hbase.configuration.file`** | Optional URL to an hbase-site.xml configuration file to be used to connect to the timeline-service HBase cluster. If empty or not specified, the HBase configuration will be loaded from the classpath. When specified, the values in the specified configuration file will override those present on the classpath. Defaults to `null`. |
 | **`yarn.timeline-service.writer.flush-interval-seconds`** | The setting that controls how often the timeline collector flushes the timeline writer. Defaults to `60`. |
 | **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period until which the application collector will be alive in NM, after the application master container finishes. Defaults to `1000` (1 second). |
 | **`yarn.timeline-service.timeline-client.number-of-async-entities-to-merge`** | The Timeline v.2 client tries to merge this many async entities (if available) and then calls the REST ATS v2 API to submit them. Defaults to `10`. |
 | **`yarn.timeline-service.hbase.coprocessor.app-final-value-retention-milliseconds`** | The setting that controls how long the final value of a metric of a completed app is retained before merging into the flow sum. Defaults to `259200000` (3 days). This should be set in the HBase cluster. |
 | **`yarn.rm.system-metrics-publisher.emit-container-events`** | The setting that controls whether yarn container metrics is published to the timeline server or not by RM. This configuration setting is for ATS V2. Defaults to `false`. |
 
+#### Security Configuration
+
+Security can be enabled by setting `yarn.timeline-service.http-authentication.type`
+to `kerberos`, after which the following configuration options are available:
+
+| Configuration Property | Description |
+|:---- |:---- |
+| `yarn.timeline-service.http-authentication.type` | Defines authentication used for the timeline server (collector/reader) HTTP endpoint. Supported values are: `simple` / `kerberos` / #AUTHENTICATION_HANDLER_CLASSNAME#. Defaults to `simple`. |
+| `yarn.timeline-service.http-authentication.simple.anonymous.allowed` | Indicates if anonymous requests are allowed by the timeline server when using 'simple' authentication. Defaults to `true`. |
+| `yarn.timeline-service.http-authentication.kerberos.principal` | The Kerberos principal to be used for the Timeline Server (Collector/Reader) HTTP endpoint. |
+| `yarn.timeline-service.http-authentication.kerberos.keytab` | The Kerberos keytab to be used for the Timeline Server (Collector/Reader) HTTP endpoint. |
+| `yarn.timeline-service.principal` | The Kerberos principal for the timeline reader. NM principal would be used for timeline collector as it runs as an auxiliary service inside NM. |
+| `yarn.timeline-service.keytab` | The Kerberos keytab for the timeline reader. NM keytab would be used for timeline collector as it runs as an auxiliary service inside NM. |
+| `yarn.timeline-service.delegation.key.update-interval` | Defaults to `86400000` (1 day). |
+| `yarn.timeline-service.delegation.token.renew-interval` | Defaults to `86400000` (1 day). |
+| `yarn.timeline-service.delegation.token.max-lifetime` | Defaults to `604800000` (7 days). |
+| `yarn.timeline-service.read.authentication.enabled` | Enables or disables authorization checks for reading timeline service v2 data. Defaults to `false` (disabled). |
+| `yarn.timeline-service.read.allowed.users` | Comma-separated list of users, followed by a space, then a comma-separated list of groups. This list of users and groups is allowed to read the data; everyone else is rejected. Defaults to none. If authorization is enabled, this configuration is mandatory. |
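+
+For example, to allow only a specific set of users and groups to read (the user and group
+names here are illustrative):
+
+```
+<property>
+  <name>yarn.timeline-service.read.authentication.enabled</name>
+  <value>true</value>
+</property>
+<property>
+  <name>yarn.timeline-service.read.allowed.users</name>
+  <value>user1,user2 group1,group2</value>
+</property>
+```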
+
 #### Enabling CORS support
 To enable cross-origin support (CORS) for the Timeline Service v.2, please set the following configuration parameters:
 
@@ -156,39 +178,92 @@
 ### <a name="Enabling_Timeline_Service_v2"></a>Enabling Timeline Service v.2
 
 #### Preparing Apache HBase cluster for storage
+There are a few steps to prepare the storage for Timeline Service v.2:
+
+Step 1) [Set up the HBase cluster](#Set_up_the_HBase_cluster)
+
+Step 2) [Enable the coprocessor](#Enable_the_coprocessor)
+
+Step 3) [Create the schema for Timeline Service v.2](#Create_schema)
+
+Each step is explained in more detail below.
+
+##### <a name="Set_up_the_HBase_cluster"> </a>Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.1.x. The 1.0.x versions
-do not work with Timeline Service v.2. The 1.2.x versions have not been tested.
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.6. The 1.0.x versions
+do not work with Timeline Service v.2. Later versions of HBase have not been tested with
+Timeline Service.
 
-Once you have an Apache HBase cluster ready to use for this purpose, perform the following steps.
+HBase has different deployment modes. Refer to the HBase book
+(http://hbase.apache.org/book.html#standalone_dist) to understand them and pick a mode that
+is suitable for your setup.
 
-First, add the timeline service jar to the HBase classpath in all HBase machines in the cluster. It
-is needed for the coprocessor as well as the schema creator. For example,
+##### Simple deployment for HBase
+If you are intent on a simple deploy profile for the Apache HBase cluster
+where the data loading is light but the data needs to persist across node
+comings and goings, you could consider the "Standalone HBase over HDFS" deploy mode.
 
-    cp hadoop-yarn-server-timelineservice-hbase-3.0.0-alpha1-SNAPSHOT.jar /usr/hbase/lib/
+This is a useful variation on the standalone HBase setup and has all HBase daemons running inside
+one JVM but rather than persisting to the local filesystem, it persists to an HDFS instance.
+Writing to HDFS where data is replicated ensures that data is persisted across node
+comings and goings. To configure this standalone variant, edit your `hbase-site.xml` setting
+the `hbase.rootdir` to point at a directory in your HDFS instance but then set
+`hbase.cluster.distributed` to false. For example:
 
-Then, enable the coprocessor that handles the aggregation. To enable it, add the following entry in
-region servers' `hbase-site.xml` file (generally located in the `conf` directory) as follows:
+```
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://namenode.example.org:8020/hbase</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+  </property>
+</configuration>
+```
+
+For more details on this mode, refer to
+http://hbase.apache.org/book.html#standalone.over.hdfs .
+
+Once you have an Apache HBase cluster ready to use, perform the following steps.
+
+##### <a name="Enable_the_coprocessor"> </a>Step 2) Enable the coprocessor
+In this version, the coprocessor is loaded dynamically (table coprocessor for the `flowrun` table).
+
+Copy the timeline service jar to HDFS from where HBase can load it. It
+is needed for the `flowrun` table creation in the schema creator. The default HDFS location is `/hbase/coprocessor`.
+For example,
+
+    hadoop fs -mkdir /hbase/coprocessor
+    hadoop fs -put hadoop-yarn-server-timelineservice-hbase-3.0.0-alpha1-SNAPSHOT.jar \
+           /hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
+
+
+If you want to place the jar at a different location on HDFS, there is also a YARN
+configuration setting called `yarn.timeline-service.hbase.coprocessor.jar.hdfs.location`.
+For example,
 
 ```
 <property>
-  <name>hbase.coprocessor.region.classes</name>
-  <value>org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunCoprocessor</value>
+  <name>yarn.timeline-service.hbase.coprocessor.jar.hdfs.location</name>
+  <value>/custom/hdfs/path/jarName</value>
 </property>
 ```
 
-Restart the region servers and the master to pick up the timeline service jar as well as the config
-change. In this version, the coprocessor is loaded statically (i.e. system coprocessor) as opposed
-to a dynamically (table coprocessor).
-
+##### <a name="Create_schema"> </a>Step 3) Create the timeline service schema
 Finally, run the schema creator tool to create the necessary tables:
 
     bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator -create
 
 The `TimelineSchemaCreator` tool supports a few options that may come handy especially when you
 are testing. For example, you can use `-skipExistingTable` (`-s` for short) to skip existing tables
-and continue to create other tables rather than failing the schema creation. When no option or '-help'
-('-h' for short) is provided, the command usage is printed.
+and continue to create other tables rather than failing the schema creation. By default, the tables
+will have a schema prefix of "prod.". When no option or '-help' ('-h' for short) is provided, the
+command usage is printed.
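+
+For example, a run that skips tables which already exist might look like this (a sketch;
+the flags are as documented above):
+
+    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator -create -s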
 
 #### Enabling Timeline Service v.2
 Following are the basic configurations to start Timeline service v.2:
@@ -241,7 +316,22 @@
 ```
 
 Also, add the `hbase-site.xml` configuration file to the client Hadoop cluster configuration so
-that it can write data to the Apache HBase cluster you are using.
+that it can write data to the Apache HBase cluster you are using, or set
+`yarn.timeline-service.hbase.configuration.file` to a file URL pointing to that
+`hbase-site.xml`. For example:
+
+```
+<property>
+  <description> Optional URL to an hbase-site.xml configuration file to be
+  used to connect to the timeline-service hbase cluster. If empty or not
+  specified, then the HBase configuration will be loaded from the classpath.
+  When specified the values in the specified configuration file will override
+  those from the ones that are present on the classpath.
+  </description>
+  <name>yarn.timeline-service.hbase.configuration.file</name>
+  <value>file:/etc/hbase/hbase-ats-dc1/hbase-site.xml</value>
+</property>
+```
 
 #### Running Timeline Service v.2
 Restart the resource manager as well as the node managers to pick up the new configuration. The
@@ -263,6 +353,15 @@
 </property>
 ```
 
+### Upgrade from alpha1 to alpha2
+If you are currently running Timeline Service v.2 alpha 1, we recommend the following:
+
+- Clear existing data in the tables (truncate the tables) since the row key for AppToFlow has changed.
+
+- The coprocessor is now a dynamically loaded, table-level coprocessor in alpha 2. We
+recommend dropping the `flowrun` table, replacing the coprocessor jar on HDFS with the alpha 2 one,
+restarting the region servers and recreating the `flowrun` table (see the sketch after this list).
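+
+A minimal sketch of these steps in the HBase shell, assuming the default "prod." schema
+prefix (the table names are illustrative; adjust them to your deployment):
+
+    hbase shell
+    > truncate 'prod.timelineservice.app_flow'
+    > disable 'prod.timelineservice.flowrun'
+    > drop 'prod.timelineservice.flowrun'
+
+After replacing the coprocessor jar on HDFS and restarting the region servers, re-run the
+schema creator with `-s` to recreate the dropped `flowrun` table.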
+
 ### <a name="Publishing_of_application_specific_data"></a> Publishing application specific data
 
 This section is for YARN application developers that want to integrate with Timeline Service v.2.
@@ -447,6 +546,8 @@
   "daterange=20150711-20150714" returns flows active between these 2 dates.<br/>
   "daterange=20150711-" returns flows active on and after 20150711.<br/>
   "daterange=-20150711" returns flows active on and before 20150711.<br/>
+1. `fromid` - If specified, retrieve the next set of flows starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
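+
+As a hedged example (host and port are illustrative), the next page of flows can be fetched
+with the `FROM_ID` value taken from a previous response:
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/clusters/test-cluster/flows?fromid=test-cluster!1460419200000!sjlee!ds-date'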
 
 #### Example JSON Response:
 
@@ -492,6 +593,7 @@
         "info": {
           "SYSTEM_INFO_CLUSTER": "test-cluster",
           "UID": "test-cluster!sjlee!ds-date",
+          "FROM_ID": "test-cluster!1460419200000!sjlee!ds-date",
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_DATE": 1460419200000,
           "SYSTEM_INFO_USER": "sjlee"
@@ -543,6 +645,8 @@
 1. `fields` - Specifies which fields to retrieve. For querying flow runs, only `ALL` or `METRICS` are valid fields.
   Other fields will lead to HTTP 400 (Bad Request) response. If not specified, in response, id, type, createdtime and info fields
   will be returned.
+1. `fromid` - If specified, retrieve the next set of flow run entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
 
 #### Example JSON Response:
 
@@ -555,6 +659,7 @@
         "createdtime": 1460420587974,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460420587974",
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460420587974",
           "SYSTEM_INFO_FLOW_RUN_ID": 1460420587974,
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_FLOW_RUN_END_TIME": 1460420595198,
@@ -571,6 +676,7 @@
         "createdtime": 1460420305659,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460420305659",
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460420305659",
           "SYSTEM_INFO_FLOW_RUN_ID": 1460420305659,
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_FLOW_RUN_END_TIME": 1460420311966,
@@ -642,6 +748,7 @@
       "isrelatedto": {},
       "info": {
         "UID":"yarn-cluster!varun!QuasiMonteCarlo!1465246348599",
+        "FROM_ID":"yarn-cluster!varun!QuasiMonteCarlo!1465246348599",
         "SYSTEM_INFO_FLOW_RUN_END_TIME":1465246378051,
         "SYSTEM_INFO_FLOW_NAME":"QuasiMonteCarlo",
         "SYSTEM_INFO_USER":"varun",
@@ -746,6 +853,10 @@
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of application entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
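+
+As a hedged sketch (the endpoint path, host and port are illustrative; see the HTTP request
+section above for the exact path), a page of apps can be fetched and continued via `fromid`:
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/clusters/test-cluster/users/sjlee/flows/ds-date/runs/1460419580171/apps?limit=2'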
 
 #### Example JSON Response:
 
@@ -760,6 +871,7 @@
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001"
+          "FROM_ID": "yarn-cluster!varun!QuasiMonteCarlo!1465246348599!application_1465246237936_0001",
         },
         "relatesto": { }
       },
@@ -773,6 +885,7 @@
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1464983628730_0005"
+          "FROM_ID": "yarn-cluster!varun!QuasiMonteCarlo!1465246348599!application_1464983628730_0005",
         },
         "relatesto": { }
       }
@@ -873,6 +986,10 @@
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of application entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
 
 #### Example JSON Response:
 
@@ -885,6 +1002,7 @@
         "createdtime": 1460419580171,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460419580171!application_1460419579913_0002"
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460419580171!application_1460419579913_0002",
         },
         "configs": {},
         "isrelatedto": {},
@@ -949,6 +1067,8 @@
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
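+
+For example, a minimal sketch (host, port and timestamps are illustrative) restricting an
+app's metrics to a time window:
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/apps/application_1465246237936_0001?fields=METRICS&metricsTimeStart=1465246355000&metricsTimeEnd=1465246360000'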
 
 #### Example JSON Response:
 
@@ -973,7 +1093,7 @@
 1. If flow context information cannot be retrieved or application for the given app id cannot be found, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
 
-### <a name="REST_API_LIST_ENTITIES"></a>Query generic entities
+### <a name="REST_API_LIST_ENTITIES_WITH_IN_SCOPE_OF_APPLICATION"></a>Query generic entities with in the scope of Application
 
 With this API, you can query generic entities identified by cluster ID, application ID and
 per-framework entity type. If the REST endpoint without the cluster name is used, the cluster
@@ -1073,6 +1193,10 @@
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
 
 #### Example JSON Response:
 
@@ -1087,6 +1211,7 @@
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000001"
+          "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
         },
         "relatesto": { }
       },
@@ -1100,6 +1225,7 @@
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000002"
+          "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000002"
         },
         "relatesto": { }
       }
@@ -1112,7 +1238,142 @@
 1. If flow context information cannot be retrieved, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
 
-### <a name="REST_API_LIST_ENTITY"></a>Query generic entity
+### <a name="REST_API_LIST_ENTITIES"></a>Query generic entities.
+
+With this API, you can query per-user generic entities identified by cluster ID, doAsUser and
+entity type. If the REST endpoint without the cluster name is used, the cluster
+specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken.
+If the number of matching entities is more than the limit, the most recent
+entities up to the limit will be returned. This endpoint can be used to query generic entities which
+clients put into the backend. For instance, we can query user entities by specifying the entity type as `TEZ_DAG_ID`.
+If none of the entities match the predicates, an empty list will be returned.
+**Note**: As of today, we can query only those entities which are published with a doAsUser that is different from the application owner.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/clusters/{cluster name}/users/{userid}/entities/{entitytype}
+
+    or
+
+    GET /ws/v2/timeline/users/{userid}/entities/{entitytype}
+
+#### Query Parameters Supported:
+
+1. `limit` - If specified, defines the number of entities to return. The maximum possible value for limit is the maximum value of Long. If it is not specified
+  or has a value less than 0, then limit will be considered as 100.
+1. `createdtimestart` - If specified, then only entities created after this timestamp are returned.
+1. `createdtimeend` -  If specified, then only entities created before this timestamp are returned.
+1. `relatesto` - If specified, matched entities must relate to or not relate to given entities associated with an entity type.
+  relatesto is represented as an expression of the form :<br/>
+  "(&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...,&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...) &lt;op&gt; !(&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...,&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...)".<br/>
+  If the relatesto expression has entity type - entity id(s) relations specified within enclosing brackets following "!", entities with
+  these relations in their relatesto field will not be returned. For expressions or subexpressions without "!", all entities which have the specified
+  relations in their relatesto field will be returned. "op" is a logical operator and can be either AND or OR. An entity type can be followed by any number
+  of entity id(s). We can combine any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_ : relatesto can be "(((type1:id1:id2:id3,type3:id9) AND !(type2:id7:id8)) OR (type1:id4))".<br/>
+  Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `isrelatedto` - If specified, matched entities must be related to or not related to given entities associated with an entity type. isrelatedto is
+  represented in the same form as relatesto.
+1. `infofilters` - If specified, matched entities must have exact matches to the given info key and must be either equal or not equal to
+  given value. The info key is a string but value can be any object. infofilters are represented as an expression of the form :<br/>
+  "(&lt;key&gt; &lt;compareop&gt; &lt;value&gt;) &lt;op&gt; (&lt;key&gt; &lt;compareop&gt; &lt;value&gt;)".<br/>
+  Here op can be either AND or OR, and compareop can be one of "eq", "ne" or "ene".<br/>
+  "eq" means equals, "ne" means not equals (existence of the key is not required for a match) and "ene" means not equals but existence of the key is
+  required. We can combine any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_ : infofilters can be "(((infokey1 eq value1) AND (infokey2 ne value1)) OR (infokey1 ene value3))".<br/>
+  Note : If value is an object then value can be given in the form of JSON format without any space.<br/>
+  _For example_ : infofilters can be (infokey1 eq {"&lt;key&gt;":"&lt;value&gt;","&lt;key&gt;":"&lt;value&gt;"...}).<br/>
+  Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `conffilters` - If specified, matched entities must have exact matches to the given config name and must be either equal or not equal
+  to the given config value. Both the config name and value must be strings. conffilters are represented in the same form as infofilters.
+1. `metricfilters` - If specified, matched entities must have exact matches to the given metric and satisfy the specified relation with the
+  metric value. Metric id must be a string and metric value must be an integral value. metricfilters are represented as an expression of the form :<br/>
+  "(&lt;metricid&gt; &lt;compareop&gt; &lt;metricvalue&gt;) &lt;op&gt; (&lt;metricid&gt; &lt;compareop&gt; &lt;metricvalue&gt;)"<br/>
+  Here op can be either AND or OR, and compareop can be one of "eq", "ne", "ene", "gt", "ge", "lt" and "le".<br/>
+  "eq" means equals, "ne" means not equals (existence of the metric is not required for a match), "ene" means not equals but existence of the metric is
+  required, "gt" means greater than, "ge" means greater than or equals, "lt" means less than and "le" means less than or equals. We can combine
+  any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_ : metricfilters can be "(((metric1 eq 50) AND (metric2 gt 40)) OR (metric1 lt 20))".<br/>
+  This in essence is an expression equivalent to "(metric1 == 50 AND metric2 &gt; 40) OR (metric1 &lt; 20)"<br/>
+  Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `eventfilters` - If specified, matched entities must contain or not contain the given events depending on the expression. eventfilters is
+  represented as an expression of the form :<br/>
+  "(&lt;eventid&gt;,&lt;eventid&gt;) &lt;op&gt; !(&lt;eventid&gt;,&lt;eventid&gt;,&lt;eventid&gt;)".<br/>
+  Here, "!" means none of the comma-separated list of events within the enclosed brackets proceeding "!" must exist for a match to occur.
+  If "!" is not specified, the specified events within the enclosed brackets must exist. op is a logical operator and can be either AND or OR.
+  We can combine any number of ANDs' and ORs' to create complex expressions. Brackets can be used to club expressions together.<br/>
+  _For example_ : eventfilters can be "(((event1,event2) AND !(event4)) OR (event3,event7,event5))".<br/>
+  Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `metricstoretrieve` - If specified, defines which metrics to retrieve or which ones not to retrieve and send back in response.
+  metricstoretrieve can be an expression of the form :<br/>
+  (&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics not matching any of the prefixes will be retrieved.<br/>
+  If metricstoretrieve is specified, metrics will be retrieved irrespective of whether `METRICS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `confstoretrieve` - If specified, defines which configs to retrieve or which ones not to retrieve and send back in response.
+  confstoretrieve can be an expression of the form :<br/>
+  (&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs not matching any of the prefixes will be retrieved.<br/>
+  If confstoretrieve is specified, configs will be retrieved irrespective of whether `CONFIGS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `fields` - Specifies which fields to retrieve. Possible values for fields can be `EVENTS`, `INFO`, `CONFIGS`, `METRICS`, `RELATES_TO`,
+ `IS_RELATED_TO` and `ALL`. All fields will be retrieved if `ALL` is specified. Multiple fields can be specified as a comma-separated list.
+  If fields is not specified, in response, entity id, entity type, createdtime and UID in info field will be returned.
+1. `metricslimit` - If specified, defines the number of metrics to return. Considered only if fields contains METRICS/ALL
+  or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
+  Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
+  considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
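+
+For example (host and port are illustrative), user entities of type `TEZ_DAG_ID` can be listed with:
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/users/sjlee/entities/TEZ_DAG_ID?limit=10'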
+
+#### Example JSON Response:
+
+    [
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000001",
+        "createdtime": 1465246358873,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000001"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000001"
+        },
+        "relatesto": { }
+      },
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000002",
+        "createdtime": 1465246359045,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000002!userX"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000002!userX"
+        },
+        "relatesto": { }
+      }
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITY"></a>Query generic entity with in the scope of Application
 
 With this API, you can query a specific generic entity identified by cluster ID, application ID,
 per-framework entity type and entity ID. If the REST endpoint without the cluster name is used, the
@@ -1166,6 +1427,9 @@
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `entityidprefix` - Defines the id prefix for the entity to be fetched. If specified, then entity retrieval will be faster.
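+
+For example, a hedged sketch (host, port and the prefix value are illustrative) fetching an
+attempt entity with its id prefix:
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/apps/application_1465246237936_0001/entities/YARN_APPLICATION_ATTEMPT/appattempt_1465246237936_0001_000001?entityidprefix=0'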
 
 #### Example JSON Response:
 
@@ -1178,7 +1442,8 @@
       "isrelatedto": { },
       "configs": { },
       "info": {
-        "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000001"
+        "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
+        "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
       },
       "relatesto": { }
     }
@@ -1189,3 +1454,117 @@
 1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
 1. If flow context information cannot be retrieved or entity for the given entity id cannot be found, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITIES"></a>Query generic entity.
+
+With this API, you can query a per-user generic entity identified by cluster ID, doAsUser,
+entity type and entity ID. If the REST endpoint without the cluster name is used, the cluster
+specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken.
+If the number of matching entities is more than the limit, the most recent
+entities up to the limit will be returned. This endpoint can be used to query a generic entity which
+clients put into the backend. For instance, we can query a user entity by specifying the entity type as `TEZ_DAG_ID`.
+If none of the entities match the predicates, an empty list will be returned.
+**Note**: As of today, we can query only those entities which are published with a doAsUser that is different from the application owner.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/clusters/{cluster name}/users/{userid}/entities/{entitytype}/{entityid}
+
+    or
+
+    GET /ws/v2/timeline/users/{userid}/entities/{entitytype}/{entityid}
+
+#### Query Parameters Supported:
+
+1. `metricstoretrieve` - If specified, defines which metrics to retrieve or which ones not to retrieve and send back in response.
+  metricstoretrieve can be an expression of the form :<br/>
+  (&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics not matching any of the prefixes will be retrieved.<br/>
+  If metricstoretrieve is specified, metrics will be retrieved irrespective of whether `METRICS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `confstoretrieve` - If specified, defines which configs to retrieve or which ones not to retrieve and send back in response.
+  confstoretrieve can be an expression of the form :<br/>
+  (&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs not matching any of the prefixes will be retrieved.<br/>
+  If confstoretrieve is specified, configs will be retrieved irrespective of whether `CONFIGS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `fields` - Specifies which fields to retrieve. Possible values for fields can be `EVENTS`, `INFO`, `CONFIGS`, `METRICS`, `RELATES_TO`,
+ `IS_RELATED_TO` and `ALL`. All fields will be retrieved if `ALL` is specified. Multiple fields can be specified as a comma-separated list.
+  If fields is not specified, in response, entity id, entity type, createdtime and UID in info field will be returned.
+1. `metricslimit` - If specified, defines the number of metrics to return. Considered only if fields contains METRICS/ALL
+  or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
+  Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
+  considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid (inclusive).
+   fromid should be taken from the value associated with the FROM_ID info key in a previous response.
+
+#### Example JSON Response:
+
+    [
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000001",
+        "createdtime": 1465246358873,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000001!userX"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000001!userX"
+        },
+        "relatesto": { }
+      }
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITY_TYPES"></a>Query generic entity types
+
+With this API, you can query the set of available entity types for a given app id. If the REST endpoint without the cluster name is used, the cluster specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken. If the optional query parameters userid, flow name and flow run id are not specified, they will be queried based on app id and cluster id from the flow context information stored in the underlying storage implementation.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/apps/{appid}/entity-types
+
+    or
+
+    GET /ws/v2/timeline/clusters/{clusterid}/apps/{appid}/entity-types
+
+#### Query Parameters Supported:
+
+1. `userid` - If specified, entity must belong to this user. This query param must be specified along with flowname and flowrunid query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, then the timeline reader will fetch the flow context information based on cluster and appid while executing the query.
+1. `flowname` - If specified, entity must belong to this flow name. This query param must be specified along with userid and flowrunid query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, the flow context information will be fetched based on cluster and appid while executing the query.
+1. `flowrunid` - If specified, entity must belong to this flow run id. This query param must be specified along with userid and flowname query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, the flow context information will be fetched based on cluster and appid while executing the query.
+
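+For example (cluster name, host and port are illustrative):
+
+    curl 'http://<timeline-reader-host>:8188/ws/v2/timeline/clusters/test-cluster/apps/application_1465246237936_0001/entity-types'
+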
+#### Example JSON Response:
+
+    [
+      "YARN_APPLICATION_ATTEMPT",
+      "YARN_CONTAINER",
+      "MAPREDUCE_JOB",
+      "MAPREDUCE_TASK",
+      "MAPREDUCE_TASK_ATTEMPT"
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
+1. If flow context information cannot be retrieved or entity for the given entity id cannot be found, HTTP 404 (Not Found) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 478377fa..5f430ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -189,7 +189,7 @@
 ```
   Usage: yarn rmadmin
      -refreshQueues
-     -refreshNodes [-g [timeout in seconds]]
+     -refreshNodes [-g|graceful [timeout in seconds] -client|server]
      -refreshNodesResources
      -refreshSuperUserGroupsConfiguration
      -refreshUserToGroupsMappings
@@ -214,7 +214,7 @@
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -refreshQueues | Reload the queues' acls, states and scheduler specific properties. ResourceManager will reload the mapred-queues configuration file. |
-| -refreshNodes [-g|graceful [timeout in seconds] -client|server] | Refresh the hosts information at the ResourceManager. -g option indicates graceful decommission of excluded hosts, in which case, the optional timeout indicates maximal time in seconds ResourceManager should wait before forcefully mark the node as decommissioned. |
+| -refreshNodes [-g&#124;graceful [timeout in seconds] -client&#124;server] | Refresh the hosts information at the ResourceManager. Here [-g&#124;graceful [timeout in seconds] -client&#124;server] is optional; if a timeout is specified, the ResourceManager will wait for it to elapse before marking the NodeManager as decommissioned. The -client&#124;server option indicates whether the timeout tracking should be handled by the client or the ResourceManager. The client-side tracking is blocking, while the server-side tracking is not. Omitting the timeout, or a timeout of -1, indicates an infinite timeout. Known Issue: the server-side tracking will immediately decommission if an RM HA failover occurs. |
 | -refreshNodesResources | Refresh resources of NodeManagers at the ResourceManager. |
 | -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups mappings. |
 | -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
similarity index 86%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
index 5d14c6f..7eb9f76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import YarnQueueAdapter from './yarn-queue';
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+export default YarnQueueAdapter.extend({
+
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js
similarity index 86%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js
index 5d14c6f..7eb9f76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import YarnQueueAdapter from './yarn-queue';
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+export default YarnQueueAdapter.extend({
+
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js
similarity index 86%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js
index 5d14c6f..7eb9f76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import YarnQueueAdapter from './yarn-queue';
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+export default YarnQueueAdapter.extend({
+
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js
similarity index 95%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js
index f2017df..8184c39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import AbstractAdapter from './abstract';
+import AbstractAdapter from '../abstract';
 
 export default AbstractAdapter.extend({
   address: "rmWebAddress",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index 3d72b2f..1a81a32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -39,6 +39,9 @@
   // mainSvg
   mainSvg: undefined,
 
+  // Names of the queue model attributes to read when computing each node's
+  // utilization (used and max capacity values); bound via component attributes.
+  used: undefined,
+  max: undefined,
+
   // Init data
   initData: function() {
     this.map = { };
@@ -52,7 +55,8 @@
       }.bind(this));
 
     // var selected = this.get("selected");
-
+    this.used = this.get("used");
+    this.max = this.get("max");
     this.initQueue("root", 1, this.treeData);
   },
 
@@ -81,7 +85,6 @@
       // Queue is not existed
       return;
     }
-
     if (depth > this.maxDepth) {
       this.maxDepth = this.maxDepth + 1;
     }
@@ -149,7 +152,9 @@
     nodeEnter.append("circle")
       .attr("r", 1e-6)
       .style("fill", function(d) {
-        var usedCap = d.queueData.get("usedCapacity");
+        var maxCap = d.queueData.get(this.max);
+        maxCap = maxCap == undefined ? 100 : maxCap;
+        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
         if (usedCap <= 60.0) {
           return "LimeGreen";
         } else if (usedCap <= 100.0) {
@@ -157,7 +162,7 @@
         } else {
           return "LightCoral";
         }
-      });
+      }.bind(this));
 
     // append percentage
     nodeEnter.append("text")
@@ -166,13 +171,15 @@
       .attr("fill", "white")
       .attr("text-anchor", function() { return "middle"; })
       .text(function(d) {
-        var usedCap = d.queueData.get("usedCapacity");
+        var maxCap = d.queueData.get(this.max);
+        maxCap = maxCap == undefined ? 100 : maxCap;
+        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
         if (usedCap >= 100.0) {
           return usedCap.toFixed(0) + "%";
         } else {
           return usedCap.toFixed(1) + "%";
         }
-      })
+      }.bind(this))
       .style("fill-opacity", 1e-6);
 
     // append queue name
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js
index bc6e27a..dcc0c29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js
@@ -39,6 +39,7 @@
   totalNodes: DS.attr('number'),
   lostNodes: DS.attr('number'),
   unhealthyNodes: DS.attr('number'),
+  decommissioningNodes: DS.attr('number'),
   decommissionedNodes: DS.attr('number'),
   rebootedNodes: DS.attr('number'),
   activeNodes: DS.attr('number'),
@@ -87,11 +88,15 @@
       value: this.get("unhealthyNodes")
     });
     arr.push({
+      label: "Decommissioning",
+      value: this.get("decommissioningNodes")
+    });
+    arr.push({
       label: "Decomissioned",
       value: this.get("decommissionedNodes")
     });
     return arr;
-  }.property("activeNodes", "unhealthyNodes", "decommissionedNodes"),
+  }.property("activeNodes", "unhealthyNodes", "decommissioningNodes", "decommissionedNodes"),
 
   getMemoryDataForDonutChart: function() {
     var type = "MB";
@@ -130,4 +135,4 @@
 
     return arr;
   }.property("allocatedVirtualCores", "reservedVirtualCores", "availableVirtualCores"),
-});
\ No newline at end of file
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
similarity index 94%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
index 27c48f7..1cb07bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
@@ -35,6 +35,7 @@
   numPendingApplications: DS.attr('number'),
   numActiveApplications: DS.attr('number'),
   users: DS.hasMany('YarnUser'),
+  type: DS.attr('string'),
 
   isLeafQueue: function() {
     var len = this.get("children.length");
@@ -59,7 +60,7 @@
         value: this.get("name") === "root" ? 100 : this.get("absMaxCapacity")
       }
     ];
-  }.property("absCapacity", "absUsedCapacity", "absMaxCapacity"),
+  }.property("absCapacity", "usedCapacity", "absMaxCapacity"),
 
   userUsagesDonutChartData: function() {
     var data = [];
@@ -90,5 +91,5 @@
         value: this.get("numActiveApplications") || 0
       }
     ];
-  }.property()
+  }.property("numPendingApplications", "numActiveApplications")
 });
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js
new file mode 100644
index 0000000..be71362
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.Model.extend({
+  name: DS.attr('string'),
+  children: DS.attr('array'),
+  parent: DS.attr('string'),
+  maxApps: DS.attr('number'),
+  minResources: DS.attr(),
+  maxResources: DS.attr(),
+  usedResources: DS.attr(),
+  demandResources: DS.attr(),
+  steadyFairResources: DS.attr(),
+  fairResources: DS.attr(),
+  clusterResources: DS.attr(),
+  pendingContainers: DS.attr('number'),
+  allocatedContainers: DS.attr('number'),
+  reservedContainers: DS.attr('number'),
+  schedulingPolicy: DS.attr('string'),
+  preemptable: DS.attr('number'),
+  numPendingApplications: DS.attr('number'),
+  numActiveApplications: DS.attr('number'),
+  type: DS.attr('string'),
+
+  isLeafQueue: function() {
+    var len = this.get("children.length");
+    if (!len) {
+      return true;
+    }
+    return len <= 0;
+  }.property("children"),
+
+  capacitiesBarChartData: function() {
+    return [
+      {
+        label: "Steady Fair Memory",
+        value: this.get("steadyFairResources.memory")
+      },
+      {
+        label: "Used Memory",
+        value: this.get("usedResources.memory")
+      },
+      {
+        label: "Maximum Memory",
+        value: this.get("maxResources.memory")
+      }
+    ];
+  }.property("maxResources.memory", "usedResources.memory", "maxResources.memory"),
+
+  numOfApplicationsDonutChartData: function() {
+    return [
+      {
+        label: "Pending Apps",
+        value: this.get("numPendingApplications") || 0 // TODO, fix the REST API so root will return #applications as well.
+      },
+      {
+        label: "Active Apps",
+        value: this.get("numActiveApplications") || 0
+      }
+    ];
+  }.property("numPendingApplications", "numActiveApplications")
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js
new file mode 100644
index 0000000..2386dc4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.Model.extend({
+  name: DS.attr('string'),
+  capacity: DS.attr('number'),
+  usedCapacity: DS.attr('number'),
+  state: DS.attr('string'),
+  minQueueMemoryCapacity: DS.attr('number'),
+  maxQueueMemoryCapacity: DS.attr('number'),
+  numNodes: DS.attr('number'),
+  usedNodeCapacity: DS.attr('number'),
+  availNodeCapacity: DS.attr('number'),
+  totalNodeCapacity: DS.attr('number'),
+  numContainers: DS.attr('number'),
+  type: DS.attr('string'),
+
+  capacitiesBarChartData: function() {
+    return [
+      {
+        label: "Available Capacity",
+        value: this.get("availNodeCapacity")
+      },
+      {
+        label: "Used Capacity",
+        value: this.get("usedNodeCapacity")
+      },
+      {
+        label: "Total Capacity",
+        value: this.get("totalNodeCapacity")
+      }
+    ];
+  }.property("availNodeCapacity", "usedNodeCapacity", "totalNodeCapacity")
+
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js
similarity index 86%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js
index 5d14c6f..dcf5f487 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import DS from 'ember-data';
 
-export default Ember.Route.extend({
-  model() {
-    return this.store.findAll('yarn-queue');
-  },
-});
\ No newline at end of file
+export default DS.Model.extend({
+  type: DS.attr('string')
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
index 461bf00..6baeca2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-rm-node.js
@@ -53,7 +53,7 @@
   nodeStateStyle: function() {
     var style = "default";
     var nodeState = this.get("state");
-    if (nodeState === "REBOOTED") {
+    if (nodeState === "REBOOTED" || nodeState === "DECOMMISSIONING") {
       style = "warning";
     } else if (nodeState === "UNHEALTHY" || nodeState === "DECOMMISSIONED" ||
           nodeState === "LOST" || nodeState === "SHUTDOWN") {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index b5db17d..3c6abd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,7 @@
         {
           state: "RUNNING"
         }),
-      queues: this.store.query('yarn-queue', {}),
+      queues: this.store.query('yarn-queue.yarn-queue', {}),
     });
   },
 
@@ -39,6 +39,6 @@
   unloadAll() {
     this.store.unloadAll('ClusterMetric');
     this.store.unloadAll('yarn-app');
-    this.store.unloadAll('yarn-queue');
+    this.store.unloadAll('yarn-queue.yarn-queue');
   }
 });
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
index 1c4546c..cd4ed09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js
@@ -22,22 +22,28 @@
 
 export default AbstractRoute.extend({
   model(param) {
-    return Ember.RSVP.hash({
-      selected : param.queue_name,
-      queues: this.store.query('yarn-queue', {}),
-      selectedQueue : undefined,
-      apps: this.store.query('yarn-app', {
-        queue: param.queue_name
-      })
-    });
+      return Ember.RSVP.hash({
+        selected : param.queue_name,
+        queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+          let type = model.get('firstObject').get('type');
+          return this.store.query("yarn-queue." + type + "-queue", {});
+        }),
+        selectedQueue : undefined,
+        apps: this.store.query('yarn-app', {
+          queue: param.queue_name
+        })
+      });
   },
 
   afterModel(model) {
-    model.selectedQueue = this.store.peekRecord('yarn-queue', model.selected);
+    var type = model.queues.get('firstObject').constructor.modelName;
+    model.selectedQueue = this.store.peekRecord(type, model.selected);
   },
 
   unloadAll() {
-    this.store.unloadAll('yarn-queue');
+    this.store.unloadAll('yarn-queue.capacity-queue');
+    this.store.unloadAll('yarn-queue.fair-queue');
+    this.store.unloadAll('yarn-queue.fifo-queue');
     this.store.unloadAll('yarn-app');
   }
 });
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js
index e4f145d..7d8a200 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js
@@ -30,17 +30,23 @@
     }
     return Ember.RSVP.hash({
       selected : queueName,
-      queues: this.store.query('yarn-queue', {}),
+      queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+        let type = model.get('firstObject').get('type');
+        return this.store.query("yarn-queue." + type + "-queue", {});
+      }),
       selectedQueue : undefined
     });
   },
 
   afterModel(model) {
-    model.selectedQueue = this.store.peekRecord('yarn-queue', model.selected);
+    var type = model.queues.get('firstObject').constructor.modelName;
+    model.selectedQueue = this.store.peekRecord(type, model.selected);
   },
 
   unloadAll() {
-    this.store.unloadAll('yarn-queue');
+    this.store.unloadAll('yarn-queue.capacity-queue');
+    this.store.unloadAll('yarn-queue.fair-queue');
+    this.store.unloadAll('yarn-queue.fifo-queue');
     this.store.unloadAll('yarn-app');
   },
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
deleted file mode 100644
index 436c6d8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-
-export default Ember.Route.extend({
-  beforeModel() {
-    this.transitionTo('yarn-queues.root');
-  }
-});
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
similarity index 98%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
index 4fc1a29..c7350ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
@@ -53,7 +53,6 @@
         });
       }
 
-
       var fixedPayload = {
         id: id,
         type: primaryModelClass.modelName, // yarn-queue
@@ -73,6 +72,7 @@
           preemptionDisabled: payload.preemptionDisabled,
           numPendingApplications: payload.numPendingApplications,
           numActiveApplications: payload.numActiveApplications,
+          type: "capacity",
         },
         // Relationships
         relationships: {
@@ -81,7 +81,6 @@
           }
         }
       };
-
       return {
         queue: this._super(store, primaryModelClass, fixedPayload, id, requestType),
         includedData: includedData
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js
new file mode 100644
index 0000000..2215d2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+
+    normalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+      var children = [];
+      if (payload.childQueues) {
+        payload.childQueues.queue.forEach(function(queue) {
+          children.push(queue.queueName);
+        });
+      }
+
+      var fixedPayload = {
+        id: id,
+        type: primaryModelClass.modelName,
+        attributes: {
+          name: payload.queueName,
+          parent: payload.myParent,
+          children: children,
+          maxApps: payload.maxApps,
+          minResources: payload.minResources,
+          maxResources: payload.maxResources,
+          usedResources: payload.usedResources,
+          demandResources: payload.demandResources,
+          steadyFairResources: payload.steadyFairResources,
+          fairResources: payload.fairResources,
+          clusterResources: payload.clusterResources,
+          pendingContainers: payload.pendingContainers,
+          allocatedContainers: payload.allocatedContainers,
+          reservedContainers: payload.reservedContainers,
+          schedulingPolicy: payload.schedulingPolicy,
+          preemptable: payload.preemptable,
+          numPendingApplications: payload.numPendingApps,
+          numActiveApplications: payload.numActiveApps,
+          type: "fair",
+        },
+      };
+      return this._super(store, primaryModelClass, fixedPayload, id, requestType);
+    },
+
+    handleQueue(store, primaryModelClass, payload, id, requestType) {
+      var data = [];
+      var includedData = [];
+      if(!payload) return data;
+      var result = this.normalizeSingleResponse(store, primaryModelClass,
+        payload, id, requestType);
+
+      data.push(result);
+
+      if (payload.childQueues) {
+        for (var i = 0; i < payload.childQueues.queue.length; i++) {
+          var queue = payload.childQueues.queue[i];
+          queue.myParent = payload.queueName;
+          var childResult = this.handleQueue(store, primaryModelClass, queue,
+            queue.queueName,
+            requestType);
+
+          data = data.concat(childResult);
+        }
+      }
+
+      return data;
+    },
+
+    normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) {
+      var normalizedArrayResponse = {};
+      var result = this.handleQueue(store, primaryModelClass,
+        payload.scheduler.schedulerInfo.rootQueue, "root", requestType);
+
+      normalizedArrayResponse.data = result;
+      return normalizedArrayResponse;
+    }
+});
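
[Editor's note] `handleQueue` above flattens the fair scheduler's nested `childQueues` tree into a single array, tagging each child with its parent's name via `myParent` before recursing. A standalone sketch of the same walk, using a hypothetical payload fragment:

    // A minimal sketch of the recursive flatten performed by handleQueue,
    // on a hypothetical fair-scheduler payload fragment.
    function flattenQueues(node, parentName) {
      node.myParent = parentName;          // record the parent, as the serializer does
      var out = [node];
      if (node.childQueues) {
        node.childQueues.queue.forEach(function(child) {
          out = out.concat(flattenQueues(child, node.queueName));
        });
      }
      return out;
    }

    var tree = {
      queueName: 'root',
      childQueues: { queue: [{ queueName: 'root.default' }] }
    };
    // => ['root', 'root.default']; the child carries myParent === 'root'
    console.log(flattenQueues(tree, null).map(function(q) { return q.queueName; }));
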
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js
new file mode 100644
index 0000000..297ec18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+
+    normalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+
+      var fixedPayload = {
+        id: id,
+        type: primaryModelClass.modelName,
+        attributes: {
+          name: id,
+          capacity: payload.capacity * 100,
+          usedCapacity: payload.usedCapacity * 100,
+          usedNodeCapacity: payload.usedNodeCapacity,
+          availNodeCapacity: payload.availNodeCapacity,
+          totalNodeCapacity: payload.totalNodeCapacity,
+          numNodes: payload.numNodes,
+          numContainers: payload.numContainers,
+          state: payload.qstate,
+          minQueueMemoryCapacity: payload.minQueueMemoryCapacity,
+          maxQueueMemoryCapacity: payload.maxQueueMemoryCapacity,
+          type: "fifo",
+        },
+
+      };
+
+      return this._super(store, primaryModelClass, fixedPayload, id,
+        requestType);
+    },
+
+    normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) {
+      var normalizedArrayResponse = {};
+      normalizedArrayResponse.data = [
+        this.normalizeSingleResponse(store, primaryModelClass,
+          payload.scheduler.schedulerInfo, "root", requestType)
+      ];
+
+      return normalizedArrayResponse;
+    }
+});
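
[Editor's note] Unlike the capacity and fair serializers, the FIFO payload exposes its metrics directly on `scheduler.schedulerInfo` (there is no `rootQueue`), with capacities reported as fractions; the serializer rescales them to percentages and always yields exactly one "root" record. A small sketch of the conversion, with illustrative values:

    // The FIFO REST payload reports fractional capacities; the serializer
    // multiplies by 100 to get percentages. Values here are made up.
    var schedulerInfo = { capacity: 1.0, usedCapacity: 0.05 };

    var attributes = {
      capacity: schedulerInfo.capacity * 100,        // 100
      usedCapacity: schedulerInfo.usedCapacity * 100 // 5
    };
    console.log(attributes);
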
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js
new file mode 100644
index 0000000..b2e0f2f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+
+    normalizeSingleResponse(store, primaryModelClass, payload, id,
+      requestType) {
+
+      var fixedPayload = {
+        id: id,
+        type: primaryModelClass.modelName,
+        attributes: {
+          type: payload.type.split(/(?=[A-Z])/)[0]
+        }
+      };
+      return this._super(store, primaryModelClass, fixedPayload, id,
+        requestType);
+    },
+
+    normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) {
+      var normalizedArrayResponse = {};
+
+      normalizedArrayResponse.data = [
+        this.normalizeSingleResponse(store, primaryModelClass,
+          payload.scheduler.schedulerInfo, "root", requestType)
+        ];
+
+      return normalizedArrayResponse;
+    }
+});
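
[Editor's note] The abstract yarn-queue serializer above derives the scheduler flavor from the REST `type` field by splitting on a zero-width lookahead before each capital letter and keeping the first chunk. The ResourceManager reports camelCase type strings, so:

    // Splitting before each uppercase letter keeps the camelCase prefix:
    'capacityScheduler'.split(/(?=[A-Z])/); // ['capacity', 'Scheduler']
    'fairScheduler'.split(/(?=[A-Z])/);     // ['fair', 'Scheduler']
    'fifoScheduler'.split(/(?=[A-Z])/);     // ['fifo', 'Scheduler']
    // ...so [0] yields 'capacity', 'fair', or 'fifo'.
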
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
index ac8f20a..e549ce5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/cluster-overview.hbs
@@ -86,22 +86,6 @@
         </div>
       </div>
     </div>
-
-    <div class="col-lg-4 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          Node Managers
-        </div>
-        <div class="container-fluid" id="nodes-donut-chart">
-          {{donut-chart data=model.clusterMetrics.firstObject.getNodesDataForDonutChart
-          showLabels=true
-          parentId="nodes-donut-chart"
-          ratio=0.6
-          maxHeight=350
-          colorTargets="good error warn"}}
-        </div>
-      </div>
-    </div>
   </div>
 
   <hr>
@@ -142,6 +126,23 @@
       </div>
     </div>
   </div>
+  <div class="row">
+    <div class="col-lg-6 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          Node Managers
+        </div>
+        <div class="container-fluid" id="nodes-donut-chart">
+          {{donut-chart data=model.clusterMetrics.firstObject.getNodesDataForDonutChart
+          showLabels=true
+          parentId="nodes-donut-chart"
+          ratio=0.6
+          maxHeight=350
+          colorTargets="good error warn"}}
+        </div>
+      </div>
+    </div>
+  </div>
 </div>
 
 {{/if}}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
index d8dd236..e3b0a90 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
@@ -20,9 +20,12 @@
 <div class="row">
   <div class="col-md-12 container-fluid">
     <div class="panel panel-default" id="tree-selector-container">
-     {{tree-selector model=model parentId="tree-selector-container" selected=selected}}
+      <div class="panel-heading">
+        Scheduler: {{model.firstObject.type}}
+      </div>
+     {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max}}
     </div>
   </div>
 </div>
 
-{{outlet}}
\ No newline at end of file
+{{outlet}}
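
[Editor's note] queue-navigator now forwards `used` and `max` through to tree-selector. The scheduler templates pass attribute *names*, including dotted paths such as "usedResources.memory", so the component presumably resolves them per queue at render time. A hypothetical sketch of that lookup, assuming Ember.get-style path resolution (this helper is not part of the patch):

    import Ember from 'ember';

    // Hypothetical helper: resolve the attribute names that
    // queue-navigator forwards; `path` may be dotted,
    // e.g. 'usedResources.memory'.
    function readQueueMetric(queue, path) {
      return Ember.get(queue, path);
    }

    // e.g. readQueueMetric(queue, 'usedCapacity') for the capacity view,
    //      readQueueMetric(queue, 'usedResources.memory') for fair.
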
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs
similarity index 99%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs
index 17a1e1a..3f6017f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs
@@ -51,4 +51,4 @@
     </tr>
   {{/if}}
   </tbody>
-</table>
\ No newline at end of file
+</table>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
new file mode 100644
index 0000000..7d44e69
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs
@@ -0,0 +1,84 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=170
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.capacity-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+</div>
+
+<div class="row">
+
+    <div class="col-lg-6 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          Running Apps: {{model.selected}}
+        </div>
+        <div class="container-fluid" id="numapplications-donut-chart">
+          {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
+          showLabels=true
+          parentId="numapplications-donut-chart"
+          ratio=0.6
+          maxHeight=350}}
+        </div>
+      </div>
+    </div>
+
+  {{#if model.selectedQueue.hasUserUsages}}
+    <div class="col-lg-6 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          User Usages: {{model.selected}}
+        </div>
+        <div class="container-fluid"  id="userusage-donut-chart">
+          {{donut-chart data=model.selectedQueue.userUsagesDonutChartData
+          showLabels=true
+          parentId="userusage-donut-chart"
+          type="memory"
+          ratio=0.6
+          maxHeight=350}}
+        </div>
+      </div>
+    </div>
+  {{/if}}
+
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
new file mode 100644
index 0000000..8b63b66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
@@ -0,0 +1,63 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{queue-navigator model=model.queues selected=model.selected
+  used="usedCapacity" max="absMaxCapacity"}}
+
+<div class="row">
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.capacity-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=175
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Running Apps: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="numapplications-donut-chart">
+        {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
+        showLabels=true
+        parentId="numapplications-donut-chart"
+        ratio=0.6
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
similarity index 63%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
index 17a1e1a..00fabcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
@@ -25,30 +25,28 @@
   </thead>
   <tbody>
     <tr>
-      <td>Configured Capacity</td>
-      <td>{{queue.capacity}}</td>
+      <td>Fair Memory, VCores</td>
+      <td>{{queue.fairResources.memory}} MB, {{queue.fairResources.vCores}}</td>
     </tr>
     <tr>
-      <td>Configured Max Capacity</td>
-      <td>{{queue.maxCapacity}}</td>
+      <td>Minimum Memory, VCores</td>
+      <td>{{queue.minResources.memory}} MB, {{queue.minResources.vCores}}</td>
     </tr>
     <tr>
-      <td>State</td>
-      <td>{{queue.state}}</td>
-    </tr>
-  {{#if queue.isLeafQueue}}
-    <tr>
-      <td>User Limit Percent</td>
-      <td>{{queue.userLimit}}</td>
+      <td>Cluster Memory, VCores</td>
+      <td>{{queue.clusterResources.memory}} MB, {{queue.clusterResources.vCores}}</td>
     </tr>
     <tr>
-      <td>User Limit Factor</td>
-      <td>{{queue.userLimitFactor}}</td>
+      <td>Pending, Allocated, Reserved Containers</td>
+      <td>{{queue.pendingContainers}} , {{queue.allocatedContainers}} , {{queue.reservedContainers}}</td>
     </tr>
     <tr>
-      <td>Preemption Disabled</td>
-      <td>{{queue.preemptionDisabled}}</td>
+      <td>Scheduling Policy</td>
+      <td>{{queue.schedulingPolicy}}</td>
     </tr>
-  {{/if}}
+    <tr>
+      <td>Preemption Enabled</td>
+      <td>{{queue.preemptable}}</td>
+    </tr>
   </tbody>
-</table>
\ No newline at end of file
+</table>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
new file mode 100644
index 0000000..a770bfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs
@@ -0,0 +1,66 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=170
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+</div>
+
+<div class="row">
+
+    <div class="col-lg-6 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          Running Apps: {{model.selected}}
+        </div>
+        <div class="container-fluid" id="numapplications-donut-chart">
+          {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
+          showLabels=true
+          parentId="numapplications-donut-chart"
+          ratio=0.6
+          maxHeight=350}}
+        </div>
+      </div>
+    </div>
+
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
new file mode 100644
index 0000000..0341108
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -0,0 +1,63 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{queue-navigator model=model.queues selected=model.selected
+  used="usedResources.memory" max="clusterResources.memory"}}
+
+<div class="row">
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=150
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+
+  <div class="col-lg-4 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Running Apps: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="numapplications-donut-chart">
+        {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
+        showLabels=true
+        parentId="numapplications-donut-chart"
+        ratio=0.6
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs
similarity index 75%
copy from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
copy to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs
index 17a1e1a..4ced3e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs
@@ -29,26 +29,28 @@
       <td>{{queue.capacity}}</td>
     </tr>
     <tr>
-      <td>Configured Max Capacity</td>
-      <td>{{queue.maxCapacity}}</td>
+      <td>Used Capacity</td>
+      <td>{{queue.usedCapacity}}</td>
     </tr>
     <tr>
       <td>State</td>
       <td>{{queue.state}}</td>
     </tr>
-  {{#if queue.isLeafQueue}}
     <tr>
-      <td>User Limit Percent</td>
-      <td>{{queue.userLimit}}</td>
+      <td>Minimum Queue Memory Capacity</td>
+      <td>{{queue.minQueueMemoryCapacity}}</td>
     </tr>
     <tr>
-      <td>User Limit Factor</td>
-      <td>{{queue.userLimitFactor}}</td>
+      <td>Maximum Queue Memory Capacity</td>
+      <td>{{queue.maxQueueMemoryCapacity}}</td>
     </tr>
     <tr>
-      <td>Preemption Disabled</td>
-      <td>{{queue.preemptionDisabled}}</td>
+      <td>Number of Nodes</td>
+      <td>{{queue.numNodes}}</td>
     </tr>
-  {{/if}}
+    <tr>
+      <td>Number of Containers</td>
+      <td>{{queue.numContainers}}</td>
+    </tr>
   </tbody>
-</table>
\ No newline at end of file
+</table>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs
new file mode 100644
index 0000000..7f4e8a7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs
@@ -0,0 +1,47 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=170
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.fifo-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs
new file mode 100644
index 0000000..46d79f0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs
@@ -0,0 +1,48 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{queue-navigator model=model.queues selected=model.selected
+  used="usedNodeCapacity" max="totalNodeCapacity"}}
+
+<div class="row">
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Information: {{model.selected}}
+      </div>
+        {{yarn-queue.fifo-queue-conf-table queue=model.selectedQueue}}
+    </div>
+  </div>
+
+  <div class="col-lg-6 container-fluid">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Queue Capacities: {{model.selected}}
+      </div>
+      <div class="container-fluid" id="capacity-bar-chart">
+        <br/>
+        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
+        title=""
+        parentId="capacity-bar-chart"
+        textWidth=150
+        ratio=0.55
+        maxHeight=350}}
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-nodes.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-nodes.hbs
index 795d00e..79d9efa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-nodes.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-nodes.hbs
@@ -47,7 +47,7 @@
   <div class="col-md-10 container-fluid">
     {{#if model.clusterMetrics}}
       <div class="row">
-        <div class="col-lg-5 container-fluid">
+        <div class="col-lg-6 container-fluid">
           <div class="panel panel-default">
             <div class="panel-heading">
               Node Managers
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs
index c112ef9..2f138a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs
@@ -16,69 +16,10 @@
  * limitations under the License.
 }}
 
-<div class="row">
-
-  <div class="col-lg-6 container-fluid">
-    <div class="panel panel-default">
-      <div class="panel-heading">
-        Queue Capacities: {{model.selected}}
-      </div>
-      <div class="container-fluid" id="capacity-bar-chart">
-        <br/>
-        {{bar-chart data=model.selectedQueue.capacitiesBarChartData
-        title=""
-        parentId="capacity-bar-chart"
-        textWidth=170
-        ratio=0.55
-        maxHeight=350}}
-      </div>
-    </div>
-  </div>
-
-  <div class="col-lg-6 container-fluid">
-    <div class="panel panel-default">
-      <div class="panel-heading">
-        Queue Information: {{model.selected}}
-      </div>
-      {{queue-configuration-table queue=model.selectedQueue}}
-    </div>
-  </div>
-
-</div>
-
-<div class="row">
-
-  <div class="col-lg-6 container-fluid">
-    <div class="panel panel-default">
-      <div class="panel-heading">
-        Running Apps: {{model.selected}}
-      </div>
-      <div class="container-fluid" id="numapplications-donut-chart">
-        {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
-        showLabels=true
-        parentId="numapplications-donut-chart"
-        ratio=0.6
-        maxHeight=350}}
-      </div>
-    </div>
-  </div>
-
-  {{#if model.selectedQueue.hasUserUsages}}
-    <div class="col-lg-6 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          User Usages: {{model.selected}}
-        </div>
-        <div class="container-fluid"  id="userusage-donut-chart">
-          {{donut-chart data=model.selectedQueue.userUsagesDonutChartData
-          showLabels=true
-          parentId="userusage-donut-chart"
-          type="memory"
-          ratio=0.6
-          maxHeight=350}}
-        </div>
-      </div>
-    </div>
-  {{/if}}
-
-</div>
+{{#if (eq model.queues.firstObject.type "capacity")}}
+  {{yarn-queue.capacity-queue-info model=model}}
+{{else if (eq model.queues.firstObject.type "fair")}}
+  {{yarn-queue.fair-queue-info model=model}}
+{{else}}
+  {{yarn-queue.fifo-queue-info model=model}}
+{{/if}}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
index 6dfb2201..fccdb5b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -17,54 +17,14 @@
 }}
 
 {{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
 <div class="container-fluid">
-  {{queue-navigator model=model.queues selected=model.selected}}
-
-  <div class="row">
-
-    <div class="col-lg-4 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          Queue Information: {{model.selected}}
-        </div>
-        {{queue-configuration-table queue=model.selectedQueue}}
-      </div>
-    </div>
-
-    <div class="col-lg-4 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          Queue Capacities: {{model.selected}}
-        </div>
-        <div class="container-fluid" id="capacity-bar-chart">
-          <br/>
-          {{bar-chart data=model.selectedQueue.capacitiesBarChartData
-          title=""
-          parentId="capacity-bar-chart"
-          textWidth=150
-          ratio=0.55
-          maxHeight=350}}
-        </div>
-      </div>
-    </div>
-
-    <div class="col-lg-4 container-fluid">
-      <div class="panel panel-default">
-        <div class="panel-heading">
-          Running Apps: {{model.selected}}
-        </div>
-        <div class="container-fluid" id="numapplications-donut-chart">
-          {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData
-          showLabels=true
-          parentId="numapplications-donut-chart"
-          ratio=0.6
-          maxHeight=350}}
-        </div>
-      </div>
-    </div>
-
-  </div>
+  {{#if (eq model.queues.firstObject.type "capacity")}}
+    {{yarn-queue.capacity-queue model=model}}
+  {{else if (eq model.queues.firstObject.type "fair")}}
+    {{yarn-queue.fair-queue model=model}}
+  {{else}}
+    {{yarn-queue.fifo-queue model=model}}
+  {{/if}}
 </div>
 
 {{outlet}}
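
[Editor's note] Both yarn-queue/info.hbs and yarn-queues.hbs now branch on the scheduler type with an `eq` helper. Handlebars has no built-in `eq`, so the app must supply one, commonly via the ember-truth-helpers addon; if it is not already present, a minimal helper in this codebase's Ember style would be:

    // app/helpers/eq.js — a minimal sketch; unnecessary if the app
    // already gets `eq` from an addon such as ember-truth-helpers.
    import Ember from 'ember';

    export default Ember.Helper.helper(function(params) {
      return params[0] === params[1];
    });
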
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
index 6c0cfee..af0cdf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js
@@ -55,7 +55,6 @@
       }
     }
 
-    console.log(colors);
     return colors;
   },
 
diff --git a/start-build-env.sh b/start-build-env.sh
index 94af7e4..5a18151 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -35,7 +35,9 @@
 FROM hadoop-build
 RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
 RUN useradd -g ${GROUP_ID} -u ${USER_ID} -k /root -m ${USER_NAME}
+RUN echo "${USER_NAME}\tALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
+
 UserSpecificDocker
 
 # By mapping the .m2 directory you can do an mvn install from