Merge branch 'trunk' into HDDS-1880-Decom
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 371bdde..657c223 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -106,12 +106,12 @@
 ENV PATH "${PATH}:/opt/cmake/bin"
 
 ######
-# Install Google Protobuf 2.5.0 (2.6.0 ships with Xenial)
+# Install Google Protobuf 3.7.1 (Xenial only ships 2.6.0)
 ######
 # hadolint ignore=DL3003
 RUN mkdir -p /opt/protobuf-src \
     && curl -L -s -S \
-      https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz \
+      https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
       -o /opt/protobuf.tar.gz \
     && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \
     && cd /opt/protobuf-src \
@@ -200,17 +200,6 @@
 ###
 ENV MAVEN_OPTS -Xms256m -Xmx1536m
 
-RUN mkdir -p /opt/protobuf-3.7-src \
-    && curl -L -s -S \
-      https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
-      -o /opt/protobuf-3.7.1.tar.gz \
-    && tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C /opt/protobuf-3.7-src \
-    && cd /opt/protobuf-3.7-src \
-    && ./configure --prefix=/opt/protobuf-3.7 \
-    && make install \
-    && cd /root \
-    && rm -rf /opt/protobuf-3.7-src
-
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.
 # So tell Yetus not to read the rest of the file:
@@ -222,11 +211,6 @@
     && dpkg --install hugo.deb \
     && rm hugo.deb
 
-######
-# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial)
-# Keep 2.5.0 as well, until 3.7.1 upgrade is complete.
-######
-# hadolint ignore=DL3003
 
 # Add a welcome message and environment checks.
 COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh
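
Reviewer note: with the 2.5.0 block upgraded in place and the duplicate 3.7.1 block below removed, the image now ships exactly one protoc, 3.7.1, on the PATH. A minimal sketch for sanity-checking that, assuming only that "protoc --version" prints a line like "libprotoc 3.7.1" (the class name is mine, not part of this patch):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class ProtocVersionCheck {
      public static void main(String[] args) throws Exception {
        // Run the same binary the Maven build will find on the PATH.
        Process p = new ProcessBuilder("protoc", "--version").start();
        try (BufferedReader r = new BufferedReader(
            new InputStreamReader(p.getInputStream(), StandardCharsets.UTF_8))) {
          String line = r.readLine(); // e.g. "libprotoc 3.7.1"
          if (line == null || !line.endsWith("3.7.1")) {
            throw new IllegalStateException("Unexpected protoc: " + line);
          }
          System.out.println(line);
        }
      }
    }
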
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index a06915e..84d3ae5 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -381,6 +381,20 @@
     </resources>
     <plugins>
       <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>src-compile-protoc</id>
+            <configuration><skip>false</skip></configuration>
+          </execution>
+          <execution>
+            <id>src-test-compile-protoc</id>
+            <configuration><skip>false</skip></configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
@@ -401,58 +415,6 @@
             </configuration>
           </execution>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>HAServiceProtocol.proto</include>
-                  <include>IpcConnectionContext.proto</include>
-                  <include>ProtocolInfo.proto</include>
-                  <include>RpcHeader.proto</include>
-                  <include>ZKFCProtocol.proto</include>
-                  <include>ProtobufRpcEngine.proto</include>
-                  <include>Security.proto</include>
-                  <include>GetUserMappingsProtocol.proto</include>
-                  <include>TraceAdmin.proto</include>
-                  <include>RefreshAuthorizationPolicyProtocol.proto</include>
-                  <include>RefreshUserMappingsProtocol.proto</include>
-                  <include>RefreshCallQueueProtocol.proto</include>
-                  <include>GenericRefreshProtocol.proto</include>
-                  <include>FSProtos.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-test-protoc</id>
-            <goals>
-              <goal>test-protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/test/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/test/proto</directory>
-                <includes>
-                  <include>test.proto</include>
-                  <include>test_rpc_service.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
             <id>resource-gz</id>
             <phase>generate-resources</phase>
             <goals>
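
Reviewer note: proto compilation for hadoop-common moves from the hadoop-maven-plugins protoc/test-protoc executions removed above to the org.xolstice protobuf-maven-plugin executions un-skipped at the top of this hunk, which are presumably declared skip-by-default in the parent pom. Generated classes are consumed exactly as before; a small smoke-test sketch (class name is mine) against one regenerated type:

    import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcKindProto;

    public class GeneratedProtoSmokeTest {
      public static void main(String[] args) {
        // RpcKindProto is generated from RpcHeader.proto, now by
        // protobuf-maven-plugin during generate-sources rather than by the
        // removed hadoop-maven-plugins executions.
        RpcKindProto kind = RpcKindProto.RPC_PROTOCOL_BUFFER;
        System.out.println(kind.name() + " = " + kind.getNumber());
      }
    }
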
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index 771c685..10591f6 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -170,7 +170,7 @@
     set(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
 
     if(PMDK_LIBRARY)
-        GET_FILENAME_COMPONENT(HADOOP_PMDK_LIBRARY ${PMDK_LIBRARY} NAME)
+        GET_FILENAME_COMPONENT(HADOOP_PMDK_LIBRARY ${PMDK_LIBRARY} REALPATH)
         set(PMDK_SOURCE_FILES ${SRC}/io/nativeio/pmdk_load.c)
     else(PMDK_LIBRARY)
         MESSAGE(FATAL_ERROR "The required PMDK library is NOT found. PMDK_LIBRARY=${PMDK_LIBRARY}")
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 5828b0b..1780cda 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -73,8 +73,17 @@
       // we want a/b
       final Path itemPath = new Path(item.path.toString());
       final Path itemParentPath = itemPath.getParent();
+
+      if (itemParentPath == null) {
+        throw new PathNotFoundException(String.format(
+            "Parent path of %s is null; mkdir was called on the root " +
+                "path, which has no parent.", itemPath.toString()));
+      }
+
       if (!item.fs.exists(itemParentPath)) {
-        throw new PathNotFoundException(itemParentPath.toString());
+        throw new PathNotFoundException(String.format(
+            "mkdir failed for %s: parent path %s does not exist.",
+            itemPath.toString(), itemParentPath.toString()));
       }
     }
     if (!item.fs.mkdirs(item.path)) {
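
Reviewer note: the new null check covers the one case where Path#getParent() returns null; without it, "hadoop fs -mkdir /" would pass null into item.fs.exists(). A quick illustration of that Path contract (class name is mine):

    import org.apache.hadoop.fs.Path;

    public class PathParentDemo {
      public static void main(String[] args) {
        // getParent() returns null only for the root path.
        System.out.println(new Path("/").getParent());    // null
        System.out.println(new Path("/a/b").getParent()); // /a
      }
    }
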
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 1d0eab7..973afa3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -120,16 +120,19 @@
       public String getMessage() {
         String msg;
         switch (stateCode) {
+        // -1 represents UNSUPPORTED.
         case -1:
-          msg = "The native code is built without PMDK support.";
+          msg = "The native code was built without PMDK support.";
           break;
+        // 1 represents PMDK_LIB_NOT_FOUND.
         case 1:
-          msg = "The native code is built with PMDK support, but PMDK libs " +
-              "are NOT found in execution environment or failed to be loaded.";
+          msg = "The native code was built with PMDK support, but PMDK libs " +
+              "were NOT found in execution environment or failed to be loaded.";
           break;
+        // 0 represents SUPPORTED.
         case 0:
-          msg = "The native code is built with PMDK support, and PMDK libs " +
-              "are loaded successfully.";
+          msg = "The native code was built with PMDK support, and PMDK libs " +
+              "were loaded successfully.";
           break;
         default:
           msg = "The state code: " + stateCode + " is unrecognized!";
@@ -140,7 +143,7 @@
 
     // Denotes the state of supporting PMDK. The value is set by JNI.
     private static SupportState pmdkSupportState =
-        SupportState.PMDK_LIB_NOT_FOUND;
+        SupportState.UNSUPPORTED;
 
     private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
 
@@ -177,6 +180,14 @@
       LOG.error("The state code: " + stateCode + " is unrecognized!");
     }
 
+    public static String getPmdkSupportStateMessage() {
+      if (getPmdkLibPath() != null) {
+        return pmdkSupportState.getMessage() +
+            " The pmdk lib path: " + getPmdkLibPath();
+      }
+      return pmdkSupportState.getMessage();
+    }
+
     public static boolean isPmdkAvailable() {
       LOG.info(pmdkSupportState.getMessage());
       return pmdkSupportState == SupportState.SUPPORTED;
@@ -242,8 +253,13 @@
           NativeIO.POSIX.pmemSync(region.getAddress(), region.getLength());
         }
       }
+
+      public static String getPmdkLibPath() {
+        return POSIX.getPmdkLibPath();
+      }
     }
 
+    private static native String getPmdkLibPath();
     private static native boolean isPmemCheck(long address, long length);
     private static native PmemMappedRegion pmemCreateMapFile(String path,
         long length);
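
Reviewer note: together with getPmdkSupportStateMessage(), the new JNI-backed getPmdkLibPath() lets callers report PMDK status and the resolved library path directly. A hedged usage sketch (class name is mine; all methods are from this patch):

    import org.apache.hadoop.io.nativeio.NativeIO;

    public class PmdkStatus {
      public static void main(String[] args) {
        // True only when the JNI layer reported SupportState.SUPPORTED.
        if (NativeIO.POSIX.isPmdkAvailable()) {
          System.out.println("PMDK loaded: "
              + NativeIO.POSIX.Pmem.getPmdkLibPath());
        } else {
          // The message now carries the lib path too, when one is known.
          System.out.println(NativeIO.POSIX.getPmdkSupportStateMessage());
        }
      }
    }
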
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
index 776839c..2338824 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,6 +70,7 @@
     boolean snappyLoaded = false;
     boolean isalLoaded = false;
     boolean zStdLoaded = false;
+    boolean pmdkLoaded = false;
     // lz4 is linked within libhadoop
     boolean lz4Loaded = nativeHadoopLoaded;
     boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
@@ -80,6 +82,7 @@
     String zlibLibraryName = "";
     String snappyLibraryName = "";
     String isalDetail = "";
+    String pmdkDetail = "";
     String zstdLibraryName = "";
     String lz4LibraryName = "";
     String bzip2LibraryName = "";
@@ -110,6 +113,12 @@
         isalLoaded = true;
       }
 
+      pmdkDetail = NativeIO.POSIX.getPmdkSupportStateMessage();
+      pmdkLoaded = NativeIO.POSIX.isPmdkAvailable();
+      if (pmdkLoaded) {
+        pmdkDetail = NativeIO.POSIX.Pmem.getPmdkLibPath();
+      }
+
       openSslDetail = OpensslCipher.getLoadingFailureReason();
       if (openSslDetail != null) {
         openSslLoaded = false;
@@ -148,6 +157,7 @@
     System.out.printf("bzip2:   %b %s%n", bzip2Loaded, bzip2LibraryName);
     System.out.printf("openssl: %b %s%n", openSslLoaded, openSslDetail);
     System.out.printf("ISA-L:   %b %s%n", isalLoaded, isalDetail);
+    System.out.printf("PMDK:    %b %s%n", pmdkLoaded, pmdkDetail);
 
     if (Shell.WINDOWS) {
       System.out.printf("winutils: %b %s%n", winutilsExists, winutilsPath);
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index 3a0641b..b0b5151 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -292,10 +292,13 @@
   if (mid == 0) {
     return 0;
   }
+
   if (strlen(errMsg) > 0) {
+    // Set PMDK support state to 1 which represents PMDK_LIB_NOT_FOUND.
     (*env)->CallStaticVoidMethod(env, clazz, mid, 1);
     return 0;
   }
+  // Set PMDK support state to 0 which represents SUPPORTED.
   (*env)->CallStaticVoidMethod(env, clazz, mid, 0);
   return 1;
 }
@@ -1620,7 +1623,7 @@
     char msg[1000];
     succeed = pmdkLoader->pmem_msync(address, length);
     // succeed = -1 failure
-    if (succeed = -1) {
+    if (succeed == -1) {
       snprintf(msg, sizeof(msg), "Failed to msync region. address: %x, length: %x, error msg: %s", address, length, pmem_errormsg());
       THROW(env, "java/io/IOException", msg);
       return;
@@ -1631,6 +1634,17 @@
   #endif
   }
 
+// Returns the PMDK library path compiled into libhadoop via the
+// HADOOP_PMDK_LIBRARY macro, or NULL when built without PMDK support.
+JNIEXPORT jstring JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getPmdkLibPath
+  (JNIEnv * env, jclass thisClass) {
+    jstring libpath = NULL;
+
+    #ifdef HADOOP_PMDK_LIBRARY
+      libpath = (*env)->NewStringUTF(env, HADOOP_PMDK_LIBRARY);
+    #endif
+    return libpath;
+  }
 
 #ifdef __cplusplus
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c
index f7d6cfb..502508c 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.c
@@ -59,11 +59,11 @@
 void load_pmdk_lib(char* err, size_t err_len) {
   const char* errMsg;
   const char* library = NULL;
-#ifdef UNIX
-  Dl_info dl_info;
-#else
-  LPTSTR filename = NULL;
-#endif
+  #ifdef UNIX
+    Dl_info dl_info;
+  #else
+    LPTSTR filename = NULL;
+  #endif
 
   err[0] = '\0';
 
@@ -88,15 +88,15 @@
     snprintf(err, err_len, "Loading functions from PMDK failed: %s", errMsg);
   }
 
-#ifdef UNIX
-  if(dladdr(pmdkLoader->pmem_map_file, &dl_info)) {
-    library = dl_info.dli_fname;
-  }
-#else
-  if (GetModuleFileName(pmdkLoader->libec, filename, 256) > 0) {
-    library = filename;
-  }
-#endif
+  #ifdef UNIX
+    if (dladdr(pmdkLoader->pmem_map_file, &dl_info)) {
+      library = dl_info.dli_fname;
+    }
+  #else
+    if (GetModuleFileName(pmdkLoader->libec, filename, 256) > 0) {
+      library = filename;
+    }
+  #endif
 
   if (library == NULL) {
     library = HADOOP_PMDK_LIBRARY;
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h
index c93a076..a668377 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/pmdk_load.h
@@ -80,11 +80,6 @@
 #endif
 
 /**
- * Return 0 if not support, 1 otherwise.
- */
-int build_support_pmdk();
-
-/**
  * Initialize and load PMDK library, returning error message if any.
  *
  * @param err     The err message buffer.
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
index c3b768a..c895bce 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.fs";
 option java_outer_classname = "FSProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
index fe46549..6296f88 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.proto";
 option java_outer_classname = "GenericRefreshProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
index 51552b8..cb91a13 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.tools.proto";
 option java_outer_classname = "GetUserMappingsProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
index 16ee9a2..5a88a7f 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ha.proto";
 option java_outer_classname = "HAServiceProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto b/hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto
index 4557e89..16e2fb7 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "IpcConnectionContextProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
index a17e2078..fa11313 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 /**
  * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer
  * to marshal the request and response in the RPC layer.
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto b/hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto
index fdbc440..0e9d0d4 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "ProtocolInfoProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
index 5ef1c2d..f57c6d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.security.proto";
 option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RefreshCallQueueProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/RefreshCallQueueProtocol.proto
index 67ed133..463b7c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RefreshCallQueueProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RefreshCallQueueProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.proto";
 option java_outer_classname = "RefreshCallQueueProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto
index 41031ed..a1130f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.security.proto";
 option java_outer_classname = "RefreshUserMappingsProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
index e8d8cbb..4705b42 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "RpcHeaderProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/Security.proto b/hadoop-common-project/hadoop-common/src/main/proto/Security.proto
index 037a878..5177a86 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/Security.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/Security.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.security.proto";
 option java_outer_classname = "SecurityProtos";
 option java_generic_services = true;
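
Reviewer note: protoc 3.7.1 warns when a .proto file carries no syntax statement, so every stable proto2 file in this patch now declares syntax = "proto2"; explicitly; wire format and required-field semantics are unchanged. A round-trip sketch over one regenerated message (class name is mine; TokenProto's four required fields are as declared in Security.proto):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;

    public class Proto2RoundTrip {
      public static void main(String[] args) throws Exception {
        // Build a proto2 message with all required fields set, then check
        // that the 3.7.1-generated code round-trips it unchanged.
        TokenProto token = TokenProto.newBuilder()
            .setIdentifier(ByteString.copyFromUtf8("id"))
            .setPassword(ByteString.copyFromUtf8("pw"))
            .setKind("HDFS_DELEGATION_TOKEN")
            .setService("nn1:8020")
            .build();
        TokenProto parsed = TokenProto.parseFrom(token.toByteArray());
        System.out.println(parsed.equals(token)); // true
      }
    }
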
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/TraceAdmin.proto b/hadoop-common-project/hadoop-common/src/main/proto/TraceAdmin.proto
index 52d2a90..8cf131b 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/TraceAdmin.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/TraceAdmin.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.tracing";
 option java_outer_classname = "TraceAdminPB";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto b/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
index a2b8dd1..98bc05f 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ha.proto";
 option java_outer_classname = "ZKFCProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/CHANGELOG.3.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/CHANGELOG.3.2.1.md
new file mode 100644
index 0000000..64e249e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/CHANGELOG.3.2.1.md
@@ -0,0 +1,553 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 3.2.1 - 2019-09-10
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- |:---- |:---- |:---- |:---- |
+| [HADOOP-15922](https://issues.apache.org/jira/browse/HADOOP-15922) | DelegationTokenAuthenticationFilter get wrong doAsUser since it does not decode URL |  Major | common, kms | He Xiaoqiao | He Xiaoqiao |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- |:---- |:---- |:---- |:---- |
+| [HADOOP-15950](https://issues.apache.org/jira/browse/HADOOP-15950) | Failover for LdapGroupsMapping |  Major | common, security | Lukas Majercak | Lukas Majercak |
+| [YARN-7055](https://issues.apache.org/jira/browse/YARN-7055) | YARN Timeline Service v.2: beta 1 / GA |  Major | timelineclient, timelinereader, timelineserver | Vrushali C |  |
+| [YARN-9761](https://issues.apache.org/jira/browse/YARN-9761) | Allow overriding application submissions based on server side configs |  Major | . | Jonathan Hung | pralabhkumar |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- |:---- |:---- |:---- |:---- |
+| [HADOOP-15676](https://issues.apache.org/jira/browse/HADOOP-15676) | Cleanup TestSSLHttpServer |  Minor | common | Szilard Nemeth | Szilard Nemeth |
+| [YARN-8896](https://issues.apache.org/jira/browse/YARN-8896) | Limit the maximum number of container assignments per heartbeat |  Major | . | Weiwei Yang | Zhankun Tang |
+| [YARN-8618](https://issues.apache.org/jira/browse/YARN-8618) | Yarn Service: When all the components of a service have restart policy NEVER then initiation of service upgrade should fail |  Major | . | Chandni Singh | Chandni Singh |
+| [HADOOP-15804](https://issues.apache.org/jira/browse/HADOOP-15804) | upgrade to commons-compress 1.18 |  Major | . | PJ Fanning | Akira Ajisaka |
+| [YARN-8916](https://issues.apache.org/jira/browse/YARN-8916) | Define a constant "docker" string in "ContainerRuntimeConstants.java" for better maintainability |  Minor | . | Zhankun Tang | Zhankun Tang |
+| [YARN-8908](https://issues.apache.org/jira/browse/YARN-8908) | Fix errors in yarn-default.xml related to GPU/FPGA |  Major | . | Zhankun Tang | Zhankun Tang |
+| [HDFS-13941](https://issues.apache.org/jira/browse/HDFS-13941) | make storageId in BlockPoolTokenSecretManager.checkAccess optional |  Major | . | Ajay Kumar | Ajay Kumar |
+| [HDFS-14029](https://issues.apache.org/jira/browse/HDFS-14029) | Sleep in TestLazyPersistFiles should be put into a loop |  Trivial | hdfs | Adam Antal | Adam Antal |
+| [YARN-8915](https://issues.apache.org/jira/browse/YARN-8915) | Update the doc about the default value of "maximum-container-assignments" for capacity scheduler |  Minor | . | Zhankun Tang | Zhankun Tang |
+| [HADOOP-15855](https://issues.apache.org/jira/browse/HADOOP-15855) | Review hadoop credential doc, including object store details |  Minor | documentation, security | Steve Loughran | Steve Loughran |
+| [YARN-7225](https://issues.apache.org/jira/browse/YARN-7225) | Add queue and partition info to RM audit log |  Major | resourcemanager | Jonathan Hung | Eric Payne |
+| [HADOOP-15687](https://issues.apache.org/jira/browse/HADOOP-15687) | Credentials class should allow access to aliases |  Trivial | . | Lars Francke | Lars Francke |
+| [YARN-8969](https://issues.apache.org/jira/browse/YARN-8969) | AbstractYarnScheduler#getNodeTracker should return generic type to avoid type casting |  Major | . | Wanqiang Ji | Wanqiang Ji |
+| [YARN-8977](https://issues.apache.org/jira/browse/YARN-8977) | Remove unnecessary type casting when calling AbstractYarnScheduler#getSchedulerNode |  Trivial | . | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14070](https://issues.apache.org/jira/browse/HDFS-14070) | Refactor NameNodeWebHdfsMethods to allow better extensibility |  Major | . | CR Hota | CR Hota |
+| [HADOOP-15926](https://issues.apache.org/jira/browse/HADOOP-15926) | Document upgrading the section in NOTICE.txt when upgrading the version of AWS SDK |  Minor | documentation | Akira Ajisaka | Dinesh Chitlangia |
+| [HADOOP-12558](https://issues.apache.org/jira/browse/HADOOP-12558) | distcp documentation is woefully out of date |  Critical | documentation, tools/distcp | Allen Wittenauer | Dinesh Chitlangia |
+| [HDFS-14063](https://issues.apache.org/jira/browse/HDFS-14063) | Support noredirect param for CREATE/APPEND/OPEN/GETFILECHECKSUM in HttpFS |  Major | . | Íñigo Goiri | Íñigo Goiri |
+| [HADOOP-15919](https://issues.apache.org/jira/browse/HADOOP-15919) | AliyunOSS: Enable Yarn to use OSS |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-14064](https://issues.apache.org/jira/browse/HDFS-14064) | WEBHDFS: Support Enable/Disable EC Policy |  Major | . | Ayush Saxena | Ayush Saxena |
+| [HADOOP-15943](https://issues.apache.org/jira/browse/HADOOP-15943) | AliyunOSS: add missing owner & group attributes for oss FileStatus |  Major | fs/oss | wujinhu | wujinhu |
+| [MAPREDUCE-7164](https://issues.apache.org/jira/browse/MAPREDUCE-7164) | FileOutputCommitter does not report progress while merging paths. |  Major | . | Kuhu Shukla | Kuhu Shukla |
+| [YARN-9069](https://issues.apache.org/jira/browse/YARN-9069) | Fix SchedulerInfo#getSchedulerType for custom schedulers |  Minor | . | Bilwa S T | Bilwa S T |
+| [HDFS-14095](https://issues.apache.org/jira/browse/HDFS-14095) | EC: Track Erasure Coding commands in DFS statistics |  Major | erasure-coding | Ayush Saxena | Ayush Saxena |
+| [HDFS-14112](https://issues.apache.org/jira/browse/HDFS-14112) | Avoid recursive call to external authorizer for getContentSummary. |  Critical | namenode | Jitendra Nath Pandey | Tsz Wo Nicholas Sze |
+| [YARN-9036](https://issues.apache.org/jira/browse/YARN-9036) | Escape newlines in health report in YARN UI |  Major | . | Jonathan Hung | Keqiu Hu |
+| [YARN-9041](https://issues.apache.org/jira/browse/YARN-9041) | Performance Optimization of method FSPreemptionThread#identifyContainersToPreempt |  Major | fairscheduler, scheduler preemption | Wanqiang Ji | Wanqiang Ji |
+| [YARN-9085](https://issues.apache.org/jira/browse/YARN-9085) | Add Guaranteed and MaxCapacity to CSQueueMetrics |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HDFS-14124](https://issues.apache.org/jira/browse/HDFS-14124) | EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs |  Major | erasure-coding, httpfs, webhdfs | Souryakanta Dwivedy | Ayush Saxena |
+| [HADOOP-15808](https://issues.apache.org/jira/browse/HADOOP-15808) | Harden Token service loader use |  Major | security | Steve Loughran | Steve Loughran |
+| [YARN-9122](https://issues.apache.org/jira/browse/YARN-9122) | Add table of contents to YARN Service API document |  Minor | documentation | Akira Ajisaka | Zhankun Tang |
+| [HDFS-14171](https://issues.apache.org/jira/browse/HDFS-14171) | Performance improvement in Tailing EditLog |  Major | namenode | Kenneth Yang | Kenneth Yang |
+| [HADOOP-15481](https://issues.apache.org/jira/browse/HADOOP-15481) | Emit FairCallQueue stats as metrics |  Major | metrics, rpc-server | Erik Krogen | Christopher Gregorian |
+| [HADOOP-15994](https://issues.apache.org/jira/browse/HADOOP-15994) | Upgrade Jackson2 to 2.9.8 |  Major | security | Akira Ajisaka | lqjacklee |
+| [HADOOP-16019](https://issues.apache.org/jira/browse/HADOOP-16019) | ZKDelegationTokenSecretManager won't log exception message occured in function setJaasConfiguration |  Minor | common | luhuachao | luhuachao |
+| [HDFS-14213](https://issues.apache.org/jira/browse/HDFS-14213) | Remove Jansson from BUILDING.txt |  Minor | documentation | Akira Ajisaka | Dinesh Chitlangia |
+| [HDFS-14221](https://issues.apache.org/jira/browse/HDFS-14221) | Replace Guava Optional with Java Optional |  Major | . | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-14222](https://issues.apache.org/jira/browse/HDFS-14222) | Make ThrottledAsyncChecker constructor public |  Major | . | Arpit Agarwal | Arpit Agarwal |
+| [HADOOP-16075](https://issues.apache.org/jira/browse/HADOOP-16075) | Upgrade checkstyle version to 8.16 |  Minor | build | Dinesh Chitlangia | Dinesh Chitlangia |
+| [HADOOP-16089](https://issues.apache.org/jira/browse/HADOOP-16089) | AliyunOSS: update oss-sdk version to 3.4.1 |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-14231](https://issues.apache.org/jira/browse/HDFS-14231) | DataXceiver#run() should not log exceptions caused by InvalidToken exception as an error |  Major | hdfs | Kitti Nanasi | Kitti Nanasi |
+| [YARN-7171](https://issues.apache.org/jira/browse/YARN-7171) | RM UI should sort memory / cores numerically |  Major | . | Eric Maynard | Ahmed Hussein |
+| [YARN-9282](https://issues.apache.org/jira/browse/YARN-9282) | Typo in javadoc of class LinuxContainerExecutor: hadoop.security.authetication should be 'authentication' |  Trivial | . | Szilard Nemeth | Charan Hebri |
+| [HADOOP-16108](https://issues.apache.org/jira/browse/HADOOP-16108) | Tail Follow Interval Should Allow To Specify The Sleep Interval To Save Unnecessary RPC's |  Major | . | Harshakiran Reddy | Ayush Saxena |
+| [YARN-8295](https://issues.apache.org/jira/browse/YARN-8295) | [UI2] Improve "Resource Usage" tab error message when there are no data available. |  Minor | yarn-ui-v2 | Gergely Novák | Charan Hebri |
+| [YARN-7824](https://issues.apache.org/jira/browse/YARN-7824) | [UI2] Yarn Component Instance page should include link to container logs |  Major | yarn-ui-v2 | Yesha Vora | Akhil PB |
+| [HADOOP-15281](https://issues.apache.org/jira/browse/HADOOP-15281) | Distcp to add no-rename copy option |  Major | tools/distcp | Steve Loughran | Andrew Olson |
+| [YARN-9309](https://issues.apache.org/jira/browse/YARN-9309) | Improve graph text in SLS to avoid overlapping |  Minor | . | Bilwa S T | Bilwa S T |
+| [HDFS-14235](https://issues.apache.org/jira/browse/HDFS-14235) | Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon |  Major | . | Surendra Singh Lilhore | Ranith Sardar |
+| [YARN-9168](https://issues.apache.org/jira/browse/YARN-9168) | DistributedShell client timeout should be -1 by default |  Minor | . | Zhankun Tang | Zhankun Tang |
+| [YARN-9087](https://issues.apache.org/jira/browse/YARN-9087) | Improve logging for initialization of Resource plugins |  Major | yarn | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9121](https://issues.apache.org/jira/browse/YARN-9121) | Replace GpuDiscoverer.getInstance() to a readable object for easy access control |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9139](https://issues.apache.org/jira/browse/YARN-9139) | Simplify initializer code of GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [HDFS-14247](https://issues.apache.org/jira/browse/HDFS-14247) | Repeat adding node description into network topology |  Minor | datanode | HuangTao | HuangTao |
+| [YARN-9332](https://issues.apache.org/jira/browse/YARN-9332) | RackResolver tool should accept multiple hosts |  Minor | yarn | Lantao Jin | Lantao Jin |
+| [HADOOP-16140](https://issues.apache.org/jira/browse/HADOOP-16140) | hadoop fs expunge to add -immediate option to purge trash immediately |  Major | fs | Stephen O'Donnell | Stephen O'Donnell |
+| [YARN-9138](https://issues.apache.org/jira/browse/YARN-9138) | Improve test coverage for nvidia-smi binary execution of GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [MAPREDUCE-7191](https://issues.apache.org/jira/browse/MAPREDUCE-7191) | JobHistoryServer should log exception when loading/parsing history file failed |  Minor | mrv2 | Jiandan Yang | Jiandan Yang |
+| [MAPREDUCE-7192](https://issues.apache.org/jira/browse/MAPREDUCE-7192) | JobHistoryServer attempts page support jump to  containers log page in NM when logAggregation is disable |  Major | mrv2 | Jiandan Yang | Jiandan Yang |
+| [HDFS-14346](https://issues.apache.org/jira/browse/HDFS-14346) | Better time precision in getTimeDuration |  Minor | namenode | Chao Sun | Chao Sun |
+| [HDFS-14366](https://issues.apache.org/jira/browse/HDFS-14366) | Improve HDFS append performance |  Major | hdfs | Chao Sun | Chao Sun |
+| [HADOOP-16196](https://issues.apache.org/jira/browse/HADOOP-16196) | Path Parameterize Comparable |  Minor | common | David Mollitor | David Mollitor |
+| [HADOOP-16181](https://issues.apache.org/jira/browse/HADOOP-16181) | HadoopExecutors shutdown Cleanup |  Minor | util | David Mollitor | David Mollitor |
+| [HADOOP-16147](https://issues.apache.org/jira/browse/HADOOP-16147) | Allow CopyListing sequence file keys and values to be more easily customized |  Major | tools/distcp | Andrew Olson | Andrew Olson |
+| [MAPREDUCE-7190](https://issues.apache.org/jira/browse/MAPREDUCE-7190) | Add SleepJob additional parameter to make parallel runs distinguishable |  Major | . | Adam Antal | Adam Antal |
+| [YARN-9394](https://issues.apache.org/jira/browse/YARN-9394) | Use new API of RackResolver to get better performance |  Major | yarn | Lantao Jin | Lantao Jin |
+| [HADOOP-16208](https://issues.apache.org/jira/browse/HADOOP-16208) | Do Not Log InterruptedException in Client |  Minor | common | David Mollitor | David Mollitor |
+| [YARN-9463](https://issues.apache.org/jira/browse/YARN-9463) | Add queueName info when failing with queue capacity sanity check |  Trivial | capacity scheduler | Aihua Xu | Aihua Xu |
+| [HADOOP-16227](https://issues.apache.org/jira/browse/HADOOP-16227) | Upgrade checkstyle to 8.19 |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-14432](https://issues.apache.org/jira/browse/HDFS-14432) | dfs.datanode.shared.file.descriptor.paths duplicated in hdfs-default.xml |  Minor | hdfs | puleya7 | puleya7 |
+| [HDFS-14463](https://issues.apache.org/jira/browse/HDFS-14463) | Add Log Level link under NameNode and DataNode Web UI Utilities dropdown |  Trivial | webhdfs | Siyao Meng | Siyao Meng |
+| [YARN-9529](https://issues.apache.org/jira/browse/YARN-9529) | Log correct cpu controller path on error while initializing CGroups. |  Major | nodemanager | Jonathan Hung | Jonathan Hung |
+| [HADOOP-16289](https://issues.apache.org/jira/browse/HADOOP-16289) | Allow extra jsvc startup option in hadoop\_start\_secure\_daemon in hadoop-functions.sh |  Major | scripts | Siyao Meng | Siyao Meng |
+| [HADOOP-16307](https://issues.apache.org/jira/browse/HADOOP-16307) | Intern User Name and Group Name in FileStatus |  Major | fs | David Mollitor | David Mollitor |
+| [HADOOP-16294](https://issues.apache.org/jira/browse/HADOOP-16294) | Enable access to context by DistCp subclasses |  Trivial | tools/distcp | Andrew Olson | Andrew Olson |
+| [HDFS-14507](https://issues.apache.org/jira/browse/HDFS-14507) | Document -blockingDecommission option for hdfs dfsadmin -listOpenFiles |  Minor | documentation | Siyao Meng | Siyao Meng |
+| [HDFS-14451](https://issues.apache.org/jira/browse/HDFS-14451) | Incorrect header or version mismatch log message |  Minor | ipc | David Mollitor | Shweta |
+| [HDFS-14502](https://issues.apache.org/jira/browse/HDFS-14502) | keepResults option in NNThroughputBenchmark should call saveNamespace() |  Major | benchmarks, hdfs | Konstantin Shvachko | Konstantin Shvachko |
+| [HADOOP-16323](https://issues.apache.org/jira/browse/HADOOP-16323) | https everywhere in Maven settings |  Minor | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9563](https://issues.apache.org/jira/browse/YARN-9563) | Resource report REST API could return NaN or Inf |  Minor | . | Ahmed Hussein | Ahmed Hussein |
+| [YARN-9545](https://issues.apache.org/jira/browse/YARN-9545) | Create healthcheck REST endpoint for ATSv2 |  Major | ATSv2 | Zoltan Siegl | Zoltan Siegl |
+| [HDFS-10659](https://issues.apache.org/jira/browse/HDFS-10659) | Namenode crashes after Journalnode re-installation in an HA cluster due to missing paxos directory |  Major | ha, journal-node | Amit Anand | star |
+| [HDFS-14513](https://issues.apache.org/jira/browse/HDFS-14513) | FSImage which is saving should be clean while NameNode shutdown |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [YARN-9543](https://issues.apache.org/jira/browse/YARN-9543) | [UI2] Handle ATSv2 server down or failures cases gracefully in YARN UI v2 |  Major | ATSv2, yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [HADOOP-16369](https://issues.apache.org/jira/browse/HADOOP-16369) | Fix zstandard shortname misspelled as zts |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [HDFS-14560](https://issues.apache.org/jira/browse/HDFS-14560) | Allow block replication parameters to be refreshable |  Major | namenode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12770](https://issues.apache.org/jira/browse/HDFS-12770) | Add doc about how to disable client socket cache |  Trivial | hdfs-client | Weiwei Yang | Weiwei Yang |
+| [HADOOP-9157](https://issues.apache.org/jira/browse/HADOOP-9157) | Better option for curl in hadoop-auth-examples |  Minor | documentation | Jingguo Yao | Andras Bokor |
+| [HDFS-14340](https://issues.apache.org/jira/browse/HDFS-14340) | Lower the log level when can't get postOpAttr |  Minor | nfs | Anuhan Torgonshar | Anuhan Torgonshar |
+| [HADOOP-15914](https://issues.apache.org/jira/browse/HADOOP-15914) | hadoop jar command has no help argument |  Major | common | Adam Antal | Adam Antal |
+| [YARN-9630](https://issues.apache.org/jira/browse/YARN-9630) | [UI2] Add a link in docs's top page |  Major | documentation, yarn-ui-v2 | Wanqiang Ji | Wanqiang Ji |
+| [HADOOP-16156](https://issues.apache.org/jira/browse/HADOOP-16156) | [Clean-up] Remove NULL check before instanceof and fix checkstyle in InnerNodeImpl |  Minor | . | Shweta | Shweta |
+| [HADOOP-14385](https://issues.apache.org/jira/browse/HADOOP-14385) | HttpExceptionUtils#validateResponse swallows exceptions |  Trivial | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-12564](https://issues.apache.org/jira/browse/HDFS-12564) | Add the documents of swebhdfs configurations on the client side |  Major | documentation, webhdfs | Takanobu Asanuma | Takanobu Asanuma |
+| [HDFS-14403](https://issues.apache.org/jira/browse/HDFS-14403) | Cost-Based RPC FairCallQueue |  Major | ipc, namenode | Erik Krogen | Christopher Gregorian |
+| [HADOOP-16266](https://issues.apache.org/jira/browse/HADOOP-16266) | Add more fine-grained processing time metrics to the RPC layer |  Minor | ipc | Christopher Gregorian | Erik Krogen |
+| [YARN-9629](https://issues.apache.org/jira/browse/YARN-9629) | Support configurable MIN\_LOG\_ROLLING\_INTERVAL |  Minor | log-aggregation, nodemanager, yarn | Adam Antal | Adam Antal |
+| [HDFS-13694](https://issues.apache.org/jira/browse/HDFS-13694) | Making md5 computing being in parallel with image loading |  Major | . | zhouyingchao | Lisheng Sun |
+| [HDFS-14632](https://issues.apache.org/jira/browse/HDFS-14632) | Reduce useless #getNumLiveDataNodes call in SafeModeMonitor |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [YARN-9573](https://issues.apache.org/jira/browse/YARN-9573) | DistributedShell cannot specify LogAggregationContext |  Major | distributed-shell, log-aggregation, yarn | Adam Antal | Adam Antal |
+| [YARN-9337](https://issues.apache.org/jira/browse/YARN-9337) | GPU auto-discovery script runs even when the resource is given by hand |  Major | yarn | Adam Antal | Adam Antal |
+| [YARN-9127](https://issues.apache.org/jira/browse/YARN-9127) | Create more tests to verify GpuDeviceInformationParser |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9326](https://issues.apache.org/jira/browse/YARN-9326) | Fair Scheduler configuration defaults are not documented in case of min and maxResources |  Major | docs, documentation, fairscheduler, yarn | Adam Antal | Adam Antal |
+| [HDFS-14547](https://issues.apache.org/jira/browse/HDFS-14547) | DirectoryWithQuotaFeature.quota costs additional memory even the storage type quota is not set. |  Major | . | Jinglun | Jinglun |
+| [HDFS-14693](https://issues.apache.org/jira/browse/HDFS-14693) | NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. |  Minor | namenode | Xudong Cao | Xudong Cao |
+| [YARN-9094](https://issues.apache.org/jira/browse/YARN-9094) | Remove unused interface method: NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM |  Trivial | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9096](https://issues.apache.org/jira/browse/YARN-9096) | Some GpuResourcePlugin and ResourcePluginManager methods are synchronized unnecessarily |  Major | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9092](https://issues.apache.org/jira/browse/YARN-9092) | Create an object for cgroups mount enable and cgroups mount path as they belong together |  Minor | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9124](https://issues.apache.org/jira/browse/YARN-9124) | Resolve contradiction in ResourceUtils: addMandatoryResources / checkMandatoryResources work differently |  Minor | . | Szilard Nemeth | Adam Antal |
+| [YARN-8199](https://issues.apache.org/jira/browse/YARN-8199) | Logging fileSize of log files under NM Local Dir |  Major | log-aggregation | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9729](https://issues.apache.org/jira/browse/YARN-9729) | [UI2] Fix error message for logs when ATSv2 is offline |  Major | yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [YARN-9135](https://issues.apache.org/jira/browse/YARN-9135) | NM State store ResourceMappings serialization are tested with Strings instead of real Device objects |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [HDFS-14370](https://issues.apache.org/jira/browse/HDFS-14370) | Edit log tailing fast-path should allow for backoff |  Major | namenode, qjm | Erik Krogen | Erik Krogen |
+| [YARN-9442](https://issues.apache.org/jira/browse/YARN-9442) | container working directory has group read permissions |  Minor | yarn | Jim Brennan | Jim Brennan |
+| [HADOOP-16459](https://issues.apache.org/jira/browse/HADOOP-16459) | Backport [HADOOP-16266] "Add more fine-grained processing time metrics to the RPC layer" to branch-2 |  Major | . | Erik Krogen | Erik Krogen |
+| [HDFS-14491](https://issues.apache.org/jira/browse/HDFS-14491) | More Clarity on Namenode UI Around Blocks and Replicas |  Minor | . | Alan Jackoway | Siyao Meng |
+| [YARN-9134](https://issues.apache.org/jira/browse/YARN-9134) | No test coverage for redefining FPGA / GPU resource types in TestResourceUtils |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9133](https://issues.apache.org/jira/browse/YARN-9133) | Make tests more easy to comprehend in TestGpuResourceHandler |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9140](https://issues.apache.org/jira/browse/YARN-9140) | Code cleanup in ResourcePluginManager.initialize and in TestResourcePluginManager |  Trivial | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9676](https://issues.apache.org/jira/browse/YARN-9676) | Add DEBUG and TRACE level messages to AppLogAggregatorImpl and connected classes |  Major | . | Adam Antal | Adam Antal |
+| [YARN-9488](https://issues.apache.org/jira/browse/YARN-9488) | Skip YARNFeatureNotEnabledException from ClientRMService |  Minor | resourcemanager | Prabhu Joseph | Prabhu Joseph |
+| [YARN-8586](https://issues.apache.org/jira/browse/YARN-8586) | Extract log aggregation related fields and methods from RMAppImpl |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9100](https://issues.apache.org/jira/browse/YARN-9100) | Add tests for GpuResourceAllocator and do minor code cleanup |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [HADOOP-15246](https://issues.apache.org/jira/browse/HADOOP-15246) | SpanReceiverInfo - Prefer ArrayList over LinkedList |  Trivial | common | David Mollitor | David Mollitor |
+| [HADOOP-16158](https://issues.apache.org/jira/browse/HADOOP-16158) | DistCp to support checksum validation when copy blocks in parallel |  Major | tools/distcp | Kai Xie | Kai Xie |
+| [HDFS-14746](https://issues.apache.org/jira/browse/HDFS-14746) | Trivial test code update after HDFS-14687 |  Trivial | ec | Wei-Chiu Chuang | kevin su |
+| [HDFS-13709](https://issues.apache.org/jira/browse/HDFS-13709) | Report bad block to NN when transfer block encounter EIO exception |  Major | datanode | Chen Zhang | Chen Zhang |
+| [HDFS-14665](https://issues.apache.org/jira/browse/HDFS-14665) | HttpFS: LISTSTATUS response is missing HDFS-specific fields |  Major | httpfs | Siyao Meng | Siyao Meng |
+| [HDFS-14276](https://issues.apache.org/jira/browse/HDFS-14276) | [SBN read] Reduce tailing overhead |  Major | ha, namenode | Wei-Chiu Chuang | Ayush Saxena |
+| [HADOOP-16061](https://issues.apache.org/jira/browse/HADOOP-16061) | Update Apache Yetus to 0.10.0 |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9756](https://issues.apache.org/jira/browse/YARN-9756) | Create metric that sums total memory/vcores preempted per round |  Major | capacity scheduler | Eric Payne | Manikandan R |
+| [HDFS-14748](https://issues.apache.org/jira/browse/HDFS-14748) | Make DataNodePeerMetrics#minOutlierDetectionSamples configurable |  Major | . | Lisheng Sun | Lisheng Sun |
+| [HADOOP-15998](https://issues.apache.org/jira/browse/HADOOP-15998) | Ensure jar validation works on Windows. |  Blocker | build | Brian Grunkemeyer | Brian Grunkemeyer |
+| [HDFS-14633](https://issues.apache.org/jira/browse/HDFS-14633) | The StorageType quota and consume in QuotaFeature is not handled for rename |  Major | . | Jinglun | Jinglun |
+| [YARN-9810](https://issues.apache.org/jira/browse/YARN-9810) | Add queue capacity/maxcapacity percentage metrics |  Major | . | Jonathan Hung | Shubham Gupta |
+| [YARN-9763](https://issues.apache.org/jira/browse/YARN-9763) | Print application tags in application summary |  Major | . | Jonathan Hung | Manoj Kumar |
+| [YARN-9795](https://issues.apache.org/jira/browse/YARN-9795) | ClusterMetrics to include AM allocation delay |  Minor | . | Fengnan Li | Fengnan Li |
+| [YARN-8995](https://issues.apache.org/jira/browse/YARN-8995) | Log events info in AsyncDispatcher when event queue size cumulatively reaches a certain number every time. |  Major | metrics, nodemanager, resourcemanager | zhuqi | zhuqi |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- |:---- |:---- |:---- |:---- |
+| [HDFS-13973](https://issues.apache.org/jira/browse/HDFS-13973) | getErasureCodingPolicy should log path in audit event |  Major | hdfs | Shweta | Shweta |
+| [YARN-8868](https://issues.apache.org/jira/browse/YARN-8868) | Set HTTPOnly attribute to Cookie |  Major | . | Chandni Singh | Chandni Singh |
+| [HDFS-14003](https://issues.apache.org/jira/browse/HDFS-14003) | Fix findbugs warning in trunk for FSImageFormatPBINode |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-8910](https://issues.apache.org/jira/browse/YARN-8910) | Misleading log statement in NM when max retries is -1 |  Minor | . | Chandni Singh | Chandni Singh |
+| [YARN-7502](https://issues.apache.org/jira/browse/YARN-7502) | Nodemanager restart docs should describe nodemanager supervised property |  Major | documentation | Jason Lowe | Suma Shivaprasad |
+| [YARN-8826](https://issues.apache.org/jira/browse/YARN-8826) | Fix lingering timeline collector after serviceStop in TimelineCollectorManager |  Trivial | ATSv2 | Prabha Manepalli | Prabha Manepalli |
+| [HDFS-14021](https://issues.apache.org/jira/browse/HDFS-14021) | TestReconstructStripedBlocksWithRackAwareness#testReconstructForNotEnoughRacks fails intermittently |  Major | erasure-coding, test | Xiao Chen | Xiao Chen |
+| [MAPREDUCE-7151](https://issues.apache.org/jira/browse/MAPREDUCE-7151) | RMContainerAllocator#handleJobPriorityChange expects application\_priority always |  Major | . | Bibin A Chundatt | Bilwa S T |
+| [HDFS-14028](https://issues.apache.org/jira/browse/HDFS-14028) | HDFS OIV temporary dir deletes folder |  Major | hdfs | Adam Antal | Adam Antal |
+| [HDFS-14027](https://issues.apache.org/jira/browse/HDFS-14027) | DFSStripedOutputStream should implement both hsync methods |  Critical | erasure-coding | Xiao Chen | Xiao Chen |
+| [HADOOP-15899](https://issues.apache.org/jira/browse/HADOOP-15899) | Update AWS Java SDK versions in NOTICE.txt |  Major | . | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15900](https://issues.apache.org/jira/browse/HADOOP-15900) | Update JSch versions in LICENSE.txt |  Major | . | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-14042](https://issues.apache.org/jira/browse/HDFS-14042) | Fix NPE when PROVIDED storage is missing |  Major | . | Íñigo Goiri | Virajith Jalaparti |
+| [HDFS-14043](https://issues.apache.org/jira/browse/HDFS-14043) | Tolerate corrupted seen\_txid file |  Major | hdfs, namenode | Lukas Majercak | Lukas Majercak |
+| [YARN-8970](https://issues.apache.org/jira/browse/YARN-8970) | Improve the debug message in CS#allocateContainerOnSingleNode |  Trivial | . | Weiwei Yang | Zhankun Tang |
+| [YARN-8865](https://issues.apache.org/jira/browse/YARN-8865) | RMStateStore contains large number of expired RMDelegationToken |  Major | resourcemanager | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [HDFS-14048](https://issues.apache.org/jira/browse/HDFS-14048) | DFSOutputStream close() throws exception on subsequent call after DataNode restart |  Major | hdfs-client | Erik Krogen | Erik Krogen |
+| [MAPREDUCE-7156](https://issues.apache.org/jira/browse/MAPREDUCE-7156) | NullPointerException when reaching max shuffle connections |  Major | mrv2 | Peter Bacsko | Peter Bacsko |
+| [YARN-8866](https://issues.apache.org/jira/browse/YARN-8866) | Fix a parsing error for crossdomain.xml |  Major | build, yarn-ui-v2 | Takanobu Asanuma | Takanobu Asanuma |
+| [HDFS-14039](https://issues.apache.org/jira/browse/HDFS-14039) | ec -listPolicies doesn't show correct state for the default policy when the default is not RS(6,3) |  Major | erasure-coding | Xiao Chen | Kitti Nanasi |
+| [HADOOP-15916](https://issues.apache.org/jira/browse/HADOOP-15916) | Upgrade Maven Surefire plugin to 3.0.0-M1 |  Blocker | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9002](https://issues.apache.org/jira/browse/YARN-9002) | YARN Service keytab does not support s3, wasb, gs and is restricted to HDFS and local filesystem only |  Major | yarn-native-services | Gour Saha | Gour Saha |
+| [YARN-8233](https://issues.apache.org/jira/browse/YARN-8233) | NPE in CapacityScheduler#tryCommit when handling allocate/reserve proposal whose allocatedOrReservedContainer is null |  Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-14065](https://issues.apache.org/jira/browse/HDFS-14065) | Failed Storage Locations shows nothing in the Datanode Volume Failures |  Major | . | Ayush Saxena | Ayush Saxena |
+| [HADOOP-15923](https://issues.apache.org/jira/browse/HADOOP-15923) | create-release script should set max-cache-ttl as well as default-cache-ttl for gpg-agent |  Blocker | build | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15912](https://issues.apache.org/jira/browse/HADOOP-15912) | start-build-env.sh still creates an invalid /etc/sudoers.d/hadoop-build-${USER\_ID} file entry after HADOOP-15802 |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15869](https://issues.apache.org/jira/browse/HADOOP-15869) | BlockDecompressorStream#decompress should not return -1 in case of IOException. |  Major | . | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [MAPREDUCE-7158](https://issues.apache.org/jira/browse/MAPREDUCE-7158) | Inefficient Flush Logic in JobHistory EventWriter |  Major | . | Zichen Sun | Zichen Sun |
+| [HADOOP-15930](https://issues.apache.org/jira/browse/HADOOP-15930) | Exclude MD5 checksum files from release artifact |  Critical | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-8856](https://issues.apache.org/jira/browse/YARN-8856) | TestTimelineReaderWebServicesHBaseStorage tests failing with NoClassDefFoundError |  Major | . | Jason Lowe | Sushil Ks |
+| [HADOOP-15925](https://issues.apache.org/jira/browse/HADOOP-15925) | The config and log of gpg-agent are removed in create-release script |  Major | build | Akira Ajisaka | Dinesh Chitlangia |
+| [HDFS-13963](https://issues.apache.org/jira/browse/HDFS-13963) | NN UI is broken with IE11 |  Minor | namenode, ui | Daisuke Kobayashi | Ayush Saxena |
+| [HDFS-14056](https://issues.apache.org/jira/browse/HDFS-14056) | Fix error messages in HDFS-12716 |  Minor | hdfs | Adam Antal | Ayush Saxena |
+| [YARN-8992](https://issues.apache.org/jira/browse/YARN-8992) | Fair scheduler can delete a dynamic queue while an application attempt is being added to the queue |  Major | fairscheduler | Haibo Chen | Wilfred Spiegelenburg |
+| [YARN-8984](https://issues.apache.org/jira/browse/YARN-8984) | AMRMClient#OutstandingSchedRequests leaks when AllocationTags is null or empty |  Critical | . | Yang Wang | Yang Wang |
+| [HADOOP-15948](https://issues.apache.org/jira/browse/HADOOP-15948) | Inconsistency in get and put syntax if filename/dirname contains space |  Minor | fs | vivek kumar | Ayush Saxena |
+| [HDFS-13816](https://issues.apache.org/jira/browse/HDFS-13816) | dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException |  Major | namenode | Vinayakumar B | Vinayakumar B |
+| [MAPREDUCE-7162](https://issues.apache.org/jira/browse/MAPREDUCE-7162) | TestEvents#testEvents fails |  Critical | jobhistoryserver, test | Zhaohui Xin | Zhaohui Xin |
+| [YARN-9056](https://issues.apache.org/jira/browse/YARN-9056) | Yarn Service Upgrade: Instance state changes from UPGRADING to READY without performing a readiness check |  Critical | . | Chandni Singh | Chandni Singh |
+| [YARN-8812](https://issues.apache.org/jira/browse/YARN-8812) | Containers fail when creating a symlink that starts with a hyphen for a resource file |  Minor | . | Oleksandr Shevchenko | Oleksandr Shevchenko |
+| [YARN-9030](https://issues.apache.org/jira/browse/YARN-9030) | Log aggregation changes to handle filesystems which do not support setting permissions |  Major | log-aggregation | Suma Shivaprasad | Suma Shivaprasad |
+| [YARN-9067](https://issues.apache.org/jira/browse/YARN-9067) | YARN Resource Manager is running OOM because of leak of Configuration Object |  Major | yarn-native-services | Eric Yang | Eric Yang |
+| [MAPREDUCE-7165](https://issues.apache.org/jira/browse/MAPREDUCE-7165) | mapred-site.xml is misformatted in single node setup document |  Major | documentation | Akira Ajisaka | Zhaohui Xin |
+| [HADOOP-15970](https://issues.apache.org/jira/browse/HADOOP-15970) | Upgrade plexus-utils from 2.0.5 to 3.1.0 |  Major | security | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15966](https://issues.apache.org/jira/browse/HADOOP-15966) | Hadoop Kerberos broken on macos as java.security.krb5.realm is reset: Null realm name (601) |  Major | scripts | Steve Loughran | Steve Loughran |
+| [HADOOP-15974](https://issues.apache.org/jira/browse/HADOOP-15974) | Upgrade Curator version to 2.13.0 to fix ZK tests |  Major | . | Jason Lowe | Akira Ajisaka |
+| [YARN-9071](https://issues.apache.org/jira/browse/YARN-9071) | NM and service AM don't have updated status for reinitialized containers |  Critical | . | Billie Rinaldi | Chandni Singh |
+| [MAPREDUCE-7159](https://issues.apache.org/jira/browse/MAPREDUCE-7159) | FrameworkUploader: ensure proper permissions of generated framework tar.gz if restrictive umask is used |  Major | mrv2 | Peter Bacsko | Peter Bacsko |
+| [YARN-9009](https://issues.apache.org/jira/browse/YARN-9009) | Fix flaky test TestEntityGroupFSTimelineStore.testCleanLogs |  Minor | . | OrDTesters | OrDTesters |
+| [MAPREDUCE-7170](https://issues.apache.org/jira/browse/MAPREDUCE-7170) | Doc typo in PluggableShuffleAndPluggableSort.md |  Minor | documentation | Zhaohui Xin | Zhaohui Xin |
+| [YARN-9040](https://issues.apache.org/jira/browse/YARN-9040) | LevelDBCacheTimelineStore in ATS 1.5 leaks native memory |  Major | timelineserver | Tarun Parimi | Tarun Parimi |
+| [YARN-9084](https://issues.apache.org/jira/browse/YARN-9084) | Service Upgrade: With default readiness check, the status of upgrade is reported to be successful prematurely |  Major | . | Chandni Singh | Chandni Singh |
+| [HDFS-13661](https://issues.apache.org/jira/browse/HDFS-13661) | Ls command with -e option fails when the filesystem is not HDFS |  Major | erasure-coding, tools | Takanobu Asanuma | Takanobu Asanuma |
+| [YARN-9154](https://issues.apache.org/jira/browse/YARN-9154) | Fix itemization in YARN service quickstart document |  Minor | documentation | Akira Ajisaka | Ayush Saxena |
+| [HDFS-14166](https://issues.apache.org/jira/browse/HDFS-14166) | Ls with -e option does not give the result in the proper format |  Major | . | Soumyapn | Shubham Dewan |
+| [HDFS-14046](https://issues.apache.org/jira/browse/HDFS-14046) | In-Maintenance icon is missing in datanode info page |  Major | datanode | Harshakiran Reddy | Ranith Sardar |
+| [HDFS-14183](https://issues.apache.org/jira/browse/HDFS-14183) | [SPS] Remove the -w parameter from the -satisfystoragepolicy usage |  Major | . | Ayush Saxena | Ayush Saxena |
+| [YARN-9164](https://issues.apache.org/jira/browse/YARN-9164) | Shutdown NM may cause NPE when opportunistic container scheduling is enabled |  Critical | . | lujie | lujie |
+| [YARN-8567](https://issues.apache.org/jira/browse/YARN-8567) | Fetching yarn logs fails for long running application if it is not present in timeline store |  Major | log-aggregation | Tarun Parimi | Tarun Parimi |
+| [HADOOP-16028](https://issues.apache.org/jira/browse/HADOOP-16028) | Fix NetworkTopology chooseRandom function to support excluded nodes |  Major | . | Sihai Ke | Sihai Ke |
+| [HADOOP-16030](https://issues.apache.org/jira/browse/HADOOP-16030) | AliyunOSS: bring fixes back from HADOOP-15671 |  Blocker | fs/oss | wujinhu | wujinhu |
+| [YARN-9162](https://issues.apache.org/jira/browse/YARN-9162) | Fix TestRMAdminCLI#testHelp |  Major | resourcemanager, test | Ayush Saxena | Ayush Saxena |
+| [YARN-9173](https://issues.apache.org/jira/browse/YARN-9173) | FairShare calculation broken for large values after YARN-8833 |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-8833](https://issues.apache.org/jira/browse/YARN-8833) | Avoid potential integer overflow when computing fair shares |  Major | fairscheduler | liyakun | liyakun |
+| [HADOOP-16036](https://issues.apache.org/jira/browse/HADOOP-16036) | WASB: Disable jetty logging configuration announcement |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-16016](https://issues.apache.org/jira/browse/HADOOP-16016) | TestSSLFactory#testServerWeakCiphers sporadically fails in precommit builds |  Major | security, test | Jason Lowe | Akira Ajisaka |
+| [HDFS-14198](https://issues.apache.org/jira/browse/HDFS-14198) | Upload and Create button doesn't get enabled after getting reset. |  Major | . | Ayush Saxena | Ayush Saxena |
+| [YARN-8747](https://issues.apache.org/jira/browse/YARN-8747) | [UI2] YARN UI2 page loading failed due to js error under some time zone configuration |  Critical | webapp | collinma | collinma |
+| [YARN-9203](https://issues.apache.org/jira/browse/YARN-9203) | Fix typos in yarn-default.xml |  Trivial | documentation | Rahul Padmanabhan | Rahul Padmanabhan |
+| [YARN-9194](https://issues.apache.org/jira/browse/YARN-9194) | Invalid event: REGISTERED and LAUNCH\_FAILED at FAILED, and NullPointerException happens in RM while shutdown a NM |  Critical | . | lujie | lujie |
+| [YARN-9204](https://issues.apache.org/jira/browse/YARN-9204) | RM fails to start if absolute resource is specified for partition capacity in CS queues |  Blocker | yarn | Jiandan Yang | Jiandan Yang |
+| [HDFS-14207](https://issues.apache.org/jira/browse/HDFS-14207) | ZKFC should catch exception when ha configuration missing |  Major | hdfs | Fei Hui | Fei Hui |
+| [HDFS-14218](https://issues.apache.org/jira/browse/HDFS-14218) | EC: Ls -e throws NPE when directory EC policy is disabled |  Major | . | Surendra Singh Lilhore | Ayush Saxena |
+| [YARN-9210](https://issues.apache.org/jira/browse/YARN-9210) | RM nodes web page can not display node info |  Blocker | yarn | Jiandan Yang | Jiandan Yang |
+| [YARN-9205](https://issues.apache.org/jira/browse/YARN-9205) | When using custom resource type, application will fail to run due to the CapacityScheduler throws InvalidResourceRequestException(GREATER\_THEN\_MAX\_ALLOCATION) |  Critical | . | Zhankun Tang | Zhankun Tang |
+| [YARN-8961](https://issues.apache.org/jira/browse/YARN-8961) | [UI2] Flow Run End Time shows 'Invalid date' |  Major | . | Charan Hebri | Akhil PB |
+| [HADOOP-16065](https://issues.apache.org/jira/browse/HADOOP-16065) | -Ddynamodb should be -Ddynamo in AWS SDK testing document |  Minor | documentation | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-14228](https://issues.apache.org/jira/browse/HDFS-14228) | Incorrect getSnapshottableDirListing() javadoc |  Major | snapshots | Wei-Chiu Chuang | Dinesh Chitlangia |
+| [YARN-9222](https://issues.apache.org/jira/browse/YARN-9222) | Print launchTime in ApplicationSummary |  Major | . | Jonathan Hung | Jonathan Hung |
+| [YARN-8901](https://issues.apache.org/jira/browse/YARN-8901) | Restart "NEVER" policy does not work with component dependency |  Critical | . | Yesha Vora | Suma Shivaprasad |
+| [YARN-9237](https://issues.apache.org/jira/browse/YARN-9237) | NM should ignore sending finished apps to RM during RM fail-over |  Major | yarn | Jiandan Yang | Jiandan Yang |
+| [YARN-6616](https://issues.apache.org/jira/browse/YARN-6616) | YARN AHS shows submitTime for jobs same as startTime |  Minor | . | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9099](https://issues.apache.org/jira/browse/YARN-9099) | GpuResourceAllocator#getReleasingGpus calculates number of GPUs in a wrong way |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9262](https://issues.apache.org/jira/browse/YARN-9262) | TestRMAppAttemptTransitions is failing with an NPE |  Critical | resourcemanager | Sunil Govindan | lujie |
+| [HDFS-14232](https://issues.apache.org/jira/browse/HDFS-14232) | libhdfs is not included in binary tarball |  Critical | build, libhdfs | Akira Ajisaka | Akira Ajisaka |
+| [MAPREDUCE-7177](https://issues.apache.org/jira/browse/MAPREDUCE-7177) | Disable speculative execution in TestDFSIO |  Major | . | Kihwal Lee | Zhaohui Xin |
+| [HADOOP-16076](https://issues.apache.org/jira/browse/HADOOP-16076) | SPNEGO+SSL Client Connections with HttpClient Broken |  Major | build, security | Larry McCay | Larry McCay |
+| [HADOOP-16074](https://issues.apache.org/jira/browse/HADOOP-16074) | WASB: Update container not found error code |  Major | fs/azure | Da Zhou | Da Zhou |
+| [YARN-8498](https://issues.apache.org/jira/browse/YARN-8498) | Yarn NodeManager OOM Listener Fails Compilation on Ubuntu 18.04 |  Blocker | . | Jack Bearden | Ayush Saxena |
+| [YARN-9206](https://issues.apache.org/jira/browse/YARN-9206) | RMServerUtils does not count SHUTDOWN as an accepted state |  Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HADOOP-16032](https://issues.apache.org/jira/browse/HADOOP-16032) | DistCp should clear sub-directory ACL before applying new ACL on it. |  Major | tools/distcp | Ranith Sardar | Ranith Sardar |
+| [HDFS-14140](https://issues.apache.org/jira/browse/HDFS-14140) | JournalNodeSyncer authentication is failing in secure cluster |  Major | journal-node, security | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-9257](https://issues.apache.org/jira/browse/YARN-9257) | Distributed Shell client throws a NPE for a non-existent queue |  Major | distributed-shell | Charan Hebri | Charan Hebri |
+| [YARN-8761](https://issues.apache.org/jira/browse/YARN-8761) | Service AM support for decommissioning component instances |  Major | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-14266](https://issues.apache.org/jira/browse/HDFS-14266) | EC: Fsck -blockId shows null for EC blocks if one block is not available. |  Major | . | Harshakiran Reddy | Ayush Saxena |
+| [HDFS-14274](https://issues.apache.org/jira/browse/HDFS-14274) | EC: NPE while listing EC policy for a directory following the replication policy. |  Major | erasure-coding | Souryakanta Dwivedy | Ayush Saxena |
+| [HDFS-14263](https://issues.apache.org/jira/browse/HDFS-14263) | Remove unnecessary block file exists check from FsDatasetImpl#getBlockInputStream() |  Major | datanode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-7761](https://issues.apache.org/jira/browse/YARN-7761) | [UI2] Clicking 'master container log' or 'Link' next to 'log' under application's appAttempt goes to Old UI's Log link |  Major | yarn-ui-v2 | Sumana Sathish | Akhil PB |
+| [YARN-9295](https://issues.apache.org/jira/browse/YARN-9295) | [UI2] Fix label typo in Cluster Overview page |  Trivial | yarn-ui-v2 | Charan Hebri | Charan Hebri |
+| [YARN-9308](https://issues.apache.org/jira/browse/YARN-9308) | fairscheduler-statedump.log gets generated regardless of service, again after the merge of HDFS-7240 |  Blocker | fairscheduler, scheduler | Akira Ajisaka | Wilfred Spiegelenburg |
+| [YARN-9284](https://issues.apache.org/jira/browse/YARN-9284) | Fix the unit of yarn.service.am-resource.memory in the document |  Minor | documentation, yarn-native-services | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-9283](https://issues.apache.org/jira/browse/YARN-9283) | Javadoc of LinuxContainerExecutor#addSchedPriorityCommand has a wrong property name as reference |  Minor | documentation | Szilard Nemeth | Adam Antal |
+| [YARN-9286](https://issues.apache.org/jira/browse/YARN-9286) | [Timeline Server] Sorting based on FinalStatus shows pop-up message |  Minor | timelineserver | Nallasivan | Bilwa S T |
+| [HDFS-14081](https://issues.apache.org/jira/browse/HDFS-14081) | hdfs dfsadmin -metasave metasave\_test results NPE |  Major | hdfs | Shweta | Shweta |
+| [HADOOP-15813](https://issues.apache.org/jira/browse/HADOOP-15813) | Enable more reliable SSL connection reuse |  Major | common | Daryn Sharp | Daryn Sharp |
+| [HDFS-14216](https://issues.apache.org/jira/browse/HDFS-14216) | NullPointerException happens in NamenodeWebHdfs |  Critical | . | lujie | lujie |
+| [HADOOP-16105](https://issues.apache.org/jira/browse/HADOOP-16105) | WASB in secure mode does not set connectingUsingSAS |  Major | fs/azure | Steve Loughran | Steve Loughran |
+| [YARN-9238](https://issues.apache.org/jira/browse/YARN-9238) | Avoid allocating opportunistic containers to previous/removed/non-exist application attempt |  Critical | . | lujie | lujie |
+| [YARN-9118](https://issues.apache.org/jira/browse/YARN-9118) | Handle exceptions with parsing user defined GPU devices in GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9317](https://issues.apache.org/jira/browse/YARN-9317) | Avoid repeated YarnConfiguration#timelineServiceV2Enabled check |  Major | . | Bibin A Chundatt | Prabhu Joseph |
+| [YARN-9213](https://issues.apache.org/jira/browse/YARN-9213) | RM Web UI v1 does not show custom resource allocations for containers page |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9329](https://issues.apache.org/jira/browse/YARN-9329) | updatePriority is blocked when using FairScheduler |  Major | . | Jiandan Yang | Jiandan Yang |
+| [YARN-9248](https://issues.apache.org/jira/browse/YARN-9248) | RMContainerImpl:Invalid event: ACQUIRED at KILLED |  Major | . | lujie | lujie |
+| [HADOOP-16018](https://issues.apache.org/jira/browse/HADOOP-16018) | DistCp won't reassemble chunks when blocks per chunk \> 0 |  Major | tools/distcp | Kai Xie | Kai Xie |
+| [YARN-9334](https://issues.apache.org/jira/browse/YARN-9334) | YARN Service Client does not work with SPNEGO when knox is configured |  Major | yarn-native-services | Tarun Parimi | Billie Rinaldi |
+| [HDFS-14305](https://issues.apache.org/jira/browse/HDFS-14305) | Serial number in BlockTokenSecretManager could overlap between different namenodes |  Major | namenode, security | Chao Sun | He Xiaoqiao |
+| [HDFS-14314](https://issues.apache.org/jira/browse/HDFS-14314) | fullBlockReportLeaseId should be reset after registering to NN |  Critical | datanode | star | star |
+| [YARN-8803](https://issues.apache.org/jira/browse/YARN-8803) | [UI2] Show flow runs in the order of recently created time in graph widgets |  Major | yarn-ui-v2 | Akhil PB | Akhil PB |
+| [HDFS-14317](https://issues.apache.org/jira/browse/HDFS-14317) | Standby does not trigger edit log rolling when in-progress edit log tailing is enabled |  Critical | . | Ekanth Sethuramalingam | Ekanth Sethuramalingam |
+| [HDFS-14333](https://issues.apache.org/jira/browse/HDFS-14333) | Datanode fails to start if any disk has errors during Namenode registration |  Major | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HADOOP-16192](https://issues.apache.org/jira/browse/HADOOP-16192) | CallQueue backoff bug fixes: doesn't perform backoff when add() is used, and doesn't update backoff when refreshed |  Major | ipc | Erik Krogen | Erik Krogen |
+| [HDFS-14037](https://issues.apache.org/jira/browse/HDFS-14037) | Fix SSLFactory truststore reloader thread leak in URLConnectionFactory |  Major | hdfs-client, webhdfs | Takanobu Asanuma | Takanobu Asanuma |
+| [HADOOP-16225](https://issues.apache.org/jira/browse/HADOOP-16225) | Fix links to the developer mailing lists in DownstreamDev.md |  Minor | documentation | Akira Ajisaka | Wanqiang Ji |
+| [HADOOP-16226](https://issues.apache.org/jira/browse/HADOOP-16226) | new Path(String str) does not remove all the trailing slashes of str |  Minor | fs | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-16232](https://issues.apache.org/jira/browse/HADOOP-16232) | Fix errors in the checkstyle configuration xmls |  Major | build | Akira Ajisaka | Wanqiang Ji |
+| [HDFS-14389](https://issues.apache.org/jira/browse/HDFS-14389) | getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured |  Major | namenode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-14407](https://issues.apache.org/jira/browse/HDFS-14407) | Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes |  Minor | . | Wanqiang Ji | Wanqiang Ji |
+| [YARN-9413](https://issues.apache.org/jira/browse/YARN-9413) | Queue resource leak after app fail for CapacityScheduler |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HADOOP-14635](https://issues.apache.org/jira/browse/HADOOP-14635) | Javadoc correction for AccessControlList#buildACL |  Minor | documentation | Bibin A Chundatt | Yeliang Cang |
+| [HADOOP-14544](https://issues.apache.org/jira/browse/HADOOP-14544) | DistCp documentation for command line options is misaligned. |  Minor | documentation | Chris Nauroth | Masatake Iwasaki |
+| [HDFS-10477](https://issues.apache.org/jira/browse/HDFS-10477) | Stopping decommission of a rack of DataNodes caused NameNode to fail over to standby |  Major | namenode | yunjiong zhao | yunjiong zhao |
+| [YARN-9487](https://issues.apache.org/jira/browse/YARN-9487) | NodeManager native build shouldn't link against librt on macOS |  Major | nodemanager | Siyao Meng | Siyao Meng |
+| [YARN-6695](https://issues.apache.org/jira/browse/YARN-6695) | Race condition in RM for publishing container events vs appFinished events causes NPE |  Critical | . | Rohith Sharma K S | Prabhu Joseph |
+| [YARN-8622](https://issues.apache.org/jira/browse/YARN-8622) | NodeManager native build fails due to getgrouplist not found on macOS |  Major | nodemanager | Ewan Higgs | Siyao Meng |
+| [HADOOP-16265](https://issues.apache.org/jira/browse/HADOOP-16265) | Configuration#getTimeDuration is not consistent between default value and manual settings. |  Major | . | star | star |
+| [HDFS-13677](https://issues.apache.org/jira/browse/HDFS-13677) | Dynamically refreshing disk configuration results in overwriting the VolumeMap |  Blocker | . | xuzq | xuzq |
+| [YARN-9285](https://issues.apache.org/jira/browse/YARN-9285) | RM UI progress column is of wrong type |  Minor | yarn | Ahmed Hussein | Ahmed Hussein |
+| [HADOOP-16278](https://issues.apache.org/jira/browse/HADOOP-16278) | With S3A Filesystem, Long Running services End up Doing lot of GC and eventually die |  Major | common, hadoop-aws, metrics | Rajat Khandelwal | Rajat Khandelwal |
+| [YARN-9504](https://issues.apache.org/jira/browse/YARN-9504) | [UI2] Fair scheduler queue view page does not show actual capacity |  Major | fairscheduler, yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [YARN-9519](https://issues.apache.org/jira/browse/YARN-9519) | TFile log aggregation file format is not working for yarn.log-aggregation.TFile.remote-app-log-dir config |  Major | log-aggregation | Adam Antal | Adam Antal |
+| [YARN-9508](https://issues.apache.org/jira/browse/YARN-9508) | YarnConfiguration areNodeLabelsEnabled check is costly in allocation flow |  Critical | . | Bibin A Chundatt | Bilwa S T |
+| [HADOOP-16247](https://issues.apache.org/jira/browse/HADOOP-16247) | NPE in FsUrlConnection |  Major | hdfs-client | Karthik Palanisamy | Karthik Palanisamy |
+| [HADOOP-16248](https://issues.apache.org/jira/browse/HADOOP-16248) | MutableQuantiles leak memory under heavy load |  Major | metrics | Alexis Daboville | Alexis Daboville |
+| [HDFS-14323](https://issues.apache.org/jira/browse/HDFS-14323) | Distcp fails in Hadoop 3.x when 2.x source webhdfs url has special characters in hdfs file path |  Major | webhdfs | Srinivasu Majeti | Srinivasu Majeti |
+| [MAPREDUCE-7205](https://issues.apache.org/jira/browse/MAPREDUCE-7205) | Treat container scheduler kill exit code as a task attempt killing event |  Major | applicationmaster, mr-am, mrv2 | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14500](https://issues.apache.org/jira/browse/HDFS-14500) | NameNode StartupProgress continues to report edit log segments after the LOADING\_EDITS phase is finished |  Major | namenode | Erik Krogen | Erik Krogen |
+| [YARN-9500](https://issues.apache.org/jira/browse/YARN-9500) | Fix typos in ResourceModel.md |  Trivial | documentation | leiqiang | leiqiang |
+| [HADOOP-16331](https://issues.apache.org/jira/browse/HADOOP-16331) | Fix ASF License check in pom.xml |  Major | . | Wanqiang Ji | Akira Ajisaka |
+| [YARN-9542](https://issues.apache.org/jira/browse/YARN-9542) | Fix LogsCLI guessAppOwner ignores custom file format suffix |  Minor | log-aggregation | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-14512](https://issues.apache.org/jira/browse/HDFS-14512) | ONE\_SSD policy will be violated while writing data with DistributedFileSystem.create(....favoredNodes) |  Major | . | Shen Yinjie | Ayush Saxena |
+| [HADOOP-16334](https://issues.apache.org/jira/browse/HADOOP-16334) | Fix yetus-wrapper not working when HADOOP\_YETUS\_VERSION \>= 0.9.0 |  Major | yetus | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14521](https://issues.apache.org/jira/browse/HDFS-14521) | Suppress setReplication logging. |  Major | . | Kihwal Lee | Kihwal Lee |
+| [YARN-9507](https://issues.apache.org/jira/browse/YARN-9507) | Fix NPE in NodeManager#serviceStop on startup failure |  Minor | . | Bilwa S T | Bilwa S T |
+| [YARN-8947](https://issues.apache.org/jira/browse/YARN-8947) | [UI2] Active User info missing from UI2 |  Major | yarn-ui-v2 | Akhil PB | Akhil PB |
+| [YARN-8906](https://issues.apache.org/jira/browse/YARN-8906) | [UI2] NM hostnames not displayed correctly in Node Heatmap Chart |  Major | . | Charan Hebri | Akhil PB |
+| [YARN-9580](https://issues.apache.org/jira/browse/YARN-9580) | Fulfilled reservation information in assignment is lost when transferring in ParentQueue#assignContainers |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-8625](https://issues.apache.org/jira/browse/YARN-8625) | Aggregate Resource Allocation for each job is not present in ATS |  Major | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [HADOOP-16345](https://issues.apache.org/jira/browse/HADOOP-16345) | Potential NPE when instantiating FairCallQueue metrics |  Major | ipc | Erik Krogen | Erik Krogen |
+| [YARN-9594](https://issues.apache.org/jira/browse/YARN-9594) | Fix missing break statement in ContainerScheduler#handle |  Major | . | lujie | lujie |
+| [YARN-9565](https://issues.apache.org/jira/browse/YARN-9565) | RMAppImpl#ranNodes not cleared on FinalTransition |  Major | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9547](https://issues.apache.org/jira/browse/YARN-9547) | ContainerStatusPBImpl default execution type is not returned |  Major | . | Bibin A Chundatt | Bilwa S T |
+| [HDFS-13231](https://issues.apache.org/jira/browse/HDFS-13231) | Extend visualization for Decommissioning, Maintenance Mode under Datanode tab in the NameNode UI |  Major | datanode, namenode | Haibo Yan | Stephen O'Donnell |
+| [HDFS-14535](https://issues.apache.org/jira/browse/HDFS-14535) | The default 8KB buffer in requestFileDescriptors#BufferedOutputStream is causing lots of heap allocation in HBase when using short-circuit read |  Major | hdfs-client | Zheng Hu | Zheng Hu |
+| [HDFS-13730](https://issues.apache.org/jira/browse/HDFS-13730) | BlockReaderRemote.sendReadResult throws NPE |  Major | hdfs-client | Wei-Chiu Chuang | Yuanbo Liu |
+| [YARN-9584](https://issues.apache.org/jira/browse/YARN-9584) | Should call initializeProcessTrees before getting the pid |  Critical | nodemanager | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14010](https://issues.apache.org/jira/browse/HDFS-14010) | Pass correct DF usage to ReservedSpaceCalculator builder |  Minor | . | Lukas Majercak | Lukas Majercak |
+| [HDFS-14078](https://issues.apache.org/jira/browse/HDFS-14078) | Admin helper fails to prettify NullPointerExceptions |  Major | . | Elek, Marton | Elek, Marton |
+| [HDFS-14101](https://issues.apache.org/jira/browse/HDFS-14101) | Random failure of testListCorruptFilesCorruptedBlock |  Major | test | Kihwal Lee | Zsolt Venczel |
+| [HDFS-14465](https://issues.apache.org/jira/browse/HDFS-14465) | When a block's expected replication is larger than the number of DataNodes, entering maintenance will never exit. |  Major | . | Yicong Cai | Yicong Cai |
+| [HDFS-13893](https://issues.apache.org/jira/browse/HDFS-13893) | DiskBalancer: no validations for Disk balancer commands |  Major | diskbalancer | Harshakiran Reddy | Lokesh Jain |
+| [YARN-9209](https://issues.apache.org/jira/browse/YARN-9209) | When nodePartition is not set in Placement Constraints, containers are allocated only in default partition |  Major | capacity scheduler, scheduler | Tarun Parimi | Tarun Parimi |
+| [HDFS-12487](https://issues.apache.org/jira/browse/HDFS-12487) | FsDatasetSpi.isValidBlock() lacks null pointer check inside and neither do the callers |  Major | balancer & mover, diskbalancer | liumi | liumi |
+| [HDFS-14074](https://issues.apache.org/jira/browse/HDFS-14074) | DataNode async disk checks may throw NullPointerException, causing the DataNode to fail to register with the namespace. |  Major | hdfs | guangyi lu | guangyi lu |
+| [HDFS-14541](https://issues.apache.org/jira/browse/HDFS-14541) | When evictableMmapped or evictable size is zero, do not throw NoSuchElementException |  Major | hdfs-client, performance | Zheng Hu | Lisheng Sun |
+| [HDFS-14598](https://issues.apache.org/jira/browse/HDFS-14598) | Findbugs warning caused by HDFS-12487 |  Minor | diskbalancer | Wei-Chiu Chuang | He Xiaoqiao |
+| [YARN-9639](https://issues.apache.org/jira/browse/YARN-9639) | DecommissioningNodesWatcher cause memory leak |  Blocker | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9581](https://issues.apache.org/jira/browse/YARN-9581) | Fix WebAppUtils#getRMWebAppURLWithScheme ignores rm2 |  Major | client | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9327](https://issues.apache.org/jira/browse/YARN-9327) | Improve synchronisation in ProtoUtils#convertToProtoFormat block |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [YARN-9655](https://issues.apache.org/jira/browse/YARN-9655) | AllocateResponse in FederationInterceptor loses applicationPriority |  Major | federation | hunshenshi | hunshenshi |
+| [HADOOP-16385](https://issues.apache.org/jira/browse/HADOOP-16385) | Namenode crashes with "RedundancyMonitor thread received Runtime exception" |  Major | . | krishna reddy | Ayush Saxena |
+| [YARN-9644](https://issues.apache.org/jira/browse/YARN-9644) | First RMContext object is always leaked during switch over |  Blocker | . | Bibin A Chundatt | Bibin A Chundatt |
+| [HDFS-14629](https://issues.apache.org/jira/browse/HDFS-14629) | Property value Hard Coded in DNConf.java |  Trivial | . | hemanthboyina | hemanthboyina |
+| [YARN-9557](https://issues.apache.org/jira/browse/YARN-9557) | Application fails in diskchecker when ReadWriteDiskValidator is configured. |  Critical | nodemanager | Anuruddh Nayak | Bilwa S T |
+| [HDFS-12703](https://issues.apache.org/jira/browse/HDFS-12703) | Exceptions are fatal to decommissioning monitor |  Critical | namenode | Daryn Sharp | He Xiaoqiao |
+| [HDFS-12748](https://issues.apache.org/jira/browse/HDFS-12748) | NameNode memory leak when accessing webhdfs GETHOMEDIRECTORY |  Major | hdfs | Jiandan Yang | Weiwei Yang |
+| [YARN-9625](https://issues.apache.org/jira/browse/YARN-9625) | UI2 - No link to a queue on the Queues page for Fair Scheduler |  Major | . | Charan Hebri | Zoltan Siegl |
+| [HDFS-14466](https://issues.apache.org/jira/browse/HDFS-14466) | Add a regression test for HDFS-14323 |  Minor | fs, test, webhdfs | Yuya Ebihara | Masatake Iwasaki |
+| [YARN-9235](https://issues.apache.org/jira/browse/YARN-9235) | If linux container executor is not set for a GPU cluster GpuResourceHandlerImpl is not initialized and NPE is thrown |  Major | yarn | Antal Bálint Steinbach | Adam Antal |
+| [YARN-9626](https://issues.apache.org/jira/browse/YARN-9626) | UI2 - Fair scheduler queue apps page issues |  Major | . | Charan Hebri | Zoltan Siegl |
+| [YARN-9645](https://issues.apache.org/jira/browse/YARN-9645) | Fix Invalid event FINISHED\_CONTAINERS\_PULLED\_BY\_AM at NEW on NM restart |  Major | . | krishna reddy | Bilwa S T |
+| [YARN-9682](https://issues.apache.org/jira/browse/YARN-9682) | Wrong log message when finalizing the upgrade |  Trivial | . | kyungwan nam | kyungwan nam |
+| [HADOOP-16440](https://issues.apache.org/jira/browse/HADOOP-16440) | DistCp cannot preserve timestamps with the -delete option |  Major | . | ludun | ludun |
+| [MAPREDUCE-7076](https://issues.apache.org/jira/browse/MAPREDUCE-7076) | TestNNBench#testNNBenchCreateReadAndDelete failing in our internal build |  Minor | test | Rushabh S Shah | kevin su |
+| [HADOOP-16443](https://issues.apache.org/jira/browse/HADOOP-16443) | Improve help text for setfacl --set option |  Minor | fs | Stephen O'Donnell | Stephen O'Donnell |
+| [YARN-9668](https://issues.apache.org/jira/browse/YARN-9668) | UGI conf doesn't read user overridden configurations on RM and NM startup |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HADOOP-9844](https://issues.apache.org/jira/browse/HADOOP-9844) | NPE when trying to create an error message response of SASL RPC |  Major | ipc | Steve Loughran | Steve Loughran |
+| [HADOOP-16245](https://issues.apache.org/jira/browse/HADOOP-16245) | Enabling SSL within LdapGroupsMapping can break system SSL configs |  Major | common, security | Erik Krogen | Erik Krogen |
+| [HDFS-14429](https://issues.apache.org/jira/browse/HDFS-14429) | Blocks remain in COMMITTED but not COMPLETE state, caused by decommission |  Major | . | Yicong Cai | Yicong Cai |
+| [HADOOP-16435](https://issues.apache.org/jira/browse/HADOOP-16435) | RpcMetrics should not be retained forever |  Critical | rpc-server | Zoltan Haindrich | Zoltan Haindrich |
+| [YARN-9596](https://issues.apache.org/jira/browse/YARN-9596) | QueueMetrics has incorrect metrics when labelled partitions are involved |  Major | capacity scheduler | Muhammad Samir Khan | Muhammad Samir Khan |
+| [MAPREDUCE-7225](https://issues.apache.org/jira/browse/MAPREDUCE-7225) | Fix broken current folder expansion during MR job start |  Major | mrv2 | Adam Antal | Peter Bacsko |
+| [HDFS-13529](https://issues.apache.org/jira/browse/HDFS-13529) | Fix default trash policy emptier trigger time |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HADOOP-15681](https://issues.apache.org/jira/browse/HADOOP-15681) | AuthenticationFilter should generate valid date format for Set-Cookie header regardless of default Locale |  Minor | security | Cao Manh Dat | Cao Manh Dat |
+| [HDFS-14685](https://issues.apache.org/jira/browse/HDFS-14685) | DefaultAuditLogger doesn't print CallerContext |  Major | hdfs | xuzq | xuzq |
+| [HDFS-14462](https://issues.apache.org/jira/browse/HDFS-14462) | WebHDFS throws "Error writing request body to server" instead of DSQuotaExceededException |  Major | webhdfs | Erik Krogen | Simbarashe Dzinamarira |
+| [HDFS-14691](https://issues.apache.org/jira/browse/HDFS-14691) | Wrong usage hint for hadoop fs command test |  Minor | hdfs | Jianfei Jiang | Jianfei Jiang |
+| [HDFS-14557](https://issues.apache.org/jira/browse/HDFS-14557) | JournalNode error: Can't scan a pre-transactional edit log |  Major | ha | Wei-Chiu Chuang | Stephen O'Donnell |
+| [HDFS-14692](https://issues.apache.org/jira/browse/HDFS-14692) | Upload button should not encode complete url |  Major | . | Lokesh Jain | Lokesh Jain |
+| [HADOOP-15908](https://issues.apache.org/jira/browse/HADOOP-15908) | hadoop-build-tools jar is downloaded from the remote repository instead of being used from the local one |  Minor | build | Oleksandr Shevchenko | Oleksandr Shevchenko |
+| [HDFS-14631](https://issues.apache.org/jira/browse/HDFS-14631) | The DirectoryScanner doesn't fix the wrongly placed replica. |  Major | . | Jinglun | Jinglun |
+| [YARN-9685](https://issues.apache.org/jira/browse/YARN-9685) | NPE when rendering the info table of leaf queue in non-accessible partitions |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-14459](https://issues.apache.org/jira/browse/HDFS-14459) | ClosedChannelException silently ignored in FsVolumeList.addBlockPool() |  Major | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13359](https://issues.apache.org/jira/browse/HDFS-13359) | DataXceiver hung due to the lock in FsDatasetImpl#getBlockInputStream |  Major | datanode | Yiqun Lin | Yiqun Lin |
+| [YARN-9451](https://issues.apache.org/jira/browse/YARN-9451) | AggregatedLogsBlock shows wrong NM http port |  Minor | nodemanager | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9723](https://issues.apache.org/jira/browse/YARN-9723) | ApplicationPlacementContext is not required for terminated jobs during recovery |  Major | resourcemanager | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-12914](https://issues.apache.org/jira/browse/HDFS-12914) | Block report leases cause missing blocks until next report |  Critical | namenode | Daryn Sharp | Santosh Marella |
+| [HDFS-14148](https://issues.apache.org/jira/browse/HDFS-14148) | HDFS OIV ReverseXML SnapshotSection parser throws exception when there are more than one snapshottable directory |  Major | hdfs | Siyao Meng | Siyao Meng |
+| [HDFS-14595](https://issues.apache.org/jira/browse/HDFS-14595) | HDFS-11848 breaks API compatibility |  Blocker | . | Wei-Chiu Chuang | Siyao Meng |
+| [HDFS-14423](https://issues.apache.org/jira/browse/HDFS-14423) | Percent (%) and plus (+) characters no longer work in WebHDFS |  Major | webhdfs | Jing Wang | Masatake Iwasaki |
+| [MAPREDUCE-7230](https://issues.apache.org/jira/browse/MAPREDUCE-7230) | TestHSWebApp.testLogsViewSingle fails |  Major | jobhistoryserver, test | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9749](https://issues.apache.org/jira/browse/YARN-9749) | TestAppLogAggregatorImpl#testDFSQuotaExceeded fails on trunk |  Major | log-aggregation, test | Peter Bacsko | Adam Antal |
+| [HDFS-14687](https://issues.apache.org/jira/browse/HDFS-14687) | Standby Namenode never comes out of safemode when EC files are being written. |  Critical | ec, namenode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [HDFS-13101](https://issues.apache.org/jira/browse/HDFS-13101) | Yet another fsimage corruption related to snapshot |  Major | snapshots | Yongjun Zhang | Shashikant Banerjee |
+| [HDFS-13201](https://issues.apache.org/jira/browse/HDFS-13201) | Fix prompt message in testPolicyAndStateCantBeNull |  Minor | . | chencan | chencan |
+| [HDFS-14311](https://issues.apache.org/jira/browse/HDFS-14311) | Multi-threading conflict at layoutVersion when loading block pool storage |  Major | rolling upgrades | Yicong Cai | Yicong Cai |
+| [HDFS-14582](https://issues.apache.org/jira/browse/HDFS-14582) | Failed to start DN with ArithmeticException when NULL checksum used |  Major | datanode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-9217](https://issues.apache.org/jira/browse/YARN-9217) | Nodemanager will fail to start if GPU is misconfigured on the node or GPU drivers missing |  Major | yarn | Antal Bálint Steinbach | Peter Bacsko |
+| [HADOOP-16494](https://issues.apache.org/jira/browse/HADOOP-16494) | Add SHA-256 or SHA-512 checksum to release artifacts to comply with the release distribution policy |  Blocker | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9774](https://issues.apache.org/jira/browse/YARN-9774) | Fix order of arguments for assertEquals in TestSLSUtils |  Minor | test | Nikhil Navadiya | Nikhil Navadiya |
+| [HDFS-13596](https://issues.apache.org/jira/browse/HDFS-13596) | NN restart fails after RollingUpgrade from 2.x to 3.x |  Blocker | hdfs | Hanisha Koneru | Fei Hui |
+| [HDFS-14396](https://issues.apache.org/jira/browse/HDFS-14396) | Failed to load image from FSImageFile when downgrade from 3.x to 2.x |  Blocker | rolling upgrades | Fei Hui | Fei Hui |
+| [YARN-8917](https://issues.apache.org/jira/browse/YARN-8917) | Absolute (maximum) capacity of level3+ queues is wrongly calculated for absolute resource |  Critical | capacityscheduler | Tao Yang | Tao Yang |
+| [YARN-9642](https://issues.apache.org/jira/browse/YARN-9642) | Fix Memory Leak in AbstractYarnScheduler caused by timer |  Blocker | resourcemanager | Bibin A Chundatt | Bibin A Chundatt |
+| [HDFS-13977](https://issues.apache.org/jira/browse/HDFS-13977) | NameNode can kill itself if it tries to send too many txns to a QJM simultaneously |  Major | namenode, qjm | Erik Krogen | Erik Krogen |
+| [HDFS-2470](https://issues.apache.org/jira/browse/HDFS-2470) | NN should automatically set permissions on dfs.namenode.\*.dir |  Major | namenode | Aaron T. Myers | Siddharth Wagle |
+| [YARN-9438](https://issues.apache.org/jira/browse/YARN-9438) | launchTime not written to state store for running applications |  Major | . | Jonathan Hung | Jonathan Hung |
+| [YARN-9640](https://issues.apache.org/jira/browse/YARN-9640) | Slow event processing could cause too many attempt unregister events |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [HDFS-12212](https://issues.apache.org/jira/browse/HDFS-12212) | Options.Rename.To\_TRASH is considered even when Options.Rename.NONE is specified |  Major | namenode | Vinayakumar B | Vinayakumar B |
+| [YARN-9714](https://issues.apache.org/jira/browse/YARN-9714) | ZooKeeper connection in ZKRMStateStore leaks after RM transitioned to standby |  Major | resourcemanager | Tao Yang | Tao Yang |
+| [HDFS-8178](https://issues.apache.org/jira/browse/HDFS-8178) | QJM doesn't move aside stale inprogress edits files |  Major | qjm | Zhe Zhang | Istvan Fajth |
+| [HDFS-14706](https://issues.apache.org/jira/browse/HDFS-14706) | Checksums are not checked if block meta file is less than 7 bytes |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [YARN-9797](https://issues.apache.org/jira/browse/YARN-9797) | LeafQueue#activateApplications should use resourceCalculator#fitsIn |  Blocker | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9785](https://issues.apache.org/jira/browse/YARN-9785) | Fix DominantResourceCalculator when one resource is zero |  Blocker | . | Bilwa S T | Bilwa S T |
+| [YARN-9718](https://issues.apache.org/jira/browse/YARN-9718) | Yarn REST API, services endpoint remote command ejection |  Major | . | Eric Yang | Eric Yang |
+| [HADOOP-16255](https://issues.apache.org/jira/browse/HADOOP-16255) | ChecksumFS: FileSystem.rename(path, path, options) doesn't rename checksum |  Major | fs | Steve Loughran | Jungtaek Lim |
+| [YARN-9817](https://issues.apache.org/jira/browse/YARN-9817) | Fix failing testcases due to not initialized AsyncDispatcher -  ArithmeticException: / by zero |  Major | test | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9813](https://issues.apache.org/jira/browse/YARN-9813) | RM does not start on JDK11 when UIv2 is enabled |  Critical | resourcemanager, yarn | Adam Antal | Adam Antal |
+| [YARN-9820](https://issues.apache.org/jira/browse/YARN-9820) | RM logs InvalidStateTransitionException when app is submitted |  Critical | . | Rohith Sharma K S | Prabhu Joseph |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-8907](https://issues.apache.org/jira/browse/YARN-8907) | Modify a logging message in TestCapacityScheduler |  Trivial | . | Zhankun Tang | Zhankun Tang |
+| [YARN-8904](https://issues.apache.org/jira/browse/YARN-8904) | TestRMDelegationTokens can fail in testRMDTMasterKeyStateOnRollingMasterKey |  Minor | test | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-8944](https://issues.apache.org/jira/browse/YARN-8944) | TestContainerAllocation.testUserLimitAllocationMultipleContainers failure after YARN-8896 |  Minor | capacity scheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-9263](https://issues.apache.org/jira/browse/YARN-9263) | TestConfigurationNodeAttributesProvider fails after Mockito updated |  Minor | . | Weiwei Yang | Weiwei Yang |
+| [YARN-9315](https://issues.apache.org/jira/browse/YARN-9315) | TestCapacitySchedulerMetrics fails intermittently |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9316](https://issues.apache.org/jira/browse/YARN-9316) | TestPlacementConstraintsUtil#testInterAppConstraintsByAppID fails intermittently |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9324](https://issues.apache.org/jira/browse/YARN-9324) | TestSchedulingRequestContainerAllocation(Async) fails with junit-4.11 |  Major | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9325](https://issues.apache.org/jira/browse/YARN-9325) | TestQueueManagementDynamicEditPolicy fails intermittently |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11950](https://issues.apache.org/jira/browse/HDFS-11950) | Disable libhdfs zerocopy test on Mac |  Minor | libhdfs | John Zhuge | Akira Ajisaka |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-6989](https://issues.apache.org/jira/browse/YARN-6989) | Ensure timeline service v2 codebase gets UGI from HttpServletRequest in a consistent way |  Major | timelineserver | Vrushali C | Abhishek Modi |
+| [YARN-8834](https://issues.apache.org/jira/browse/YARN-8834) | Provide Java client for fetching Yarn specific entities from TimelineReader |  Critical | timelinereader | Rohith Sharma K S | Abhishek Modi |
+| [YARN-3879](https://issues.apache.org/jira/browse/YARN-3879) | [Storage implementation] Create HDFS backing storage implementation for ATS reads |  Major | timelineserver | Tsuyoshi Ozawa | Abhishek Modi |
+| [YARN-6098](https://issues.apache.org/jira/browse/YARN-6098) | Add documentation for Delete Queue |  Major | capacity scheduler, documentation | Naganarasimha G R | Suma Shivaprasad |
+| [YARN-8456](https://issues.apache.org/jira/browse/YARN-8456) | Fix a configuration handling bug when the user leaves the FPGA discovery executable path configuration at its default but sets the OpenCL SDK path environment variable |  Major | yarn | Zhankun Tang | Zhankun Tang |
+| [HADOOP-15868](https://issues.apache.org/jira/browse/HADOOP-15868) | AliyunOSS: update document for properties of multiple part download, multiple part upload and directory copy |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-8871](https://issues.apache.org/jira/browse/YARN-8871) | Document behavior of YARN-5742 |  Major | . | Vrushali C | Suma Shivaprasad |
+| [YARN-7754](https://issues.apache.org/jira/browse/YARN-7754) | [Atsv2] Update document for running v1 and v2 TS |  Major | . | Rohith Sharma K S | Suma Shivaprasad |
+| [HDFS-14047](https://issues.apache.org/jira/browse/HDFS-14047) | [libhdfs++] Fix hdfsGetLastExceptionRootCause bug in test\_libhdfs\_threaded.c |  Major | libhdfs, native | Anatoli Shein | Anatoli Shein |
+| [YARN-8988](https://issues.apache.org/jira/browse/YARN-8988) | Reduce the verbose log on RM heartbeat path when distributed node-attributes is enabled |  Major | . | Weiwei Yang | Tao Yang |
+| [HADOOP-15846](https://issues.apache.org/jira/browse/HADOOP-15846) | ABFS: fix mask related bugs in setAcl, modifyAclEntries and removeAclEntries. |  Major | fs/azure | Thomas Marquardt | junhua gu |
+| [HADOOP-15812](https://issues.apache.org/jira/browse/HADOOP-15812) | ABFS: Improve AbfsRestOperationException format to ensure full msg can be displayed on console |  Major | fs/azure | Da Zhou | Da Zhou |
+| [YARN-8987](https://issues.apache.org/jira/browse/YARN-8987) | Usability improvements to the node-attributes CLI |  Critical | . | Weiwei Yang | Bibin A Chundatt |
+| [HADOOP-15876](https://issues.apache.org/jira/browse/HADOOP-15876) | Use keySet().removeAll() to remove multiple keys from Map in AzureBlobFileSystemStore |  Minor | fs/azure | Ted Yu | Da Zhou |
+| [HADOOP-15917](https://issues.apache.org/jira/browse/HADOOP-15917) | AliyunOSS: fix incorrect ReadOps and WriteOps in statistics |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-8303](https://issues.apache.org/jira/browse/YARN-8303) | YarnClient should contact TimelineReader for application/attempt/container report |  Critical | . | Rohith Sharma K S | Abhishek Modi |
+| [HADOOP-15872](https://issues.apache.org/jira/browse/HADOOP-15872) | ABFS: Update to target 2018-11-09 REST version for ADLS Gen 2 |  Major | fs/azure | Thomas Marquardt | junhua gu |
+| [HADOOP-15940](https://issues.apache.org/jira/browse/HADOOP-15940) | ABFS: For HNS account, avoid unnecessary get call when doing Rename |  Major | fs/azure | Da Zhou | Da Zhou |
+| [YARN-8986](https://issues.apache.org/jira/browse/YARN-8986) | Publish all exposed ports to random ports when using bridge network |  Minor | yarn | dockerzhang | dockerzhang |
+| [HADOOP-15932](https://issues.apache.org/jira/browse/HADOOP-15932) | Oozie unable to create sharelib in s3a filesystem |  Critical | fs, fs/s3 | Soumitra Sulav | Steve Loughran |
+| [YARN-9034](https://issues.apache.org/jira/browse/YARN-9034) | ApplicationCLI should have option to take clusterId |  Major | . | Rohith Sharma K S | Rohith Sharma K S |
+| [HDFS-13713](https://issues.apache.org/jira/browse/HDFS-13713) | Add specification of Multipart Upload API to FS specification, with contract tests |  Blocker | fs, test | Steve Loughran | Ewan Higgs |
+| [HADOOP-15968](https://issues.apache.org/jira/browse/HADOOP-15968) | ABFS: add try catch for UGI failure when initializing ABFS |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-15969](https://issues.apache.org/jira/browse/HADOOP-15969) | ABFS: getNamespaceEnabled can fail blocking user access thru ACLs |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-15972](https://issues.apache.org/jira/browse/HADOOP-15972) | ABFS: reduce list page size to 500 |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-16004](https://issues.apache.org/jira/browse/HADOOP-16004) | ABFS: Convert 404 error response in AbfsInputStream and AbfsOutPutStream to FileNotFoundException |  Major | fs/azure | Da Zhou | Da Zhou |
+| [YARN-9126](https://issues.apache.org/jira/browse/YARN-9126) | Container reinit always fails in branch-3.2 and trunk |  Major | . | Eric Yang | Chandni Singh |
+| [YARN-8925](https://issues.apache.org/jira/browse/YARN-8925) | Updating distributed node attributes only when necessary |  Major | resourcemanager | Tao Yang | Tao Yang |
+| [HADOOP-16009](https://issues.apache.org/jira/browse/HADOOP-16009) | Replace the url of the repository in Apache Hadoop source code |  Major | documentation | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-15323](https://issues.apache.org/jira/browse/HADOOP-15323) | AliyunOSS: Improve copy file performance for AliyunOSSFileSystemStore |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-6149](https://issues.apache.org/jira/browse/YARN-6149) | Allow port range to be specified while starting NM Timeline collector manager. |  Major | timelineserver | Varun Saxena | Abhishek Modi |
+| [HADOOP-16040](https://issues.apache.org/jira/browse/HADOOP-16040) | ABFS: Bug fix for tolerateOobAppends configuration |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-15975](https://issues.apache.org/jira/browse/HADOOP-15975) | ABFS: remove timeout check for DELETE and RENAME |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-15662](https://issues.apache.org/jira/browse/HADOOP-15662) | ABFS: Better exception handling of DNS errors |  Major | fs/azure | Thomas Marquardt | Da Zhou |
+| [HADOOP-16045](https://issues.apache.org/jira/browse/HADOOP-16045) | Don't run TestDU on Windows |  Trivial | common, test | Lukas Majercak | Lukas Majercak |
+| [HADOOP-16044](https://issues.apache.org/jira/browse/HADOOP-16044) | ABFS: Better exception handling of DNS errors followup |  Major | . | Da Zhou | Da Zhou |
+| [HADOOP-16048](https://issues.apache.org/jira/browse/HADOOP-16048) | ABFS: Fix Date format parser |  Major | fs/azure | Da Zhou | Da Zhou |
+| [YARN-8101](https://issues.apache.org/jira/browse/YARN-8101) | Add UT to verify node-attributes in RM nodes rest API |  Minor | resourcemanager, restapi | Weiwei Yang | Prabhu Joseph |
+| [HADOOP-16041](https://issues.apache.org/jira/browse/HADOOP-16041) | UserAgent string for ABFS |  Major | fs/azure | Shweta | Shweta |
+| [HADOOP-16079](https://issues.apache.org/jira/browse/HADOOP-16079) | Token.toString faults if any listed token can't load. |  Blocker | security | Steve Loughran | Steve Loughran |
+| [YARN-9275](https://issues.apache.org/jira/browse/YARN-9275) | Add link to NodeAttributes doc in PlacementConstraints document |  Minor | documentation | Weiwei Yang | Masatake Iwasaki |
+| [YARN-6735](https://issues.apache.org/jira/browse/YARN-6735) | Have a way to turn off container metrics from NMs |  Major | timelineserver | Vrushali C | Abhishek Modi |
+| [HADOOP-15954](https://issues.apache.org/jira/browse/HADOOP-15954) | ABFS: Enable owner and group conversion for MSI and login user using OAuth |  Major | fs/azure | junhua gu | Da Zhou |
+| [YARN-9253](https://issues.apache.org/jira/browse/YARN-9253) | Add UT to verify Placement Constraint in Distributed Shell |  Major | . | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9252](https://issues.apache.org/jira/browse/YARN-9252) | Allocation Tag Namespace support in Distributed Shell |  Major | distributed-shell | Prabhu Joseph | Prabhu Joseph |
+| [YARN-8555](https://issues.apache.org/jira/browse/YARN-8555) | Parameterize TestSchedulingRequestContainerAllocation(Async) to cover both PC handler options |  Minor | . | Weiwei Yang | Prabhu Joseph |
+| [YARN-9293](https://issues.apache.org/jira/browse/YARN-9293) | Optimize MockAMLauncher event handling |  Major | . | Bibin A Chundatt | Bibin A Chundatt |
+| [HADOOP-16104](https://issues.apache.org/jira/browse/HADOOP-16104) | Wasb tests to downgrade to skip when the test account is namespace enabled |  Major | fs/azure, test | Steve Loughran | Masatake Iwasaki |
+| [YARN-9258](https://issues.apache.org/jira/browse/YARN-9258) | Support to specify allocation tags without constraint in distributed shell CLI |  Major | distributed-shell | Prabhu Joseph | Prabhu Joseph |
+| [HADOOP-16136](https://issues.apache.org/jira/browse/HADOOP-16136) | ABFS: Should only transform username to short name |  Major | . | Da Zhou | Da Zhou |
+| [YARN-5336](https://issues.apache.org/jira/browse/YARN-5336) | Limit the flow name size & consider cleanup for hex chars |  Major | timelineserver | Vrushali C | Sushil Ks |
+| [YARN-3841](https://issues.apache.org/jira/browse/YARN-3841) | [Storage implementation] Adding retry semantics to HDFS backing storage |  Major | timelineserver | Tsuyoshi Ozawa | Abhishek Modi |
+| [HADOOP-16169](https://issues.apache.org/jira/browse/HADOOP-16169) | ABFS: Bug fix for getPathProperties |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-16109](https://issues.apache.org/jira/browse/HADOOP-16109) | Parquet reading S3AFileSystem causes EOF |  Blocker | fs/s3 | Dave Christianson | Steve Loughran |
+| [HADOOP-15625](https://issues.apache.org/jira/browse/HADOOP-15625) | S3A input stream to use etags/version number to detect changed source files |  Major | fs/s3 | Brahma Reddy Battula | Ben Roling |
+| [HADOOP-16124](https://issues.apache.org/jira/browse/HADOOP-16124) | Extend documentation in testing.md about endpoint constants |  Trivial | hadoop-aws | Adam Antal | Adam Antal |
+| [HADOOP-16191](https://issues.apache.org/jira/browse/HADOOP-16191) | AliyunOSS: improvements for copyFile/copyDirectory and logging |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-9387](https://issues.apache.org/jira/browse/YARN-9387) | Update document for ATS HBase Custom tablenames (-entityTableName) |  Critical | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9389](https://issues.apache.org/jira/browse/YARN-9389) | FlowActivity and FlowRun table prefix is wrong |  Minor | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9391](https://issues.apache.org/jira/browse/YARN-9391) | Disable PATH variable to be passed to Docker container |  Major | . | Eric Yang | Jim Brennan |
+| [HADOOP-16058](https://issues.apache.org/jira/browse/HADOOP-16058) | S3A tests to include Terasort |  Major | fs/s3, test | Steve Loughran | Steve Loughran |
+| [HADOOP-16220](https://issues.apache.org/jira/browse/HADOOP-16220) | Add findbugs ignores for unjustified issues during update to guava to 27.0-jre in hadoop-project |  Major | . | Gabor Bota | Gabor Bota |
+| [YARN-9418](https://issues.apache.org/jira/browse/YARN-9418) | ATSV2 /apps/appId/entities/YARN\_CONTAINER rest api does not show metrics |  Critical | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [HADOOP-16233](https://issues.apache.org/jira/browse/HADOOP-16233) | S3AFileStatus to declare that isEncrypted() is always true |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
+| [YARN-9303](https://issues.apache.org/jira/browse/YARN-9303) | Username splits won't help timelineservice.app\_flow table |  Major | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9382](https://issues.apache.org/jira/browse/YARN-9382) | Publish container killed, paused and resumed events to ATSv2. |  Major | . | Abhishek Modi | Abhishek Modi |
+| [YARN-9335](https://issues.apache.org/jira/browse/YARN-9335) | [atsv2] Restrict the number of elements held in timeline collector when backend is unreachable for async calls |  Major | . | Vrushali C | Abhishek Modi |
+| [HADOOP-16269](https://issues.apache.org/jira/browse/HADOOP-16269) | ABFS: add listFileStatus with StartFrom |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-16251](https://issues.apache.org/jira/browse/HADOOP-16251) | ABFS: add FSMainOperationsBaseTest |  Major | fs/azure | Da Zhou | Da Zhou |
+| [HADOOP-16306](https://issues.apache.org/jira/browse/HADOOP-16306) | AliyunOSS: Remove temporary files when upload small files to OSS |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7537](https://issues.apache.org/jira/browse/YARN-7537) | [Atsv2] load hbase configuration from filesystem rather than URL |  Major | . | Rohith Sharma K S | Prabhu Joseph |
+| [HDFS-14553](https://issues.apache.org/jira/browse/HDFS-14553) | Make queue size of BlockReportProcessingThread configurable |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HADOOP-16211](https://issues.apache.org/jira/browse/HADOOP-16211) | Update guava to 27.0-jre in hadoop-project branch-3.2 |  Major | . | Gabor Bota | Gabor Bota |
+| [YARN-8499](https://issues.apache.org/jira/browse/YARN-8499) | ATS v2 Generic TimelineStorageMonitor |  Major | ATSv2 | Sunil Govindan | Prabhu Joseph |
+| [YARN-9374](https://issues.apache.org/jira/browse/YARN-9374) | HBaseTimelineWriterImpl sync writes has to avoid thread blocking if storage down |  Major | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [HADOOP-16401](https://issues.apache.org/jira/browse/HADOOP-16401) | ABFS: port Azure doc to 3.2 branch |  Major | fs/azure | Da Zhou | Masatake Iwasaki |
+| [HDFS-14034](https://issues.apache.org/jira/browse/HDFS-14034) | Support getQuotaUsage API in WebHDFS |  Major | fs, webhdfs | Erik Krogen | Chao Sun |
+| [YARN-9765](https://issues.apache.org/jira/browse/YARN-9765) | SLS runner crashes when run with metrics turned off. |  Major | . | Abhishek Modi | Abhishek Modi |
+| [HDFS-14674](https://issues.apache.org/jira/browse/HDFS-14674) | [SBN read] Got an unexpected txid when tail editlog |  Blocker | . | wangzhaohui | wangzhaohui |
+| [YARN-9775](https://issues.apache.org/jira/browse/YARN-9775) | RMWebServices /scheduler-conf GET returns all hadoop configurations for ZKConfigurationStore |  Major | restapi | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-14779](https://issues.apache.org/jira/browse/HDFS-14779) | Fix logging error in TestEditLog#testMultiStreamsLoadEditWithConfMaxTxns |  Major | . | Jonathan Hung | Jonathan Hung |
+| [YARN-9804](https://issues.apache.org/jira/browse/YARN-9804) | Update ATSv2 document for latest feature supports |  Blocker | . | Rohith Sharma K S | Rohith Sharma K S |
+| [YARN-9821](https://issues.apache.org/jira/browse/YARN-9821) | NM hangs at serviceStop when ATSV2 Backend Hbase is Down |  Major | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15851](https://issues.apache.org/jira/browse/HADOOP-15851) | Disable wildfly logs to the console |  Major | fs/azure | Vishwajeet Dusane | Vishwajeet Dusane |
+| [HDFS-12729](https://issues.apache.org/jira/browse/HDFS-12729) | Document special paths in HDFS |  Major | documentation | Chris Douglas | Masatake Iwasaki |
+| [YARN-9191](https://issues.apache.org/jira/browse/YARN-9191) | Add cli option in DS to support enforceExecutionType in resource requests. |  Major | . | Abhishek Modi | Abhishek Modi |
+| [HADOOP-16037](https://issues.apache.org/jira/browse/HADOOP-16037) | DistCp: Document usage of Sync (-diff option) in detail |  Major | documentation, tools/distcp | Siyao Meng | Siyao Meng |
+| [HADOOP-16263](https://issues.apache.org/jira/browse/HADOOP-16263) | Update BUILDING.txt with macOS native build instructions |  Minor | . | Siyao Meng | Siyao Meng |
+| [YARN-9559](https://issues.apache.org/jira/browse/YARN-9559) | Create AbstractContainersLauncher for pluggable ContainersLauncher logic |  Major | . | Jonathan Hung | Jonathan Hung |
+| [YARN-9796](https://issues.apache.org/jira/browse/YARN-9796) | Fix ASF license issue in branch-3.2 |  Blocker | . | Rohith Sharma K S | Prabhu Joseph |
+
+
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/RELEASENOTES.3.2.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/RELEASENOTES.3.2.1.md
new file mode 100644
index 0000000..0a8862a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.2.1/RELEASENOTES.3.2.1.md
@@ -0,0 +1,80 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop 3.2.1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [YARN-8986](https://issues.apache.org/jira/browse/YARN-8986) | *Minor* | **publish all exposed ports to random ports when using bridge network**
+
+Supports the -p and -P options for bridge-type networks.
+
+
+---
+
+* [YARN-9071](https://issues.apache.org/jira/browse/YARN-9071) | *Critical* | **NM and service AM don't have updated status for reinitialized containers**
+
+An in-progress upgrade may report the READY state before the underlying upgrade operations have actually completed. External callers of the upgrade API are advised to wait at least 30 seconds before querying `yarn app -status`.
+
+
+---
+
+* [YARN-9084](https://issues.apache.org/jira/browse/YARN-9084) | *Major* | **Service Upgrade: With default readiness check, the status of upgrade is reported to be successful prematurely**
+
+Improves the accuracy of transient container status reporting during service upgrades.
+
+
+---
+
+* [HADOOP-15922](https://issues.apache.org/jira/browse/HADOOP-15922) | *Major* | **DelegationTokenAuthenticationFilter get wrong doAsUser since it does not decode URL**
+
+Fixes the DelegationTokenAuthenticationFilter incorrectly double-encoding the doAs user parameter instead of decoding it.
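+
+A minimal sketch of the decoding step (hypothetical helper, not the filter's
+actual code):
+
+```java
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+
+class DoAsDecoding {
+  /** e.g. "user%40EXAMPLE.COM" -> "user@EXAMPLE.COM" */
+  static String decodeDoAs(String rawDoAs) throws UnsupportedEncodingException {
+    return URLDecoder.decode(rawDoAs, "UTF-8");
+  }
+}
+```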
+
+
+---
+
+* [YARN-8761](https://issues.apache.org/jira/browse/YARN-8761) | *Major* | **Service AM support for decommissioning component instances**
+
+Component instance numbers no longer increase strictly linearly when the decommission feature is used. Applications that assume linearly incrementing component instance numbers may be affected by this feature.
+
+
+---
+
+* [HDFS-14305](https://issues.apache.org/jira/browse/HDFS-14305) | *Major* | **Serial number in BlockTokenSecretManager could overlap between different namenodes**
+
+NameNodes rely on independent block token key ranges to communicate block token identities to DataNodes and clients in a way that does not create conflicts between the tokens issued by multiple NameNodes. HDFS-6440 introduced the potential for overlaps in key ranges; this fixes the issue by creating 64 possible key ranges that NameNodes assign themselves to, allowing for up to 64 NameNodes to run safely. This limitation only applies within a single Namespace; there may be more than 64 NameNodes total spread among multiple federated Namespaces.
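+
+A minimal sketch of the range scheme (hypothetical names; the actual logic
+lives in BlockTokenSecretManager):
+
+```java
+/** Hypothetical sketch of disjoint serial-number ranges per NameNode. */
+class SerialRanges {
+  static final int NUM_RANGES = 64;                 // max NameNodes per namespace
+  static final int RANGE_SIZE = Integer.MAX_VALUE / NUM_RANGES;
+
+  /** nnIndex in [0, 63]; the result always falls in that NameNode's range. */
+  static int serialFor(int nnIndex, int counter) {
+    return RANGE_SIZE * nnIndex + Math.floorMod(counter, RANGE_SIZE);
+  }
+}
+```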
+
+
+---
+
+* [HDFS-14396](https://issues.apache.org/jira/browse/HDFS-14396) | *Blocker* | **Failed to load image from FSImageFile when downgrade from 3.x to 2.x**
+
+During a rolling upgrade from Hadoop 2.x to 3.x, the NameNode cannot persist erasure coding information, so the erasure coding feature cannot be used until the upgrade is finalized.
+
+
+---
+
+* [YARN-7055](https://issues.apache.org/jira/browse/YARN-7055) | *Major* | **YARN Timeline Service v.2: beta 1 / GA**
+
+Application Timeline Server v2 is ready for production and is generally available (GA) from the 3.2.1 release onwards.
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/test/proto/test.proto b/hadoop-common-project/hadoop-common/src/test/proto/test.proto
index 37e9a0b..2c41aa2 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
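+// protoc 3.x warns when a .proto file declares no syntax, so state proto2 explicitly.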
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "TestProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
index 0df67a0..f699027 100644
--- a/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
+++ b/hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
@@ -15,6 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+syntax = "proto2";
 option java_package = "org.apache.hadoop.ipc.protobuf";
 option java_outer_classname = "TestRpcServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 3272787..2a6d44a 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -135,7 +135,7 @@
     <dependency>
       <groupId>io.jaegertracing</groupId>
       <artifactId>jaeger-client</artifactId>
-      <version>0.33.1</version>
+      <version>${jaeger.version}</version>
     </dependency>
     <dependency>
       <groupId>io.opentracing</groupId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 8769bef..f5fda89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -131,36 +131,18 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
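+            <!-- Re-enable the protoc execution that the shared plugin configuration skips by default. -->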
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>ClientDatanodeProtocol.proto</include>
-                  <include>ClientNamenodeProtocol.proto</include>
-                  <include>acl.proto</include>
-                  <include>xattr.proto</include>
-                  <include>datatransfer.proto</include>
-                  <include>hdfs.proto</include>
-                  <include>encryption.proto</include>
-                  <include>inotify.proto</include>
-                  <include>erasurecoding.proto</include>
-                  <include>ReconfigurationProtocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
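+              <!-- hdfs-client .proto files import definitions from hadoop-common. -->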
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 97a8472..d1ca63d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -148,6 +148,9 @@
           "dfs.client.key.provider.cache.expiry";
   long    DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
               TimeUnit.DAYS.toMillis(10); // 10 days
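+  /** Buffer size, in bytes, for BlockReaderRemote's buffered output stream. */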
+  String DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_KEY =
+      "dfs.client.block.reader.remote.buffer.size";
+  int DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_DEFAULT = 8192;
 
   String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.datanode.kerberos.principal";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 88b1686..2109e6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -855,7 +855,7 @@
         fileName, block, token, startOffset, length,
         verifyChecksum, clientName, peer, datanode,
         clientContext.getPeerCache(), cachingStrategy,
-        networkDistance);
+        networkDistance, configuration);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index ea1baed..0f2f54c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -29,6 +29,7 @@
 import java.util.UUID;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.PeerCache;
@@ -55,6 +56,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_KEY;
+
 /**
  * This is a wrapper around connection to datanode
  * and understands checksum, offset etc.
@@ -391,10 +395,13 @@
       Peer peer, DatanodeID datanodeID,
       PeerCache peerCache,
       CachingStrategy cachingStrategy,
-      int networkDistance) throws IOException {
+      int networkDistance, Configuration configuration) throws IOException {
     // in and out will be closed when sock is closed (by the caller)
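+    // Pull the output-stream buffer size from configuration (default 8192 bytes).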
+    int bufferSize = configuration.getInt(
+        DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_KEY,
+        DFS_CLIENT_BLOCK_READER_REMOTE_BUFFER_SIZE_DEFAULT);
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
-        peer.getOutputStream()));
+        peer.getOutputStream(), bufferSize));
     new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
         verifyChecksum, cachingStrategy);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index 52f6330..84cd771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index d08ad9b..ec22979 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "ClientNamenodeProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
index 12a38b1..bad9f45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ReconfigurationProtocol.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax="proto2";
  // This file contains protocol buffers that are used to reconfigure NameNode
  // and DataNode by HDFS admin.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
index c2529c9..e9a8ccb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax="proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "AclProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
index 43a03e9..66a69a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 // This file contains protocol buffers that are used to transfer data
 // to and from the datanode, as well as between datanodes.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
index 75d3a0e..bcd82d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/encryption.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 5f5f66e..de3bf4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
- 
+syntax="proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "ErasureCodingProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 8a61aa6..58a3d59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
index f193408..afcccaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/inotify.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax="proto2";
 // This file contains protocol buffers used to communicate edits to clients
 // as part of the inotify system.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
index 6c8b5eb..a53aa86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/xattr.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax="proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "XAttrProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index e47a204..b5c1472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -184,29 +184,21 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../hadoop-hdfs-client/src/main/proto</param>
-                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>FederationProtocol.proto</include>
-                  <include>RouterProtocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
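+              <!-- RBF .proto files import definitions from hadoop-common and hdfs-client. -->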
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-hdfs-client/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 6032dfe..e750294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -269,16 +269,21 @@
   @Override
   public UpdateMountTableEntryResponse updateMountTableEntry(
       UpdateMountTableEntryRequest request) throws IOException {
-
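+    // Capture the existing mount entry before the update so that quota and
+    // destination changes can be detected afterwards.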
+    MountTable updateEntry = request.getEntry();
+    MountTable oldEntry = null;
+    if (this.router.getSubclusterResolver() instanceof MountTableResolver) {
+      MountTableResolver mResolver =
+          (MountTableResolver) this.router.getSubclusterResolver();
+      oldEntry = mResolver.getMountPoint(updateEntry.getSourcePath());
+    }
     UpdateMountTableEntryResponse response = getMountTableStore()
         .updateMountTableEntry(request);
     try {
-      MountTable mountTable = request.getEntry();
-      if (mountTable != null && router.isQuotaEnabled()
-          && isQuotaUpdated(request, mountTable)) {
-        synchronizeQuota(mountTable.getSourcePath(),
-            mountTable.getQuota().getQuota(),
-            mountTable.getQuota().getSpaceQuota());
+      if (updateEntry != null && router.isQuotaEnabled()
+          && isQuotaUpdated(request, oldEntry)) {
+        synchronizeQuota(updateEntry.getSourcePath(),
+            updateEntry.getQuota().getQuota(),
+            updateEntry.getQuota().getSpaceQuota());
       }
     } catch (Exception e) {
       // Ignore exception, if any while reseting quota. Specifically to handle
@@ -289,29 +294,41 @@
     return response;
   }
 
+  /**
+   * Checks whether the quota needs to be synchronized with the namespace.
+   * Synchronization is required if either the mount entry quota or the
+   * remote destinations have changed.
+   * @param request the update request.
+   * @param oldEntry the mount entry before the update.
+   * @return true if the quota needs to be synchronized.
+   * @throws IOException if the mount table entries cannot be examined.
+   */
   private boolean isQuotaUpdated(UpdateMountTableEntryRequest request,
-      MountTable mountTable) throws IOException {
-    long nsQuota = -1;
-    long ssQuota = -1;
-
-    String path = request.getEntry().getSourcePath();
-    if (this.router.getSubclusterResolver() instanceof MountTableResolver) {
-      MountTableResolver mResolver = (MountTableResolver) this.router
-          .getSubclusterResolver();
-      MountTable entry = mResolver.getMountPoint(path);
-      if (entry != null) {
-        RouterQuotaUsage preQuota = entry.getQuota();
-        nsQuota = preQuota.getQuota();
-        ssQuota = preQuota.getSpaceQuota();
+      MountTable oldEntry) throws IOException {
+    if (oldEntry != null) {
+      MountTable updateEntry = request.getEntry();
+      // If locations are changed, the new destinations need to be in sync with
+      // the mount quota.
+      if (!oldEntry.getDestinations().equals(updateEntry.getDestinations())) {
+        return true;
       }
-    }
-    RouterQuotaUsage mountQuota = mountTable.getQuota();
-    if (nsQuota != mountQuota.getQuota()
-        || ssQuota != mountQuota.getSpaceQuota()) {
+      // Previous quota.
+      RouterQuotaUsage preQuota = oldEntry.getQuota();
+      long nsQuota = preQuota.getQuota();
+      long ssQuota = preQuota.getSpaceQuota();
+      // New quota
+      RouterQuotaUsage mountQuota = updateEntry.getQuota();
+      // If the quota has changed, the new value needs to be synchronized.
+      if (nsQuota != mountQuota.getQuota()
+          || ssQuota != mountQuota.getSpaceQuota()) {
+        return true;
+      }
+      return false;
+    } else {
+      // If the old entry is not available, always sync the quota, since we
+      // cannot rule out a quota change.
       return true;
     }
-
-    return false;
   }
 
   /**
@@ -323,15 +340,30 @@
    */
   private void synchronizeQuota(String path, long nsQuota, long ssQuota)
       throws IOException {
-    if (router.isQuotaEnabled() &&
-        (nsQuota != HdfsConstants.QUOTA_DONT_SET
-        || ssQuota != HdfsConstants.QUOTA_DONT_SET)) {
-      HdfsFileStatus ret = this.router.getRpcServer().getFileInfo(path);
-      if (ret != null) {
-        this.router.getRpcServer().getQuotaModule().setQuota(path, nsQuota,
-            ssQuota, null);
+    if (isQuotaSyncRequired(nsQuota, ssQuota)) {
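+      // Reload the mount table cache first so the resolver reflects the
+      // latest destinations.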
+      if (iStateStoreCache) {
+        ((StateStoreCache) this.router.getSubclusterResolver()).loadCache(true);
+      }
+      Quota routerQuota = this.router.getRpcServer().getQuotaModule();
+      routerQuota.setQuota(path, nsQuota, ssQuota, null);
+    }
+  }
+
+  /**
+   * Checks whether the quota needs to be synchronized.
+   * @param nsQuota namespace quota to be set.
+   * @param ssQuota space quota to be set.
+   * @return true if the quota needs to be synchronized.
+   */
+  private boolean isQuotaSyncRequired(long nsQuota, long ssQuota) {
+    // Check whether quota is enabled for the router.
+    if (router.isQuotaEnabled()) {
+      if ((nsQuota != HdfsConstants.QUOTA_DONT_SET
+          || ssQuota != HdfsConstants.QUOTA_DONT_SET)) {
+        return true;
       }
     }
+    return false;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
index 6a60e4a..13563b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.federation.protocol.proto";
 option java_outer_classname = "HdfsServerFederationProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
index d6aff49..e8fc268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "RouterProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index c3a5c17..f0e4dc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
@@ -474,6 +475,14 @@
     assertEquals(ssQuota, quota.getSpaceQuota());
     assertEquals(3, quota.getFileAndDirectoryCount());
     assertEquals(BLOCK_SIZE, quota.getSpaceConsumed());
+
+    // verify quota sync on adding new destination to mount entry.
+    updatedMountTable = getMountTable(path);
+    nnFs1.mkdirs(new Path("/newPath"));
+    updatedMountTable.setDestinations(
+        Collections.singletonList(new RemoteLocation("ns0", "/newPath", path)));
+    updateMountTable(updatedMountTable);
+    assertEquals(nsQuota, nnFs1.getQuotaUsage(new Path("/newPath")).getQuota());
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.1.xml
new file mode 100644
index 0000000..2452f91
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.2.1.xml
@@ -0,0 +1,674 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Sep 10 17:02:45 UTC 2019 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 3.2.1"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.2.1.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.2.1.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.6/httpclient-4.5.6.jar:/maven/org/apache/httpcomponents/httpcore/4.4.10/httpcore-4.4.10.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.3/json-smart-2.3.jar:/maven/net/minidev/accessors-smart/1.2/accessors-smart-1.2.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/zookeeper/zookeeper/3.4.13/zookeeper-3.4.13.jar:/maven/org/apache/curator/curator-framework/2.13.0/curator-framework-2.13.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.2.1.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/eclipse/jetty/jetty-servlet/9.3.24.v20180605/jetty-servlet-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-security/9.3.24.v20180605/jetty-security-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-webapp/9.3.24.v20180605/jetty-webapp-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-xml/9.3.24.v20180605/jetty-xml-9.3.24.v20180605.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-beanutils/commons-beanutils/1.9.3/commons-beanutils-1.9
.3.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.7/commons-lang3-3.7.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.13.0/curator-client-2.13.0.jar:/maven/org/apache/curator/curator-recipes/2.13.0/curator-recipes-2.13.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.18/commons-compress-1.18.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.2.1.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.9.8/jackson-annotations-2.9.8.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/errorprone/error_prone_annotations/2.2.0/error_prone_annotations-2.2.0.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/eclipse/jetty/jetty-server/9.3.24.v20180605/jetty-server-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-http/9.3.24.v20180605/jetty-http-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-io/9.3.24.v20180605/jetty-io-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-util/9.3.24.v20180605/jetty-util-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.3.24.v20180605/jetty-util-ajax-9.3.24.v20180605.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.11/commons-codec-1.11.jar:/maven/commons-io/commons-io/2.5/commons-io-2.5.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:/maven/io/netty/netty-all/4.0.52.Final/netty-all-4.0.52.Final.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.9.8/jackson-databind-2.9.8.jar:/maven/com/faste
rxml/jackson/core/jackson-core/2.9.8/jackson-core-2.9.8.jar:/maven/xerces/xercesImpl/2.11.0/xercesImpl-2.11.0.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 3.2.1 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.aliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+  <class name="InMemoryAliasMap" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="init" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="list" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol.IterationResult"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="marker" type="java.util.Optional"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="read" return="java.util.Optional"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getBlockPoolId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="fromProvidedStorageLocationBytes" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocationDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="com.google.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="fromBlockBytes" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="com.google.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[InMemoryAliasMap is an implementation of the InMemoryAliasMapProtocol for
+ use with LevelDB.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+  <!-- start interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <interface name="BlockAlias"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface used to load provided blocks.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.FileRegion -->
+  <class name="FileRegion" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.common.BlockAlias"/>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProvidedStorageLocation" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class is used to represent provided blocks that are file regions,
+ i.e., can be described using (path, offset, length).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.FileRegion -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+  <class name="BlockAliasMap" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BlockAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a reader to the alias map.
+ @param opts reader options
+ @param blockPoolID block pool id to use
+ @return {@link Reader} to the alias map. If a Reader for the blockPoolID
+ cannot be created, this will return null.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the writer for the alias map.
+ @param opts writer options.
+ @param blockPoolID block pool id to use
+ @return {@link Writer} to the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="refresh"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Refresh the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An abstract class used to read and write block maps for provided blocks.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap.impl">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <class name="LevelDBFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="LevelDBFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A LevelDB based implementation of {@link BlockAliasMap}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+  <class name="TextFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="TextFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="blockPoolIDFromFileName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="fileNameFromBlockPoolID" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockPoolID" type="java.lang.String"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class is used for block maps stored as text files,
+ with a specified delimiter.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.command">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc.).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc.).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be overridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 13da176..e88f0f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -310,42 +310,30 @@
         </executions>
       </plugin>
       <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>src-compile-protoc</id>
+            <configuration>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-hdfs-client/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../hadoop-hdfs-client/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>HdfsServer.proto</include>
-                  <include>DatanodeProtocol.proto</include>
-                  <include>DatanodeLifelineProtocol.proto</include>
-                  <include>HAZKInfo.proto</include>
-                  <include>InterDatanodeProtocol.proto</include>
-                  <include>JournalProtocol.proto</include>
-                  <include>NamenodeProtocol.proto</include>
-                  <include>QJournalProtocol.proto</include>
-                  <include>editlog.proto</include>
-                  <include>fsimage.proto</include>
-                  <include>AliasMapProtocol.proto</include>
-                  <include>InterQJournalProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
             <id>resource-gz</id>
             <phase>generate-resources</phase>
             <goals>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index 7889ef4..0884fc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -226,6 +226,9 @@
           String nodeLocation = excludedNode.getNetworkLocation()
               + "/" + excludedNode.getName();
           DatanodeDescriptor dn = (DatanodeDescriptor)getNode(nodeLocation);
+          if (dn == null) {
+            continue;
+          }
           availableCount -= dn.hasStorageType(type)? 1 : 0;
         } else {
           LOG.error("Unexpected node type: {}.", excludedNode.getClass());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 0db8a6f..4dc51c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -129,7 +129,7 @@
       return BlockReaderRemote.newBlockReader(
           "dummy", block, blockToken, offsetInBlock,
           block.getNumBytes() - offsetInBlock, true, "", peer, source,
-          null, stripedReader.getCachingStrategy(), -1);
+          null, stripedReader.getCachingStrategy(), -1, conf);
     } catch (IOException e) {
       LOG.info("Exception while creating remote block reader, datanode {}",
           source, e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index 1514927..283f13b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -267,8 +267,8 @@
     Value prevValue = mappableBlockMap.get(key);
     boolean deferred = false;
 
-    if (!dataset.datanode.getShortCircuitRegistry().
-            processBlockMunlockRequest(key)) {
+    if (cacheLoader.isTransientCache() && !dataset.datanode.
+        getShortCircuitRegistry().processBlockMunlockRequest(key)) {
       deferred = true;
     }
     if (prevValue == null) {
@@ -438,7 +438,11 @@
         }
         LOG.debug("Successfully cached {}.  We are now caching {} bytes in"
             + " total.", key, newUsedBytes);
-        dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
+        // Only applicable to DRAM cache.
+        if (cacheLoader.isTransientCache()) {
+          dataset.datanode.
+              getShortCircuitRegistry().processBlockMlockEvent(key);
+        }
         numBlocksCached.addAndGet(1);
         dataset.datanode.getMetrics().incrBlocksCached(1);
         success = true;
@@ -476,6 +480,11 @@
     }
 
     private boolean shouldDefer() {
+      // Currently, defer condition is just checked for DRAM cache case.
+      if (!cacheLoader.isTransientCache()) {
+        return false;
+      }
+
       /* If revocationTimeMs == 0, this is an immediate uncache request.
        * No clients were anchored at the time we made the request. */
       if (revocationTimeMs == 0) {
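Taken together, the three hunks above gate all ShortCircuitRegistry traffic on
cacheLoader.isTransientCache(): mlock/munlock anchoring is only meaningful for
the DRAM (transient) cache, so a non-transient cache loader now skips the
registry checks and notifications entirely.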
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/AliasMapProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/AliasMapProtocol.proto
index 01dd952..8050f35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/AliasMapProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/AliasMapProtocol.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "AliasMapProtocolProtos";
 option java_generic_services = true;
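The same one-line addition recurs in every .proto file below: protoc 3.x warns
when a file carries no explicit syntax statement and then falls back to proto2,
so declaring syntax = "proto2"; keeps the generated code identical while
silencing that warning under the new protobuf 3.7.1 toolchain.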
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeLifelineProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeLifelineProtocol.proto
index b6ab756..e10a886 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeLifelineProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeLifelineProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "DatanodeLifelineProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 4a8f9f0..0e24130 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "DatanodeProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
index aa8b6be..6d45a93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.server.namenode.ha.proto";
 option java_outer_classname = "HAZKInfoProtos";
 package hadoop.hdfs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
index 85cfb6c..78607ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
-
+syntax = "proto2";
 
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "HdfsServerProtos";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
index 580f8d3..47332a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "InterDatanodeProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterQJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterQJournalProtocol.proto
index 0ecdff1..e73ca23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterQJournalProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterQJournalProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.qjournal.protocol";
 option java_outer_classname = "InterQJournalProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
index 3fd029b..35c401e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "JournalProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
index 89edfbf..97f5bca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
@@ -24,7 +24,7 @@
 
 // This file contains protocol buffers that are used throughout HDFS -- i.e.
 // by the client, server, and data transfer protocols.
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "NamenodeProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
index b4d2b31..e366d1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
@@ -21,7 +21,7 @@
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.qjournal.protocol";
 option java_outer_classname = "QJournalProtocolProtos";
 option java_generic_services = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
index f25fe59..22fd743 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/editlog.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.protocol.proto";
 option java_outer_classname = "EditLogProtos";
 option java_generate_equals_and_hash = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 5a04a9b..2bdd302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+syntax = "proto2";
 option java_package = "org.apache.hadoop.hdfs.server.namenode";
 option java_outer_classname = "FsImageProto";
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 63a99d8..d34e4cd 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4098,6 +4098,18 @@
   </description>
 </property>
 
+  <property>
+    <name>dfs.client.block.reader.remote.buffer.size</name>
+    <value>8192</value>
+    <description>
+      The output stream buffer size of a DFSClient remote read. The default
+      buffer size is 8KB. The buffer carries only a few request parameters:
+      block, blockToken, clientName, startOffset, len, verifyChecksum and
+      cachingStrategy.
+      Adjusting the value to the workload can reduce unnecessary memory usage
+      and the frequency of garbage collection. A value of 512 might be
+      reasonable for such small payloads.
+    </description>
+  </property>
+
 <property>
   <name>dfs.content-summary.limit</name>
   <value>5000</value>
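As a minimal sketch of how a client might apply this tuning programmatically
(the class name is hypothetical; the property name is the one introduced
above):

    import org.apache.hadoop.conf.Configuration;

    public class RemoteReaderBufferTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Shrink the remote block reader's output stream buffer from the
        // 8 KB default; the request parameters it carries are small, so the
        // description above suggests 512 bytes as a reasonable value.
        conf.setInt("dfs.client.block.reader.remote.buffer.size", 512);
        System.out.println(conf.getInt(
            "dfs.client.block.reader.remote.buffer.size", 8192));
      }
    }

Relatedly, the StripedBlockReader hunk earlier in this patch now threads conf
into BlockReaderRemote.newBlockReader, which appears to let the erasure-coding
reconstruction path pick up the same setting.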
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
index 42b1928..3360d68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
@@ -23,6 +23,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.Node;
@@ -37,9 +39,11 @@
 import java.util.Set;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+
 /**
  * This class tests the correctness of storage type info stored in
  * DFSNetworkTopology.
@@ -368,6 +372,18 @@
     }
   }
 
+  @Test
+  public void testChooseRandomWithStorageTypeWithExcludedForNullCheck()
+      throws Exception {
+    HashSet<Node> excluded = new HashSet<>();
+
+    excluded.add(new DatanodeInfoBuilder()
+        .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build());
+    Node node = CLUSTER.chooseRandomWithStorageType("/", "/l1/d1/r1", excluded,
+        StorageType.ARCHIVE);
+
+    assertNotNull(node);
+  }
 
   /**
    * This test tests the wrapper method. The wrapper method only takes one scope
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
index bc8135f..d654a60 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
@@ -52,13 +52,17 @@
 
   private ReplicationType replicationType;
 
+  private int replicationFactor;
+
   /**
    * Constructs OzoneKey from OmKeyInfo.
    *
    */
+  @SuppressWarnings("parameternumber")
   public OzoneKey(String volumeName, String bucketName,
                   String keyName, long size, long creationTime,
-                  long modificationTime, ReplicationType type) {
+                  long modificationTime, ReplicationType type,
+                  int replicationFactor) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.name = keyName;
@@ -66,6 +70,7 @@
     this.creationTime = creationTime;
     this.modificationTime = modificationTime;
     this.replicationType = type;
+    this.replicationFactor = replicationFactor;
   }
 
   /**
@@ -130,4 +135,14 @@
   public ReplicationType getReplicationType() {
     return replicationType;
   }
+
+  /**
+   * Returns the replication factor of the key.
+   *
+   * @return replicationFactor
+   */
+  public int getReplicationFactor() {
+    return replicationFactor;
+  }
+
 }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
index 9282353..a57b663 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
@@ -46,9 +46,9 @@
                          long size, long creationTime, long modificationTime,
                          List<OzoneKeyLocation> ozoneKeyLocations,
                          ReplicationType type, Map<String, String> metadata,
-                         FileEncryptionInfo feInfo) {
+                         FileEncryptionInfo feInfo, int replicationFactor) {
     super(volumeName, bucketName, keyName, size, creationTime,
-        modificationTime, type);
+        modificationTime, type, replicationFactor);
     this.ozoneKeyLocations = ozoneKeyLocations;
     this.metadata = metadata;
     this.feInfo = feInfo;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 202e0eb..5b6d620 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -684,7 +684,8 @@
         key.getDataSize(),
         key.getCreationTime(),
         key.getModificationTime(),
-        ReplicationType.valueOf(key.getType().toString())))
+        ReplicationType.valueOf(key.getType().toString()),
+        key.getFactor().getNumber()))
         .collect(Collectors.toList());
   }
 
@@ -712,7 +713,7 @@
         keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
         keyInfo.getModificationTime(), ozoneKeyLocations, ReplicationType
         .valueOf(keyInfo.getType().toString()), keyInfo.getMetadata(),
-        keyInfo.getFileEncryptionInfo());
+        keyInfo.getFileEncryptionInfo(), keyInfo.getFactor().getNumber());
   }
 
   @Override
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index ee11e38..7a218a4 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -16,6 +16,7 @@
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 cd "$DIR/../../.." || exit 1
 
+BASE_DIR="$(pwd -P)"
 REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"}
 mkdir -p "$REPORT_DIR"
 REPORT_FILE="$REPORT_DIR/summary.txt"
@@ -23,7 +24,16 @@
 mvn -B -fn checkstyle:check -f pom.ozone.xml
 
 #Print out the exact violations with parsing XML results with sed
-find "." -name checkstyle-errors.xml -print0  | xargs -0 sed  '$!N; /<file.*\n<\/file/d;P;D' | sed '/<\/.*/d;/<checkstyle.*/d;s/<error.*line="\([[:digit:]]*\)".*message="\([^"]\+\).*/ \1: \2/;s/<file name="\([^"]*\)".*/\1/;/<\?xml.*>/d' | tee "$REPORT_FILE"
+find "." -name checkstyle-errors.xml -print0 \
+  | xargs -0 sed '$!N; /<file.*\n<\/file/d;P;D' \
+  | sed \
+      -e '/<\?xml.*>/d' \
+      -e '/<checkstyle.*/d' \
+      -e '/<\/.*/d' \
+      -e 's/<file name="\([^"]*\)".*/\1/' \
+      -e 's/<error.*line="\([[:digit:]]*\)".*message="\([^"]*\)".*/ \1: \2/' \
+      -e "s!^${BASE_DIR}/!!" \
+  | tee "$REPORT_FILE"
 
 ## generate counter
 wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures"
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java
new file mode 100644
index 0000000..4e04b4c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java
@@ -0,0 +1,343 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.ozShell;
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.web.ozShell.OzoneShell;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine;
+import picocli.CommandLine.ExecutionException;
+import picocli.CommandLine.IExceptionHandler2;
+import picocli.CommandLine.ParameterException;
+import picocli.CommandLine.ParseResult;
+import picocli.CommandLine.RunLast;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.fail;
+
+/**
+ * This class tests Ozone sh shell command.
+ * Inspired by TestS3Shell
+ */
+public class TestOzoneShellHA {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneShellHA.class);
+
+  /**
+   * Set the timeout for every test.
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(300000);
+
+  private static File baseDir;
+  private static OzoneConfiguration conf = null;
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneShell ozoneShell = null;
+
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
+
+  private static String omServiceId;
+  private static String clusterId;
+  private static String scmId;
+  private static int numOfOMs;
+
+  /**
+   * Create a MiniOzoneCluster for testing, using the distributed Ozone
+   * handler type.
+   *
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+
+    String path = GenericTestUtils.getTempPath(
+        TestOzoneShellHA.class.getSimpleName());
+    baseDir = new File(path);
+    baseDir.mkdirs();
+    ozoneShell = new OzoneShell();
+
+    // Init HA cluster
+    omServiceId = "om-service-test1";
+    numOfOMs = 3;
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    cluster = MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOMServiceId(omServiceId)
+        .setNumOfOzoneManagers(numOfOMs)
+        .build();
+    conf.setQuietMode(false);
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+
+    if (baseDir != null) {
+      FileUtil.fullyDelete(baseDir, true);
+    }
+  }
+
+  @Before
+  public void setup() {
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+  }
+
+  @After
+  public void reset() {
+    // reset stream after each unit test
+    out.reset();
+    err.reset();
+
+    // restore system streams
+    System.setOut(OLD_OUT);
+    System.setErr(OLD_ERR);
+  }
+
+  private void execute(OzoneShell shell, String[] args) {
+    LOG.info("Executing OzoneShell command with args {}", Arrays.asList(args));
+    CommandLine cmd = shell.getCmd();
+
+    IExceptionHandler2<List<Object>> exceptionHandler =
+        new IExceptionHandler2<List<Object>>() {
+          @Override
+          public List<Object> handleParseException(ParameterException ex,
+              String[] args) {
+            throw ex;
+          }
+
+          @Override
+          public List<Object> handleExecutionException(ExecutionException ex,
+              ParseResult parseRes) {
+            throw ex;
+          }
+        };
+
+    // Since there is no elegant way to pass Ozone config to the shell,
+    // the idea is to use '--set' to pass the OM HA configs.
+    String[] argsWithHAConf = getHASetConfStrings(args);
+
+    cmd.parseWithHandlers(new RunLast(), exceptionHandler, argsWithHAConf);
+  }
+
+  /**
+   * Execute command and assert that the thrown exception contains the
+   * expected error message. Runs without assertion when expectedError is
+   * null or empty.
+   */
+  private void executeWithError(OzoneShell shell, String[] args,
+      String expectedError) {
+    if (Strings.isNullOrEmpty(expectedError)) {
+      execute(shell, args);
+    } else {
+      try {
+        execute(shell, args);
+        fail("Exception is expected from command execution " + Arrays
+            .asList(args));
+      } catch (Exception ex) {
+        if (!Strings.isNullOrEmpty(expectedError)) {
+          Throwable exceptionToCheck = ex;
+          if (exceptionToCheck.getCause() != null) {
+            exceptionToCheck = exceptionToCheck.getCause();
+          }
+          Assert.assertTrue(
+              String.format(
+                  "Error of OzoneShell code doesn't contain the " +
+                      "exception [%s] in [%s]",
+                  expectedError, exceptionToCheck.getMessage()),
+              exceptionToCheck.getMessage().contains(expectedError));
+        }
+      }
+    }
+  }
+
+  /**
+   * Finds the leader OM in the MiniOzoneHACluster. This helper function is
+   * similar to the one in TestOzoneFsHAURLs.
+   * TODO: This should be put into MiniOzoneHAClusterImpl in the future.
+   *
+   * @return the leader OM's Node ID, or null if no leader is found.
+   */
+  private String getLeaderOMNodeId() {
+    Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, omServiceId);
+    assert(omNodeIds.size() == numOfOMs);
+    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
+    // Note: this loop may be implemented inside MiniOzoneHAClusterImpl
+    for (String omNodeId : omNodeIds) {
+      // Find the leader OM
+      if (!haCluster.getOzoneManager(omNodeId).isLeader()) {
+        continue;
+      }
+      return omNodeId;
+    }
+    return null;
+  }
+
+  private String getSetConfStringFromConf(String key) {
+    return String.format("--set=%s=%s", key, conf.get(key));
+  }
+
+  private String generateSetConfString(String key, String value) {
+    return String.format("--set=%s=%s", key, value);
+  }
+
+  /**
+   * Helper function to get a String array to be fed into OzoneShell.
+   * @param numOfArgs Number of additional arguments after the HA conf
+   *                  strings; this translates into the number of empty array
+   *                  elements after the HA conf strings.
+   * @return String array.
+   */
+  private String[] getHASetConfStrings(int numOfArgs) {
+    assert(numOfArgs >= 0);
+    String[] res = new String[1 + 1 + numOfOMs + numOfArgs];
+    final int indexOmServiceIds = 0;
+    final int indexOmNodes = 1;
+    final int indexOmAddressStart = 2;
+
+    res[indexOmServiceIds] = getSetConfStringFromConf(
+        OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY);
+
+    String omNodesKey = OmUtils.addKeySuffixes(
+        OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
+    String omNodesVal = conf.get(omNodesKey);
+    res[indexOmNodes] = generateSetConfString(omNodesKey, omNodesVal);
+
+    String[] omNodesArr = omNodesVal.split(",");
+    // Sanity check
+    assert(omNodesArr.length == numOfOMs);
+    for (int i = 0; i < numOfOMs; i++) {
+      res[indexOmAddressStart + i] =
+          getSetConfStringFromConf(OmUtils.addKeySuffixes(
+              OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodesArr[i]));
+    }
+
+    return res;
+  }
+
+  /**
+   * Helper function to create a new set of arguments that contains HA configs.
+   * @param existingArgs Existing arguments to be fed into OzoneShell command.
+   * @return String array.
+   */
+  private String[] getHASetConfStrings(String[] existingArgs) {
+    // Get a String array populated with HA configs first
+    String[] res = getHASetConfStrings(existingArgs.length);
+
+    int indexCopyStart = res.length - existingArgs.length;
+    // Then copy the existing args to the returned String array
+    for (int i = 0; i < existingArgs.length; i++) {
+      res[indexCopyStart + i] = existingArgs[i];
+    }
+    return res;
+  }
+
+  /**
+   * Tests ozone sh command URI parsing with volume and bucket create commands.
+   */
+  @Test
+  public void testOzoneShCmdURIs() {
+    // Test case 1: ozone sh volume create /volume
+    // Expectation: Failure.
+    String[] args = new String[] {"volume", "create", "/volume"};
+    executeWithError(ozoneShell, args,
+        "Service ID or host name must not be omitted");
+
+    // Get leader OM node RPC address from ozone.om.address.omServiceId.omNode
+    String omLeaderNodeId = getLeaderOMNodeId();
+    String omLeaderNodeAddrKey = OmUtils.addKeySuffixes(
+        OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omLeaderNodeId);
+    String omLeaderNodeAddr = conf.get(omLeaderNodeAddrKey);
+    String omLeaderNodeAddrWithoutPort = omLeaderNodeAddr.split(":")[0];
+
+    // Test case 2: ozone sh volume create o3://om1/volume2
+    // Expectation: Success.
+    // Note: For now it seems OzoneShell is only trying the default port 9862
+    // instead of using the port defined in ozone.om.address (as ozone fs does).
+    // So this case would fail until that behavior is fixed.
+    // TODO: Fix this behavior, then uncomment the execute() below.
+    String setOmAddress = "--set=" + OMConfigKeys.OZONE_OM_ADDRESS_KEY + "="
+        + omLeaderNodeAddr;
+    args = new String[] {setOmAddress,
+        "volume", "create", "o3://" + omLeaderNodeAddrWithoutPort + "/volume2"};
+    //execute(ozoneShell, args);
+
+    // Test case 3: ozone sh volume create o3://om1:port/volume3
+    // Expectation: Success.
+    args = new String[] {
+        "volume", "create", "o3://" + omLeaderNodeAddr + "/volume3"};
+    execute(ozoneShell, args);
+
+    // Test case 4: ozone sh volume create o3://id1/volume
+    // Expectation: Success.
+    args = new String[] {"volume", "create", "o3://" + omServiceId + "/volume"};
+    execute(ozoneShell, args);
+
+    // Test case 5: ozone sh volume create o3://id1:port/volume
+    // Expectation: Failure.
+    args = new String[] {"volume", "create",
+        "o3://" + omServiceId + ":9862" + "/volume"};
+    executeWithError(ozoneShell, args, "does not use port information");
+
+    // Test case 6: ozone sh bucket create /volume/bucket
+    // Expectation: Failure.
+    args = new String[] {"bucket", "create", "/volume/bucket"};
+    executeWithError(ozoneShell, args,
+        "Service ID or host name must not be omitted");
+
+    // Test case 7: ozone sh bucket create o3://om1/volume/bucket
+    // Expectation: Success.
+    args = new String[] {
+        "bucket", "create", "o3://" + omServiceId + "/volume/bucket"};
+    execute(ozoneShell, args);
+  }
+}
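To make the --set mechanism concrete: for the 3-OM cluster above,
getHASetConfStrings prepends one --set per required HA key before the user
arguments, so "volume create o3://om-service-test1/volume" is effectively
executed with an argv along these lines (key names follow OMConfigKeys as used
above; node IDs, hosts and ports are hypothetical):

    --set=ozone.om.service.ids=om-service-test1
    --set=ozone.om.nodes.om-service-test1=omNode-1,omNode-2,omNode-3
    --set=ozone.om.address.om-service-test1.omNode-1=127.0.0.1:9862
    --set=ozone.om.address.om-service-test1.omNode-2=127.0.0.1:9863
    --set=ozone.om.address.om-service-test1.omNode-3=127.0.0.1:9864
    volume create o3://om-service-test1/volume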
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
index 207b0b4..4cb283e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
@@ -22,6 +22,7 @@
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -92,13 +93,27 @@
     } else if (scheme.equals(OZONE_RPC_SCHEME)) {
       if (ozoneURI.getHost() != null && !ozoneURI.getAuthority()
           .equals(EMPTY_HOST)) {
-        if (ozoneURI.getPort() == -1) {
+        if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) {
+          // When host is an HA service ID
+          if (ozoneURI.getPort() != -1) {
+            throw new OzoneClientException(
+                "Port " + ozoneURI.getPort() + " specified in URI but host '"
+                    + ozoneURI.getHost() + "' is a logical (HA) OzoneManager "
+                    + "and does not use port information.");
+          }
+          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost(), conf);
+        } else if (ozoneURI.getPort() == -1) {
           client = OzoneClientFactory.getRpcClient(ozoneURI.getHost());
         } else {
           client = OzoneClientFactory
               .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
         }
       } else {
+        // When host is not specified
+        if (OmUtils.isServiceIdsDefined(conf)) {
+          throw new OzoneClientException("Service ID or host name must not"
+              + " be omitted when ozone.om.service.ids is defined.");
+        }
         client = OzoneClientFactory.getRpcClient(conf);
       }
     } else {
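A short sketch of how the accepted o3:// URI shapes map onto the factory calls
in the hunk above (host names are illustrative; the signatures are the ones the
patch invokes):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public class OzoneAddressShapes {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // o3://id1/vol -- host is an HA service ID; a port here is rejected.
        OzoneClient byServiceId = OzoneClientFactory.getRpcClient("id1", conf);
        // o3://host/vol -- plain host, default port.
        OzoneClient byHost =
            OzoneClientFactory.getRpcClient("om.example.com");
        // o3://host:9862/vol -- explicit host and port.
        OzoneClient byHostAndPort =
            OzoneClientFactory.getRpcClient("om.example.com", 9862, conf);
      }
    }

A URI with no host at all is rejected outright when ozone.om.service.ids is
defined, since the intended cluster would be ambiguous.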
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index e5c0ba2..e8ebf02 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -104,7 +104,8 @@
                 size,
                 System.currentTimeMillis(),
                 System.currentTimeMillis(),
-                new ArrayList<>(), type, metadata, null
+                new ArrayList<>(), type, metadata, null,
+                factor.getValue()
             ));
             super.close();
           }
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index e165428..8b3693b 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -136,7 +136,7 @@
         <activeByDefault>false</activeByDefault>
       </activation>
       <properties>
-        <jdiff.stable.api>3.2.0</jdiff.stable.api>
+        <jdiff.stable.api>3.2.1</jdiff.stable.api>
         <jdiff.stability>-unstable</jdiff.stability>
         <!-- Commented out for HADOOP-11776 -->
         <!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 9f85148..e610b6a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1715,9 +1715,56 @@
           <artifactId>frontend-maven-plugin</artifactId>
           <version>${frontend-maven-plugin.version}</version>
         </plugin>
+        <plugin>
+          <groupId>org.xolstice.maven.plugins</groupId>
+          <artifactId>protobuf-maven-plugin</artifactId>
+          <version>${protobuf-maven-plugin.version}</version>
+          <extensions>true</extensions>
+          <configuration>
+            <protocArtifact>
+              com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
+            </protocArtifact>
+            <attachProtoSources>false</attachProtoSources>
+          </configuration>
+          <executions>
+            <execution>
+              <id>src-compile-protoc</id>
+              <phase>generate-sources</phase>
+              <goals>
+                <goal>compile</goal>
+              </goals>
+              <configuration>
+                <includeDependenciesInDescriptorSet>false</includeDependenciesInDescriptorSet>
+                <protoSourceRoot>${basedir}/src/main/proto</protoSourceRoot>
+                <outputDirectory>${project.build.directory}/generated-sources/java</outputDirectory>
+                <clearOutputDirectory>false</clearOutputDirectory>
+                <skip>true</skip>
+              </configuration>
+            </execution>
+            <execution>
+              <id>src-test-compile-protoc</id>
+              <phase>generate-test-sources</phase>
+              <goals>
+                <goal>test-compile</goal>
+              </goals>
+              <configuration>
+                <protoTestSourceRoot>${basedir}/src/test/proto</protoTestSourceRoot>
+                <outputDirectory>${project.build.directory}/generated-test-sources/java</outputDirectory>
+                <clearOutputDirectory>false</clearOutputDirectory>
+                <skip>true</skip>
+              </configuration>
+            </execution>
+          </executions>
+        </plugin>
       </plugins>
     </pluginManagement>
-
+    <extensions>
+      <extension>
+        <groupId>kr.motd.maven</groupId>
+        <artifactId>os-maven-plugin</artifactId>
+        <version>${os-maven-plugin.version}</version>
+      </extension>
+    </extensions>
     <plugins>
       <plugin>
         <artifactId>maven-clean-plugin</artifactId>
@@ -1918,9 +1965,6 @@
       <activation>
         <activeByDefault>false</activeByDefault>
       </activation>
-      <properties>
-        <protoc.path>/opt/protobuf-3.7/bin/protoc</protoc.path>
-      </properties>
       <build>
         <plugins>
           <plugin>
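The overall pattern of the protobuf build change: the parent pom defines the
protobuf-maven-plugin executions once with skip=true, and each module that
actually owns .proto sources (hadoop-common and hadoop-hdfs above) flips them
back on with skip=false plus its own additionalProtoPathElements. The
os-maven-plugin extension supplies ${os.detected.classifier}, so a prebuilt
protoc matching the build platform is resolved from the Maven repository; that
is why the hard-coded protoc.path pointing at a locally built
/opt/protobuf-3.7/bin/protoc can be dropped.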
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 791cc41..345ac90 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -350,9 +350,43 @@
   public static final String SERVER_SIDE_ENCRYPTION_KEY =
       "fs.s3a.server-side-encryption.key";
 
-  //override signature algorithm used for signing requests
+  /**
+   * List of custom signers. Each signer class will be loaded, and the signer
+   * name will be associated with that signer class in the S3 SDK. A single
+   * custom signer: 'CustomSigner:org.apache...CustomSignerClass'. Multiple
+   * custom signers: 'CSigner1:CustomSignerClass1,CSigner2:CustomSignerClass2'.
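+   * <p>
+   * For illustration only (the class names here are hypothetical):
+   * <pre>{@code
+   * Configuration conf = new Configuration();
+   * conf.set("fs.s3a.custom.signers",
+   *     "CSigner1:com.example.CustomSignerClass1,"
+   *         + "CSigner2:com.example.CustomSignerClass2");
+   * }</pre>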
+   */
+  public static final String CUSTOM_SIGNERS = "fs.s3a.custom.signers";
+
+  /**
+   * There are three properties that can be used to specify a non-default
+   * signing algorithm:
+   * <ul>
+   * <li>fs.s3a.signing-algorithm: the longest-standing property. If it is
+   * set and neither of the service-specific properties below is set, this
+   * algorithm is used for both S3 and DDB (S3Guard).</li>
+   * <li>fs.s3a.s3.signing-algorithm: overrides the signing algorithm for S3
+   * requests only. DDB requests are unaffected and fall back to
+   * fs.s3a.signing-algorithm or the default.</li>
+   * <li>fs.s3a.ddb.signing-algorithm: overrides the signing algorithm for
+   * DDB requests only. S3 requests are unaffected and fall back to
+   * fs.s3a.signing-algorithm or the default.</li>
+   * </ul>
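+   * <p>
+   * A short illustration (the signer names are standard AWS SDK signer
+   * types; the values here are examples, not defaults):
+   * <pre>{@code
+   * Configuration conf = new Configuration();
+   * // use AWS V4 signing for every service...
+   * conf.set("fs.s3a.signing-algorithm", "AWS4SignerType");
+   * // ...except S3, which keeps the legacy S3 signer
+   * conf.set("fs.s3a.s3.signing-algorithm", "S3SignerType");
+   * }</pre>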
+   */
   public static final String SIGNING_ALGORITHM = "fs.s3a.signing-algorithm";
 
+  public static final String SIGNING_ALGORITHM_S3 =
+      "fs.s3a." + Constants.AWS_SERVICE_IDENTIFIER_S3.toLowerCase()
+          + ".signing-algorithm";
+
+  public static final String SIGNING_ALGORITHM_DDB =
+      "fs.s3a." + Constants.AWS_SERVICE_IDENTIFIER_DDB.toLowerCase()
+          + "signing-algorithm";
+
+  public static final String SIGNING_ALGORITHM_STS =
+      "fs.s3a." + Constants.AWS_SERVICE_IDENTIFIER_STS.toLowerCase()
+          + "signing-algorithm";
+
   public static final String S3N_FOLDER_SUFFIX = "_$folder$";
   public static final String FS_S3A_BLOCK_SIZE = "fs.s3a.block.size";
   public static final String FS_S3A = "s3a";
@@ -796,4 +830,7 @@
   public static final String S3GUARD_CONSISTENCY_RETRY_INTERVAL_DEFAULT =
       "2s";
 
+  public static final String AWS_SERVICE_IDENTIFIER_S3 = "S3";
+  public static final String AWS_SERVICE_IDENTIFIER_DDB = "DDB";
+  public static final String AWS_SERVICE_IDENTIFIER_STS = "STS";
 }
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
index 3e9368d..ff8ba1d6 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/DefaultS3ClientFactory.java
@@ -55,7 +55,8 @@
       final AWSCredentialsProvider credentials,
       final String userAgentSuffix) throws IOException {
     Configuration conf = getConf();
-    final ClientConfiguration awsConf = S3AUtils.createAwsConf(getConf(), bucket);
+    final ClientConfiguration awsConf = S3AUtils
+        .createAwsConf(getConf(), bucket, Constants.AWS_SERVICE_IDENTIFIER_S3);
     if (!StringUtils.isEmpty(userAgentSuffix)) {
       awsConf.setUserAgentSuffix(userAgentSuffix);
     }
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6bdbba3..0747be2 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -259,6 +259,7 @@
   private MagicCommitIntegration committerIntegration;
 
   private AWSCredentialProviderList credentials;
+  private SignerManager signerManager;
 
   private ITtlTimeProvider ttlTimeProvider;
 
@@ -359,6 +360,9 @@
       }
       useListV1 = (listVersion == 1);
 
+      signerManager = new SignerManager();
+      signerManager.initCustomSigners(conf);
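+      // register custom signers before the AWS client is created below,
+      // so that any signer override can be resolved by name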
+
       // creates the AWS client, including overriding auth chain if
       // the FS came with a DT
       // this may do some patching of the configuration (e.g. setting
@@ -3053,6 +3057,8 @@
       instrumentation = null;
       closeAutocloseables(LOG, credentials);
       cleanupWithLogger(LOG, delegationTokens.orElse(null));
+      cleanupWithLogger(LOG, signerManager);
+      signerManager = null;
       credentials = null;
     }
   }
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 54d1b53..7e3c5e6 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -1203,14 +1203,59 @@
    * @param bucket Optional bucket to use to look up per-bucket proxy secrets
    * @return new AWS client configuration
    * @throws IOException problem creating AWS client configuration
+   *
+   * @deprecated use {@link #createAwsConf(Configuration, String, String)}
    */
+  @Deprecated
   public static ClientConfiguration createAwsConf(Configuration conf,
       String bucket)
       throws IOException {
+    return createAwsConf(conf, bucket, null);
+  }
+
+  /**
+   * Create a new AWS {@code ClientConfiguration}. All clients to AWS services
+   * <i>MUST</i> use this or the equivalents for the specific service for
+   * consistent setup of connectivity, UA, proxy settings.
+   *
+   * @param conf The Hadoop configuration
+   * @param bucket Optional bucket to use to look up per-bucket proxy secrets
+   * @param awsServiceIdentifier a string representing the AWS service (S3,
+   * DDB, etc) for which the ClientConfiguration is being created.
+   * @return new AWS client configuration
+   * @throws IOException problem creating AWS client configuration
+   */
+  public static ClientConfiguration createAwsConf(Configuration conf,
+      String bucket, String awsServiceIdentifier)
+      throws IOException {
     final ClientConfiguration awsConf = new ClientConfiguration();
     initConnectionSettings(conf, awsConf);
     initProxySupport(conf, bucket, awsConf);
     initUserAgent(conf, awsConf);
+    if (StringUtils.isNotEmpty(awsServiceIdentifier)) {
+      String configKey = null;
+      switch (awsServiceIdentifier) {
+      case AWS_SERVICE_IDENTIFIER_S3:
+        configKey = SIGNING_ALGORITHM_S3;
+        break;
+      case AWS_SERVICE_IDENTIFIER_DDB:
+        configKey = SIGNING_ALGORITHM_DDB;
+        break;
+      case AWS_SERVICE_IDENTIFIER_STS:
+        configKey = SIGNING_ALGORITHM_STS;
+        break;
+      default:
+        // Nothing to do. The original signer override is already set up.
+      }
+      if (configKey != null) {
+        String signerOverride = conf.getTrimmed(configKey, "");
+        if (!signerOverride.isEmpty()) {
+          LOG.debug("Signer override for {} = {}", awsServiceIdentifier,
+              signerOverride);
+          awsConf.setSignerOverride(signerOverride);
+        }
+      }
+    }
     return awsConf;
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java
new file mode 100644
index 0000000..5ca1482
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.s3a;
+
+import com.amazonaws.auth.Signer;
+import com.amazonaws.auth.SignerFactory;
+import java.io.Closeable;
+import java.io.IOException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
+
+/**
+ * Class to handle custom signers.
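+ * Custom signers are loaded from configuration and registered, once per
+ * class, with the AWS SDK's {@link SignerFactory}, after which they can be
+ * referenced by name in the signing-algorithm options.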
+ */
+public class SignerManager implements Closeable {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(SignerManager.class);
+
+  public SignerManager() {
+  }
+
+  /**
+   * Initialize custom signers and register them with the AWS SDK.
+   *
+   * @param conf Hadoop configuration
+   */
+  public void initCustomSigners(Configuration conf) {
+    String[] customSigners = conf.getTrimmedStrings(CUSTOM_SIGNERS);
+    if (customSigners == null || customSigners.length == 0) {
+      // No custom signers specified, nothing to do.
+      LOG.debug("No custom signers specified");
+      return;
+    }
+
+    for (String customSigner : customSigners) {
+      String[] parts = customSigner.split(":");
+      if (parts.length != 2) {
+        String message =
+            "Invalid format (Expected name:SignerClass) for CustomSigner: ["
+                + customSigner
+                + "]";
+        LOG.error(message);
+        throw new IllegalArgumentException(message);
+      }
+      maybeRegisterSigner(parts[0], parts[1], conf);
+    }
+  }
+
+  /*
+   * Make sure the signer class is registered once with the AWS SDK
+   */
+  private static void maybeRegisterSigner(String signerName,
+      String signerClassName, Configuration conf) {
+    try {
+      SignerFactory.getSignerByTypeAndService(signerName, null);
+    } catch (IllegalArgumentException e) {
+      // Signer is not registered with the AWS SDK.
+      // Load the class and register the signer.
+      Class<? extends Signer> clazz = null;
+      try {
+        clazz = (Class<? extends Signer>) conf.getClassByName(signerClassName);
+      } catch (ClassNotFoundException cnfe) {
+        throw new RuntimeException(String
+            .format("Signer class [%s] not found for signer [%s]",
+                signerClassName, signerName), cnfe);
+      }
+      LOG.debug("Registering Custom Signer - [{}->{}]", signerName,
+          clazz.getName());
+      synchronized (SignerManager.class) {
+        SignerFactory.registerSigner(signerName, clazz);
+      }
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
index 74aca50..82811e6 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/STSClientFactory.java
@@ -31,12 +31,14 @@
 import com.amazonaws.services.securitytoken.model.Credentials;
 import com.amazonaws.services.securitytoken.model.GetSessionTokenRequest;
 import com.google.common.base.Preconditions;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.Constants;
 import org.apache.hadoop.fs.s3a.Invoker;
 import org.apache.hadoop.fs.s3a.Retries;
 import org.apache.hadoop.fs.s3a.S3AUtils;
@@ -73,7 +75,8 @@
       final Configuration conf,
       final String bucket,
       final AWSCredentialsProvider credentials) throws IOException {
-    final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
+    final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket,
+        Constants.AWS_SERVICE_IDENTIFIER_STS);
     String endpoint = conf.getTrimmed(DELEGATION_TOKEN_ENDPOINT,
         DEFAULT_DELEGATION_TOKEN_ENDPOINT);
     String region = conf.getTrimmed(DELEGATION_TOKEN_REGION,
@@ -99,7 +102,8 @@
       final AWSCredentialsProvider credentials,
       final String stsEndpoint,
       final String stsRegion) throws IOException {
-    final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
+    final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket,
+        Constants.AWS_SERVICE_IDENTIFIER_STS);
     return builder(credentials, awsConf, stsEndpoint, stsRegion);
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java
index 08d53cf..592ec61 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/SessionTokenBinding.java
@@ -36,6 +36,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
+import org.apache.hadoop.fs.s3a.Constants;
 import org.apache.hadoop.fs.s3a.Invoker;
 import org.apache.hadoop.fs.s3a.Retries;
 import org.apache.hadoop.fs.s3a.S3ARetryPolicy;
@@ -301,7 +302,8 @@
 
       invoker = new Invoker(new S3ARetryPolicy(conf), LOG_EVENT);
       ClientConfiguration awsConf =
-          S3AUtils.createAwsConf(conf, uri.getHost());
+          S3AUtils.createAwsConf(conf, uri.getHost(),
+              Constants.AWS_SERVICE_IDENTIFIER_STS);
       AWSSecurityTokenService tokenService =
           STSClientFactory.builder(parentAuthChain,
               awsConf,
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index 9e1d2f4..b6ff4d9 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.s3a.Constants;
 import org.apache.hadoop.fs.s3a.S3AUtils;
 
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY;
@@ -80,7 +81,8 @@
           "Should have been configured before usage");
 
       final Configuration conf = getConf();
-      final ClientConfiguration awsConf = S3AUtils.createAwsConf(conf, bucket);
+      final ClientConfiguration awsConf = S3AUtils
+          .createAwsConf(conf, bucket, Constants.AWS_SERVICE_IDENTIFIER_DDB);
 
       final String region = getRegion(conf, defaultRegion);
       LOG.debug("Creating DynamoDB client in region {}", region);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index 959c424..32f3235 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -22,6 +22,7 @@
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.S3ClientOptions;
 
+
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.reflect.FieldUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -30,12 +31,14 @@
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3native.S3xLoginHelper;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
@@ -617,4 +620,69 @@
         "override,base");
   }
 
+  @Test(timeout = 10_000L)
+  public void testS3SpecificSignerOverride() throws IOException {
+    ClientConfiguration clientConfiguration = null;
+    Configuration config;
+
+    String signerOverride = "testSigner";
+    String s3SignerOverride = "testS3Signer";
+
+    // Default SIGNING_ALGORITHM, overridden for S3 only
+    config = new Configuration();
+    config.set(SIGNING_ALGORITHM_S3, s3SignerOverride);
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_S3);
+    Assert.assertEquals(s3SignerOverride,
+        clientConfiguration.getSignerOverride());
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_DDB);
+    Assert.assertNull(clientConfiguration.getSignerOverride());
+
+    // Configured base SIGNING_ALGORITHM, overridden for S3 only
+    config = new Configuration();
+    config.set(SIGNING_ALGORITHM, signerOverride);
+    config.set(SIGNING_ALGORITHM_S3, s3SignerOverride);
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_S3);
+    Assert.assertEquals(s3SignerOverride,
+        clientConfiguration.getSignerOverride());
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_DDB);
+    Assert
+        .assertEquals(signerOverride, clientConfiguration.getSignerOverride());
+  }
+
+  @Test(timeout = 10_000L)
+  public void testDdbSpecificSignerOverride() throws IOException {
+    ClientConfiguration clientConfiguration = null;
+    Configuration config;
+
+    String signerOverride = "testSigner";
+    String ddbSignerOverride = "testDdbSigner";
+
+    // Default SIGNING_ALGORITHM, overridden for DDB only
+    config = new Configuration();
+    config.set(SIGNING_ALGORITHM_DDB, ddbSignerOverride);
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_DDB);
+    Assert.assertEquals(ddbSignerOverride,
+        clientConfiguration.getSignerOverride());
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_S3);
+    Assert.assertNull(clientConfiguration.getSignerOverride());
+
+    // Configured base SIGNING_ALGORITHM, overridden for DDB only
+    config = new Configuration();
+    config.set(SIGNING_ALGORITHM, signerOverride);
+    config.set(SIGNING_ALGORITHM_DDB, ddbSignerOverride);
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_DDB);
+    Assert.assertEquals(ddbSignerOverride,
+        clientConfiguration.getSignerOverride());
+    clientConfiguration = S3AUtils
+        .createAwsConf(config, "dontcare", AWS_SERVICE_IDENTIFIER_S3);
+    Assert
+        .assertEquals(signerOverride, clientConfiguration.getSignerOverride());
+  }
 }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
index 4f2d731..041b6f4 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java
@@ -368,7 +368,7 @@
         DurationInfo ignored = new DurationInfo(LOG, "requesting credentials")) {
       Configuration conf = new Configuration(getContract().getConf());
       ClientConfiguration awsConf =
-          S3AUtils.createAwsConf(conf, null);
+          S3AUtils.createAwsConf(conf, null, AWS_SERVICE_IDENTIFIER_STS);
       return intercept(clazz, exceptionText,
           () -> {
             AWSSecurityTokenService tokenService =
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index e6f32af..b974385 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -680,7 +680,7 @@
     MarshalledCredentials sc = MarshalledCredentialBinding
         .requestSessionCredentials(
           buildAwsCredentialsProvider(conf),
-          S3AUtils.createAwsConf(conf, bucket),
+          S3AUtils.createAwsConf(conf, bucket, AWS_SERVICE_IDENTIFIER_STS),
           conf.getTrimmed(ASSUMED_ROLE_STS_ENDPOINT,
               DEFAULT_ASSUMED_ROLE_STS_ENDPOINT),
           conf.getTrimmed(ASSUMED_ROLE_STS_ENDPOINT_REGION,
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java
new file mode 100644
index 0000000..ac759d0
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import java.util.concurrent.TimeUnit;
+
+import com.amazonaws.SignableRequest;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.Signer;
+import com.amazonaws.auth.SignerFactory;
+import org.assertj.core.api.Assertions;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
+
+/**
+ * Tests for the SignerManager.
+ */
+public class TestSignerManager {
+
+  @Rule
+  public Timeout testTimeout = new Timeout(
+      10_000L, TimeUnit.MILLISECONDS
+  );
+
+  @Test
+  public void testCustomSignerFailureIfNotRegistered() throws Exception {
+    // Expect the generic Exception.class so the test withstands future
+    // implementation changes; for now the SDK raises an NPE.
+    LambdaTestUtils.intercept(Exception.class,
+        () -> SignerFactory.createSigner("testsignerUnregistered", null));
+  }
+
+  @Test
+  public void testCustomSignerInitialization() {
+    Configuration config = new Configuration();
+    SignerForTest1.reset();
+    SignerForTest2.reset();
+    config.set(CUSTOM_SIGNERS, "testsigner1:" + SignerForTest1.class.getName());
+    SignerManager signerManager = new SignerManager();
+    signerManager.initCustomSigners(config);
+    Signer s1 = SignerFactory.createSigner("testsigner1", null);
+    s1.sign(null, null);
+    Assertions.assertThat(SignerForTest1.initialized)
+        .as(SignerForTest1.class.getName() + " not initialized")
+        .isTrue();
+  }
+
+  @Test
+  public void testMultipleCustomSignerInitialization() {
+    Configuration config = new Configuration();
+    SignerForTest1.reset();
+    SignerForTest2.reset();
+    config.set(CUSTOM_SIGNERS,
+        "testsigner1:" + SignerForTest1.class.getName() + "," + "testsigner2:"
+            + SignerForTest2.class.getName());
+    SignerManager signerManager = new SignerManager();
+    signerManager.initCustomSigners(config);
+    Signer s1 = SignerFactory.createSigner("testsigner1", null);
+    s1.sign(null, null);
+    Assertions.assertThat(SignerForTest1.initialized)
+        .as(SignerForTest1.class.getName() + " not initialized")
+        .isTrue();
+
+    Signer s2 = SignerFactory.createSigner("testsigner2", null);
+    s2.sign(null, null);
+    Assertions.assertThat(SignerForTest2.initialized)
+        .as(SignerForTest2.class.getName() + " not initialized")
+        .isTrue();
+  }
+
+  /**
+   * SignerForTest1.
+   */
+  @Private
+  public static class SignerForTest1 implements Signer {
+
+    private static boolean initialized = false;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      initialized = true;
+    }
+
+    public static void reset() {
+      initialized = false;
+    }
+  }
+
+  /**
+   * SignerForTest2.
+   */
+  @Private
+  public static class SignerForTest2 implements Signer {
+
+    private static boolean initialized = false;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      initialized = true;
+    }
+
+    public static void reset() {
+      initialized = false;
+    }
+  }
+}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java
index 2501662..2e8f1f0 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java
@@ -196,8 +196,9 @@
 
 
   protected Job createJob() throws IOException {
-    Job mrJob = Job.getInstance(getClusterBinding().getConf(),
-        getMethodName());
+    Configuration jobConf = getClusterBinding().getConf();
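+    // merge the test case's configuration so per-test overrides (for
+    // example, custom signer settings) propagate to the submitted job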
+    jobConf.addResource(getConfiguration());
+    Job mrJob = Job.getInstance(jobConf, getMethodName());
     patchConfigurationForCommitter(mrJob.getConfiguration());
     return mrJob;
   }
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java
new file mode 100644
index 0000000..c88b545
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.util.UUID;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION;
+import static org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes.ABFS_SCHEME;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME;
+
+/**
+ * Tests for Azure Blob FileSystem CLI.
+ */
+public class ITestAzureBlobFileSystemCLI extends AbstractAbfsIntegrationTest {
+
+  public ITestAzureBlobFileSystemCLI() throws Exception {
+    super();
+    final AbfsConfiguration conf = getConfiguration();
+    conf.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, false);
+  }
+
+  /**
+   * Test for HADOOP-16138: hadoop fs mkdir / of nonexistent abfs
+   * container raises NPE.
+   *
+   * The command should exit with status 1, and there must be no NPE.
+   *
+   * @throws Exception on any unexpected failure.
+   */
+  @Test
+  public void testMkdirRootNonExistentContainer() throws Exception {
+    final Configuration rawConf = getRawConfiguration();
+    FsShell fsShell = new FsShell(rawConf);
+    final String account =
+        rawConf.get(FS_AZURE_ABFS_ACCOUNT_NAME, null);
+
+    String nonExistentContainer = "nonexistent-" + UUID.randomUUID();
+
+    int result = fsShell.run(new String[] { "-mkdir",
+        ABFS_SCHEME + "://" + nonExistentContainer + "@" + account + "/" });
+
+    assertEquals(1, result);
+  }
+}
diff --git a/pom.ozone.xml b/pom.ozone.xml
index c550368..55c0205 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -90,7 +90,7 @@
 
     <failIfNoTests>false</failIfNoTests>
     <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-    <jetty.version>9.3.24.v20180605</jetty.version>
+    <jetty.version>9.3.25.v20180904</jetty.version>
     <test.exclude>_</test.exclude>
     <test.exclude.pattern>_</test.exclude.pattern>
 
@@ -124,6 +124,9 @@
     <jackson.version>1.9.13</jackson.version>
     <jackson2.version>2.9.9</jackson2.version>
 
+    <!-- jaegertracing version -->
+    <jaeger.version>0.34.0</jaeger.version>
+
     <!-- httpcomponents versions -->
     <httpclient.version>4.5.2</httpclient.version>
     <httpcore.version>4.4.4</httpcore.version>
@@ -139,7 +142,6 @@
     <protobuf.version>2.5.0</protobuf.version>
     <protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>
 
-    <zookeeper.version>3.4.13</zookeeper.version>
     <curator.version>2.12.0</curator.version>
     <findbugs.version>3.0.0</findbugs.version>
     <spotbugs.version>3.1.0-RC1</spotbugs.version>
@@ -1189,50 +1191,6 @@
       </dependency>
 
       <dependency>
-        <groupId>org.apache.zookeeper</groupId>
-        <artifactId>zookeeper</artifactId>
-        <version>${zookeeper.version}</version>
-        <exclusions>
-          <exclusion>
-            <!-- otherwise seems to drag in junit 3.8.1 via jline -->
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>com.sun.jdmk</groupId>
-            <artifactId>jmxtools</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>com.sun.jmx</groupId>
-            <artifactId>jmxri</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>org.jboss.netty</groupId>
-            <artifactId>netty</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>jline</groupId>
-            <artifactId>jline</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.zookeeper</groupId>
-        <artifactId>zookeeper</artifactId>
-        <version>${zookeeper.version}</version>
-        <type>test-jar</type>
-        <exclusions>
-          <exclusion>
-            <groupId>org.jboss.netty</groupId>
-            <artifactId>netty</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>jline</groupId>
-            <artifactId>jline</artifactId>
-          </exclusion>
-        </exclusions>
-      </dependency>
-      <dependency>
         <groupId>org.hsqldb</groupId>
         <artifactId>hsqldb</artifactId>
         <version>${hsqldb.version}</version>