Merge remote-tracking branch 'origin/trunk' into HDDS-1880-Decom
diff --git a/BUILDING.txt b/BUILDING.txt
index 6f33a60..03dffdd 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -6,7 +6,6 @@
 * Unix System
 * JDK 1.8
 * Maven 3.3 or later
-* ProtocolBuffer 3.7.1
 * CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * Cyrus SASL devel (if compiling native code)
@@ -62,16 +61,6 @@
   $ sudo apt-get -y install maven
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev libsasl2-dev
-* ProtocolBuffer 3.7.1 (required)
-  $ mkdir -p /opt/protobuf-3.7-src \
-        && curl -L -s -S \
-          https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
-          -o /opt/protobuf-3.7.1.tar.gz \
-        && tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C /opt/protobuf-3.7-src \
-        && cd /opt/protobuf-3.7-src \
-        && ./configure\
-        && make install \
-        && rm -rf /opt/protobuf-3.7-src
 
 Optional packages:
 
@@ -311,16 +300,6 @@
 to update SNAPSHOTs from external repos.
 
 ----------------------------------------------------------------------------------
-Protocol Buffer compiler
-
-The version of Protocol Buffer compiler, protoc, must match the version of the
-protobuf JAR.
-
-If you have multiple versions of protoc in your system, you can set in your
-build shell the HADOOP_PROTOC_PATH environment variable to point to the one you
-want to use for the Hadoop build. If you don't define this environment variable,
-protoc is looked up in the PATH.
-----------------------------------------------------------------------------------
 Importing projects to eclipse
 
 When you import the project to eclipse, install hadoop-maven-plugins at first.
@@ -405,15 +384,6 @@
 * Install native libraries, only openssl is required to compile native code,
 you may optionally install zlib, lz4, etc.
   $ brew install openssl
-* Protocol Buffers 3.7.1 (required)
-  $ wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
-  $ mkdir -p protobuf-3.7 && tar zxvf protobuf-java-3.7.1.tar.gz --strip-components 1 -C protobuf-3.7
-  $ cd protobuf-3.7
-  $ ./configure
-  $ make
-  $ make check
-  $ make install
-  $ protoc --version
 
 Note that building Hadoop 3.1.1/3.1.2/3.2.0 native code from source is broken
 on macOS. For 3.1.1/3.1.2, you need to manually backport YARN-8622. For 3.2.0,
@@ -439,7 +409,6 @@
 * Windows System
 * JDK 1.8
 * Maven 3.0 or later
-* ProtocolBuffer 3.7.1
 * CMake 3.1 or newer
 * Visual Studio 2010 Professional or Higher
 * Windows SDK 8.1 (if building CPU rate control for the container executor)
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 657c223..969d8bb 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -106,23 +106,6 @@
 ENV PATH "${PATH}:/opt/cmake/bin"
 
 ######
-# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial)
-######
-# hadolint ignore=DL3003
-RUN mkdir -p /opt/protobuf-src \
-    && curl -L -s -S \
-      https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz \
-      -o /opt/protobuf.tar.gz \
-    && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \
-    && cd /opt/protobuf-src \
-    && ./configure --prefix=/opt/protobuf \
-    && make install \
-    && cd /root \
-    && rm -rf /opt/protobuf-src
-ENV PROTOBUF_HOME /opt/protobuf
-ENV PATH "${PATH}:/opt/protobuf/bin"
-
-######
 # Install Apache Maven 3.3.9 (3.3.9 ships with Xenial)
 ######
 # hadolint ignore=DL3008
@@ -207,7 +190,7 @@
 ###
 
 # Hugo static website generator (for new hadoop site and Ozone docs)
-RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.30.2/hugo_0.30.2_Linux-64bit.deb \
+RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.58.3/hugo_0.58.3_Linux-64bit.deb \
     && dpkg --install hugo.deb \
     && rm hugo.deb
 
diff --git a/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml
new file mode 100644
index 0000000..40d78d0
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/dev-support/findbugs-exclude.xml
@@ -0,0 +1,18 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+</FindBugsFilter>
diff --git a/hadoop-cloud-storage-project/hadoop-cos/pom.xml b/hadoop-cloud-storage-project/hadoop-cos/pom.xml
new file mode 100644
index 0000000..839bd04
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/pom.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.3.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-cos</artifactId>
+  <name>Apache Hadoop Tencent COS Support</name>
+  <description>
+    This module contains code to support integration with Tencent COS.
+    It also declares the dependencies needed to work with COS.
+  </description>
+  <packaging>jar</packaging>
+
+  <properties>
+    <file.encoding>UTF-8</file.encoding>
+    <downloadSources>true</downloadSources>
+  </properties>
+
+  <profiles>
+    <profile>
+      <id>tests-off</id>
+      <activation>
+        <file>
+          <missing>src/test/resources/auth-keys.xml</missing>
+        </file>
+      </activation>
+      <properties>
+        <maven.test.skip>true</maven.test.skip>
+      </properties>
+    </profile>
+    <profile>
+      <id>tests-on</id>
+      <activation>
+        <file>
+          <exists>src/test/resources/auth-keys.xml</exists>
+        </file>
+      </activation>
+      <properties>
+        <maven.test.skip>false</maven.test.skip>
+      </properties>
+    </profile>
+  </profiles>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <findbugsXmlOutput>true</findbugsXmlOutput>
+          <xmlOutput>true</xmlOutput>
+          <excludeFilterFile>${basedir}/dev-support/findbugs-exclude.xml
+          </excludeFilterFile>
+          <effort>Max</effort>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>com.qcloud</groupId>
+      <artifactId>cos_api</artifactId>
+      <version>5.4.9</version>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-hs</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+  </dependencies>
+</project>
diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md b/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md
new file mode 100644
index 0000000..7049b3f
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/site/markdown/cloud-storage/index.md
@@ -0,0 +1,367 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Integration of Tencent COS in Hadoop
+
+## Introduction
+
+[Tencent COS](https://intl.cloud.tencent.com/product/cos) is a well-known object storage system provided by Tencent Corp. Hadoop-COS is a client that enables HDFS-based computing systems to use COS as their underlying storage system. The big-data processing systems verified to work with it include Hadoop MR, Spark and Alluxio. In addition, Druid can use COS as its deep storage by configuring the HDFS-Load-Plugin to integrate with Hadoop-COS.
+
+
+## Features
+
+- Supports Hadoop MapReduce and Spark writing data to COS and reading from it directly.
+
+- Implements the Hadoop FileSystem interfaces and provides a pseudo-hierarchical directory structure, the same as HDFS.
+
+- Supports multipart uploads for large files; a single file can be up to 19TB.
+
+- High performance and high availability. The performance difference between Hadoop-COS and HDFS is no more than 30%.
+
+
+> Notes:
+
+> Object Storage is not a file system and it has some limitations:
+
+> 1. Object storage is a key-value store and does not natively support hierarchical directories. Usually, a directory separator in the object key is used to simulate a hierarchical directory, such as "/hadoop/data/words.dat".
+
+> 2. COS object storage does not currently support append operations, which means that you cannot append content to the end of an existing object (file).
+
+> 3. Both `delete` and `rename` operations are non-atomic, which means that if an operation is interrupted, the result may be left in an inconsistent state.
+
+> 4. Object storages have different authorization models:
+
+>    - Directory permissions are reported as 777.
+
+>    - File permissions are reported as 666.
+
+>    - File owner is reported as the local current user.
+
+>    - File group is also reported as the local current user.
+
+> 5. Multipart uploads are supported for large files (up to 40TB), but the number of parts is limited to 10000.
+
+> 6. The number of files listed each time is limited to 1000.
+
+
+## Quick Start
+
+### Concepts
+
+- **Bucket**: A container for storing data in COS. Its name is made up of a user-defined bucket name and the user's appid.
+
+- **Appid**: Unique resource identifier for the user dimension.
+
+- **SecretId**: ID used to authenticate the user.
+
+- **SecretKey**: Key used to authenticate the user.
+
+- **Region**: The region where a bucket is located.
+
+- **CosN**: Hadoop-COS uses `cosn` as its URI scheme, so CosN is often used to refer to Hadoop-COS.
+
+
+### Usage
+
+#### System Requirements
+
+Linux kernel 2.6+
+
+
+#### Dependencies
+
+- cos_api (version 5.4.10 or later)
+- cos-java-sdk (version 2.0.6 recommended)
+- joda-time (version 2.9.9 recommended)
+- httpClient (version 4.5.1 or later recommended)
+- Jackson: jackson-core, jackson-databind, jackson-annotations (version 2.9.8 or later)
+- bcprov-jdk15on (version 1.59 recommended)
+
+
+#### Configure Properties
+
+##### URI and Region Properties
+
+If you plan to use COS as the default file system for Hadoop or other big data systems, you need to configure `fs.defaultFS` as the URI of Hadoop-COS in core-site.xml. Hadoop-COS uses `cosn` as its URI scheme, and the bucket as its URI host. At the same time, you need to explicitly set `fs.cosn.bucket.region` to indicate the region where your bucket is located.
+
+**NOTE**:
+
+- For Hadoop-COS, `fs.defaultFS` is optional. If you are only temporarily using COS as a data source for Hadoop, you do not need to set this property; just specify the full URI when you use it. For example: `hadoop fs -ls cosn://testBucket-125236746/testDir/test.txt`.
+
+- `fs.cosn.bucket.region` is a required property for Hadoop-COS, because Hadoop-COS must know the region of the bucket in use in order to construct the correct URL to access it.
+
+- COS supports multi-region storage, and different regions have different access domains by default. It is recommended to choose the storage region nearest to your own business scenario, so as to improve object upload and download speed. The available regions are listed at [https://intl.cloud.tencent.com/document/product/436/6224](https://intl.cloud.tencent.com/document/product/436/6224)
+
+The following is an example of the configuration format:
+
+```xml
+    <property>
+        <name>fs.defaultFS</name>
+        <value>cosn://<bucket-appid></value>
+        <description>
+            Optional: If you don't want to use CosN as the default file system, you don't need to configure it.
+        </description>
+    </property>
+
+    <property>
+        <name>fs.cosn.bucket.region</name>
+        <value>ap-xxx</value>
+        <description>The region where the bucket is located</description>
+    </property>
+
+```
+
+
+##### User Authentication Properties
+
+Each user needs to configure their credentials (secretId and secretKey) properly to access the objects stored in COS. These credentials can be obtained from the official console provided by Tencent Cloud.
+
+```xml
+    <property>
+        <name>fs.cosn.credentials.provider</name>
+        <value>org.apache.hadoop.fs.auth.SimpleCredentialProvider</value>
+        <description>
+
+            This option allows the user to specify how to get the credentials.
+            Comma-separated class names of credential provider classes which implement
+            com.qcloud.cos.auth.COSCredentialsProvider:
+
+            1. org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key
+            from fs.cosn.userinfo.secretId and fs.cosn.userinfo.secretKey in core-site.xml
+            2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key
+            from the system environment variables named COSN_SECRET_ID and COSN_SECRET_KEY
+
+            If unspecified, the default order of credential providers is:
+            1. org.apache.hadoop.fs.auth.SimpleCredentialProvider
+            2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider
+
+        </description>
+    </property>
+
+    <property>
+        <name>fs.cosn.userinfo.secretId</name>
+        <value>xxxxxxxxxxxxxxxxxxxxxxxxx</value>
+        <description>Tencent Cloud Secret Id </description>
+    </property>
+
+    <property>
+        <name>fs.cosn.userinfo.secretKey</name>
+        <value>xxxxxxxxxxxxxxxxxxxxxxxx</value>
+        <description>Tencent Cloud Secret Key</description>
+    </property>
+
+```
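+
+As an illustrative sketch (the bucket name below is hypothetical), the same credentials can also be supplied through environment variables when `EnvironmentVariableCredentialProvider` is in the provider chain:
+
+```shell
+# Export the credential environment variables read by
+# EnvironmentVariableCredentialProvider, then run any Hadoop command.
+export COSN_SECRET_ID=AKIDXXXXXXXXXXXXXXXXXXXX
+export COSN_SECRET_KEY=xxxxxxxxxxxxxxxxxxxxxxxx
+hadoop fs -ls cosn://examplebucket-1250000000/
+```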
+
+
+##### Integration Properties
+
+To enable Hadoop to integrate COS as its underlying file system, two options must be set explicitly: `fs.cosn.impl` must be set to `org.apache.hadoop.fs.cosn.CosNFileSystem`, and `fs.AbstractFileSystem.cosn.impl` must be set to `org.apache.hadoop.fs.cosn.CosN`.
+
+```xml
+    <property>
+        <name>fs.cosn.impl</name>
+        <value>org.apache.hadoop.fs.cosn.CosNFileSystem</value>
+        <description>The implementation class of the CosN Filesystem</description>
+    </property>
+
+    <property>
+        <name>fs.AbstractFileSystem.cosn.impl</name>
+        <value>org.apache.hadoop.fs.cosn.CosN</value>
+        <description>The implementation class of the CosN AbstractFileSystem.</description>
+    </property>
+
+```
+
+##### Other Runtime Properties
+
+Hadoop-COS provides a rich set of runtime properties to tune, and most of them do not require custom values because sensible defaults are provided.
+
+**It is important to note that**:
+
+- Hadoop-COS generates some temporary files and consumes some disk space. All temporary files are placed in the directory specified by the option `fs.cosn.tmp.dir` (default: /tmp/hadoop_cos);
+
+- The default block size is 8MB, which means that a single file of at most 78GB can be uploaded to the COS blob storage system, because a multipart upload supports at most 10,000 blocks. If you need to support larger single files, you must increase the block size accordingly by setting the property `fs.cosn.block.size`. For example, if the largest single file is 1TB, the block size must be at least (1 \* 1024 \* 1024 \* 1024 \* 1024) / 10000 = 109951163 bytes. Currently, the maximum supported single file is 19TB (block size: 2147483648). A small sketch of this calculation follows the configuration example below.
+
+```xml
+    <property>
+        <name>fs.cosn.tmp.dir</name>
+        <value>/tmp/hadoop_cos</value>
+        <description>Temporary files would be placed here.</description>
+    </property>
+
+    <property>
+        <name>fs.cosn.buffer.size</name>
+        <value>33554432</value>
+        <description>The total size of the buffer pool.</description>
+    </property>
+
+    <property>
+        <name>fs.cosn.block.size</name>
+        <value>8388608</value>
+        <description>
+        Block size used by the cosn filesystem, which is the part size for MultipartUpload. Considering that COS supports at most 10000 parts per upload, users should estimate the maximum size of a single file. For example, an 8MB part size allows writing a 78GB single file.
+        </description>
+    </property>
+
+    <property>
+        <name>fs.cosn.maxRetries</name>
+        <value>3</value>
+        <description>
+      The maximum number of retries for reading or writing files to COS, before throwing a failure to the application.
+        </description>
+    </property>
+
+    <property>
+        <name>fs.cosn.retry.interval.seconds</name>
+        <value>3</value>
+        <description>The number of seconds to sleep between each COS retry.</description>
+    </property>
+
+```
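+
+The block-size calculation above can be sketched as follows (a minimal, illustrative snippet; the 1TB target size is only an example):
+
+```shell
+# Estimate the minimum fs.cosn.block.size for a given maximum single-file size,
+# given that a COS multipart upload supports at most 10,000 parts.
+TARGET_FILE_SIZE=$((1 * 1024 * 1024 * 1024 * 1024))   # 1TB in bytes
+MAX_PARTS=10000
+echo $(( (TARGET_FILE_SIZE + MAX_PARTS - 1) / MAX_PARTS ))   # prints 109951163
+```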
+
+
+##### Properties Summary
+
+| properties | description | default value | required |
+|:----------:|:-----------|:-------------:|:--------:|
+| fs.defaultFS | Configure the default file system used by Hadoop. | None | NO |
+| fs.cosn.credentials.provider | This option allows the user to specify how to get the credentials. Comma-separated class names of credential provider classes which implement com.qcloud.cos.auth.COSCredentialsProvider: <br> 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider: Obtain the secret id and secret key from `fs.cosn.userinfo.secretId` and `fs.cosn.userinfo.secretKey` in core-site.xml; <br> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider: Obtain the secret id and secret key from the system environment variables named `COSN_SECRET_ID` and `COSN_SECRET_KEY`. <br> <br> If unspecified, the default order of credential providers is: <br> 1. org.apache.hadoop.fs.auth.SimpleCredentialProvider; <br> 2. org.apache.hadoop.fs.auth.EnvironmentVariableCredentialProvider. | None | NO |
+| fs.cosn.userinfo.secretId/secretKey | The API key information of your account | None | YES |
+| fs.cosn.bucket.region | The region where the bucket is located. | None | YES |
+| fs.cosn.impl | The implementation class of the CosN filesystem. | None | YES |
+| fs.AbstractFileSystem.cosn.impl | The implementation class of the CosN AbstractFileSystem. | None | YES |
+| fs.cosn.tmp.dir | Temporary files generated by cosn are stored here while the program is running. | /tmp/hadoop_cos | NO |
+| fs.cosn.buffer.size | The total size of the buffer pool. It must be greater than or equal to the block size. | 33554432 | NO |
+| fs.cosn.block.size | The size of a file block. Considering the limitation that each file can be divided into at most 10,000 parts to upload, the option must be set according to the maximum size of a single file in use. For example, an 8MB part size allows writing a 78GB single file. | 8388608 | NO |
+| fs.cosn.upload_thread_pool | Number of threads used for concurrent uploads when files are streamed to COS. | CPU core number * 3 | NO |
+| fs.cosn.read.ahead.block.size | The size of each read-ahead block. | 524288 (512KB) | NO |
+| fs.cosn.read.ahead.queue.size | The length of readahead queue. | 10 | NO |
+| fs.cosn.maxRetries | The maximum number of retries for reading or writing files to COS, before throwing a failure to the application. | 3 | NO |
+| fs.cosn.retry.interval.seconds | The number of seconds to sleep between each retry. | 3 | NO |
+
+
+#### Command Usage
+
+Command format: `hadoop fs -ls -R cosn://bucket-appid/<path>` or `hadoop fs -ls -R /<path>`; the latter requires `fs.defaultFS` to be set to a `cosn://` URI.
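+
+A brief sketch of both forms (the bucket name is hypothetical):
+
+```shell
+hadoop fs -ls -R cosn://examplebucket-1250000000/test/
+# The short form works only when fs.defaultFS points at the same cosn:// bucket.
+hadoop fs -ls -R /test/
+```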
+
+
+#### Example
+
+Use CosN as the underlying file system to run the WordCount routine:
+
+```shell
+bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-x.x.x.jar wordcount cosn://example/mr/input.txt cosn://example/mr/output
+```
+
+If CosN is set as the default file system for Hadoop, you can run it as follows:
+
+```shell
+bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-x.x.x.jar wordcount /mr/input.txt /mr/output
+```
+
+## Testing the hadoop-cos Module
+
+To test the CosN filesystem, the following two files, which pass authentication details to the test runner, are needed.
+
+1. auth-keys.xml
+2. core-site.xml
+
+These two files need to be created under the `hadoop-cloud-storage-project/hadoop-cos/src/test/resources` directory.
+
+
+### `auth-keys.xml`
+
+COS credentials can be specified in `auth-keys.xml`. At the same time, the presence of this file is the trigger for the CosN filesystem tests.
+The COS bucket URL should be provided by specifying the option `test.fs.cosn.name`.
+
+An example of `auth-keys.xml` is as follows:
+
+```xml
+<configuration>
+    <property>
+        <name>test.fs.cosn.name</name>
+        <value>cosn://testbucket-12xxxxxx</value>
+    </property>
+    <property>
+        <name>fs.cosn.bucket.region</name>
+        <value>ap-xxx</value>
+        <description>The region where the bucket is located</description>
+    </property>
+    <property>
+        <name>fs.cosn.userinfo.secretId</name>
+        <value>AKIDXXXXXXXXXXXXXXXXXXXX</value>
+    </property>
+    <property>
+        <name>fs.cosn.userinfo.secretKey</name>
+        <value>xxxxxxxxxxxxxxxxxxxxxxxxx</value>
+    </property>
+</configuration>
+```
+
+Without this file, all tests in this module will be skipped.
+
+### `core-site.xml`
+
+This file pre-exists and sources the configuration created in `auth-keys.xml`.
+In most cases, no modification is needed, unless a specific, non-default property needs to be set during testing.
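+
+Once `auth-keys.xml` is in place, the module tests can be run with Maven; a minimal sketch, assuming the build is executed from the repository root:
+
+```shell
+cd hadoop-cloud-storage-project/hadoop-cos
+mvn clean test
+```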
+
+### `contract-test-options.xml`
+
+All configuration needed to support the contract tests must be specified in `contract-test-options.xml`. Here is an example of `contract-test-options.xml`:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+    <include xmlns="http://www.w3.org/2001/XInclude"
+             href="auth-keys.xml"/>
+    <property>
+        <name>fs.contract.test.fs.cosn</name>
+        <value>cosn://testbucket-12xxxxxx</value>
+    </property>
+
+    <property>
+        <name>fs.cosn.bucket.region</name>
+        <value>ap-xxx</value>
+        <description>The region where the bucket is located</description>
+    </property>
+
+</configuration>
+```
+
+If the option `fs.contract.test.fs.cosn` is not defined in the file, all contract tests will be skipped.
+
+## Other Issues
+
+### Performance Loss
+
+The I/O performance of COS is lower than that of HDFS in principle, even on virtual clusters running on Tencent CVM.
+
+The main reasons can be attributed to the following points:
+
+- HDFS replicates data for faster queries.
+
+- HDFS is significantly faster for many “metadata” operations: listing the contents of a directory, calling getFileStatus() on a path, creating or deleting directories.
+
+- HDFS stores data on local hard disks, avoiding network traffic when the code can be executed on that host, whereas accessing an object stored in COS requires a network request almost every time. This is the critical factor hurting I/O performance. Hadoop-COS does a lot of optimization work for this, such as the read-ahead queue, the upload buffer pool, and the concurrent upload thread pool.
+
+- File I/O performing many seek calls or positioned reads will also encounter performance problems, due to the size of the HTTP requests made. Despite the read-ahead cache optimizations, a large number of random reads can still cause frequent network requests.
+
+- On HDFS, both `rename` and `mv` for a directory or a file are atomic, O(1) operations, but in COS they must be implemented as a `copy` followed by a `delete`. Therefore, renaming or moving a COS object is not only slow, but also makes it difficult to guarantee data consistency.
+
+At present, using the COS blob storage system through Hadoop-COS incurs roughly a 20% ~ 25% performance loss compared to HDFS. However, the cost of using COS is lower than that of HDFS, including both storage and maintenance costs.
diff --git a/hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css b/hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css
new file mode 100644
index 0000000..7315db3
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/site/resources/css/site.css
@@ -0,0 +1,29 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java
new file mode 100644
index 0000000..a4ee4d5
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/BufferPool.java
@@ -0,0 +1,245 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * BufferPool class is used to manage the buffers during program execution.
+ * It is provided as a thread-safe singleton, and
+ * keeps the program's memory and disk consumption at a stable value.
+ */
+public final class BufferPool {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BufferPool.class);
+
+  private static BufferPool ourInstance = new BufferPool();
+
+  /**
+   * Use this method to get the instance of BufferPool.
+   *
+   * @return the instance of BufferPool
+   */
+  public static BufferPool getInstance() {
+    return ourInstance;
+  }
+
+  private BlockingQueue<ByteBuffer> bufferPool = null;
+  private long singleBufferSize = 0;
+  private File diskBufferDir = null;
+
+  private AtomicBoolean isInitialize = new AtomicBoolean(false);
+
+  private BufferPool() {
+  }
+
+  private File createDir(String dirPath) throws IOException {
+    File dir = new File(dirPath);
+    if (null != dir) {
+      if (!dir.exists()) {
+        LOG.debug("Buffer dir: [{}] does not exists. create it first.",
+            dirPath);
+        if (dir.mkdirs()) {
+          if (!dir.setWritable(true) || !dir.setReadable(true)
+              || !dir.setExecutable(true)) {
+            LOG.warn("Set the buffer dir: [{}]'s permission [writable,"
+                + "readable, executable] failed.", dir.getAbsolutePath());
+          }
+          LOG.debug("Buffer dir: [{}] is created successfully.",
+              dir.getAbsolutePath());
+        } else {
+          // Once again, check if it has been created successfully.
+          // Prevent problems created by multiple processes at the same time.
+          if (!dir.exists()) {
+            throw new IOException("buffer dir:" + dir.getAbsolutePath()
+                + " is created unsuccessfully");
+          }
+        }
+      } else {
+        LOG.debug("buffer dir: {} already exists.", dirPath);
+      }
+    } else {
+      // dirPath is used here directly; dir would be null in this branch.
+      throw new IOException("failed to create the buffer dir: " + dirPath);
+    }
+
+    return dir;
+  }
+
+  /**
+   * Create buffers correctly by reading the buffer file directory,
+   * buffer pool size,and file block size in the configuration.
+   *
+   * @param conf Provides configurations for the Hadoop runtime
+   * @throws IOException Configuration errors,
+   *                     insufficient or no access for memory or
+   *                     disk space may cause this exception
+   */
+  public synchronized void initialize(Configuration conf)
+      throws IOException {
+    if (this.isInitialize.get()) {
+      return;
+    }
+    this.singleBufferSize = conf.getLong(CosNConfigKeys.COSN_BLOCK_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_BLOCK_SIZE);
+
+    // The block size of CosN can only support up to 2GB.
+    if (this.singleBufferSize < Constants.MIN_PART_SIZE
+        || this.singleBufferSize > Constants.MAX_PART_SIZE) {
+      String exceptionMsg = String.format(
+          "The block size of CosN is limited to %d to %d",
+          Constants.MIN_PART_SIZE, Constants.MAX_PART_SIZE);
+      throw new IOException(exceptionMsg);
+    }
+
+    long memoryBufferLimit = conf.getLong(
+        CosNConfigKeys.COSN_UPLOAD_BUFFER_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_UPLOAD_BUFFER_SIZE);
+
+    this.diskBufferDir = this.createDir(conf.get(
+        CosNConfigKeys.COSN_BUFFER_DIR_KEY,
+        CosNConfigKeys.DEFAULT_BUFFER_DIR));
+
+    int bufferPoolSize = (int) (memoryBufferLimit / this.singleBufferSize);
+    if (0 == bufferPoolSize) {
+      throw new IOException(
+          String.format("The total size of the buffer [%d] is " +
+                  "smaller than a single block [%d]. "
+                  + "Please consider increasing the buffer size " +
+                  "or decreasing the block size.",
+              memoryBufferLimit, this.singleBufferSize));
+    }
+    this.bufferPool = new LinkedBlockingQueue<>(bufferPoolSize);
+    for (int i = 0; i < bufferPoolSize; i++) {
+      this.bufferPool.add(ByteBuffer.allocateDirect(
+          (int) this.singleBufferSize));
+    }
+
+    this.isInitialize.set(true);
+  }
+
+  /**
+   * Check if the buffer pool has been initialized.
+   *
+   * @throws IOException if the buffer pool is not initialized
+   */
+  private void checkInitialize() throws IOException {
+    if (!this.isInitialize.get()) {
+      throw new IOException(
+          "The buffer pool has not been initialized yet");
+    }
+  }
+
+  /**
+   * Obtain a buffer from this buffer pool through the method.
+   *
+   * @param bufferSize expected buffer size to get
+   * @return a buffer wrapper that satisfies the bufferSize.
+   * @throws IOException if the buffer pool not initialized,
+   *                     or the bufferSize parameter is not within
+   *                     the range[1MB to the single buffer size]
+   */
+  public ByteBufferWrapper getBuffer(int bufferSize) throws IOException {
+    this.checkInitialize();
+    if (bufferSize > 0 && bufferSize <= this.singleBufferSize) {
+      ByteBufferWrapper byteBufferWrapper = this.getByteBuffer();
+      if (null == byteBufferWrapper) {
+        // Use a disk buffer when the memory buffer is not enough
+        byteBufferWrapper = this.getMappedBuffer();
+      }
+      return byteBufferWrapper;
+    } else {
+      String exceptionMsg = String.format(
+          "Parameter buffer size out of range: 1048576 to %d",
+          this.singleBufferSize
+      );
+      throw new IOException(exceptionMsg);
+    }
+  }
+
+  /**
+   * Get a ByteBufferWrapper from the buffer pool.
+   *
+   * @return a new byte buffer wrapper
+   * @throws IOException if the buffer pool is not initialized
+   */
+  private ByteBufferWrapper getByteBuffer() throws IOException {
+    this.checkInitialize();
+    ByteBuffer buffer = this.bufferPool.poll();
+    return buffer == null ? null : new ByteBufferWrapper(buffer);
+  }
+
+  /**
+   * Get a mapped buffer from the buffer pool.
+   *
+   * @return a new mapped buffer
+   * @throws IOException If the buffer pool is not initialized.
+   *                     or some I/O error occurs
+   */
+  private ByteBufferWrapper getMappedBuffer() throws IOException {
+    this.checkInitialize();
+    File tmpFile = File.createTempFile(Constants.BLOCK_TMP_FILE_PREFIX,
+        Constants.BLOCK_TMP_FILE_SUFFIX, this.diskBufferDir);
+    tmpFile.deleteOnExit();
+    RandomAccessFile raf = new RandomAccessFile(tmpFile, "rw");
+    raf.setLength(this.singleBufferSize);
+    MappedByteBuffer buf = raf.getChannel().map(
+        FileChannel.MapMode.READ_WRITE, 0, this.singleBufferSize);
+    return new ByteBufferWrapper(buf, raf, tmpFile);
+  }
+
+  /**
+   * Return the byte buffer wrapper to the buffer pool.
+   *
+   * @param byteBufferWrapper the byte buffer wrapper obtained from the pool
+   * @throws InterruptedException if interrupted while waiting
+   * @throws IOException          if some I/O error occurs
+   */
+  public void returnBuffer(ByteBufferWrapper byteBufferWrapper)
+      throws InterruptedException, IOException {
+    if (null == this.bufferPool || null == byteBufferWrapper) {
+      return;
+    }
+
+    if (byteBufferWrapper.isDiskBuffer()) {
+      byteBufferWrapper.close();
+    } else {
+      ByteBuffer byteBuffer = byteBufferWrapper.getByteBuffer();
+      if (null != byteBuffer) {
+        byteBuffer.clear();
+        LOG.debug("Return the buffer to the buffer pool.");
+        if (!this.bufferPool.offer(byteBuffer)) {
+          LOG.error("Return the buffer to buffer pool failed.");
+        }
+      }
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferInputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferInputStream.java
new file mode 100644
index 0000000..440a7de
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferInputStream.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.InvalidMarkException;
+
+/**
+ * The input stream class is used for buffered files.
+ * The purpose of providing this class is to optimize buffer read performance.
+ */
+public class ByteBufferInputStream extends InputStream {
+  private ByteBuffer byteBuffer;
+  private boolean isClosed;
+
+  public ByteBufferInputStream(ByteBuffer byteBuffer) throws IOException {
+    if (null == byteBuffer) {
+      throw new IOException("byte buffer is null");
+    }
+    this.byteBuffer = byteBuffer;
+    this.isClosed = false;
+  }
+
+  @Override
+  public int read() throws IOException {
+    if (null == this.byteBuffer) {
+      throw new IOException("this byte buffer for InputStream is null");
+    }
+    if (!this.byteBuffer.hasRemaining()) {
+      return -1;
+    }
+    return this.byteBuffer.get() & 0xFF;
+  }
+
+  @Override
+  public synchronized void mark(int readLimit) {
+    if (!this.markSupported()) {
+      return;
+    }
+    this.byteBuffer.mark();
+    // Parameter readLimit is ignored
+  }
+
+  @Override
+  public boolean markSupported() {
+    return true;
+  }
+
+  @Override
+  public synchronized void reset() throws IOException {
+    if (this.isClosed) {
+      throw new IOException("Closed in InputStream");
+    }
+    try {
+      this.byteBuffer.reset();
+    } catch (InvalidMarkException e) {
+      throw new IOException("Invalid mark");
+    }
+  }
+
+  @Override
+  public int available() {
+    return this.byteBuffer.remaining();
+  }
+
+  @Override
+  public void close() {
+    this.byteBuffer.rewind();
+    this.byteBuffer = null;
+    this.isClosed = true;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferOutputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferOutputStream.java
new file mode 100644
index 0000000..9e6a6fc
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferOutputStream.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * The output stream class is used for buffered files.
+ * The purpose of providing this class is to optimize buffer write performance.
+ */
+public class ByteBufferOutputStream extends OutputStream {
+  private ByteBuffer byteBuffer;
+  private boolean isFlush;
+  private boolean isClosed;
+
+  public ByteBufferOutputStream(ByteBuffer byteBuffer) throws IOException {
+    if (null == byteBuffer) {
+      throw new IOException("byte buffer is null");
+    }
+    this.byteBuffer = byteBuffer;
+    this.byteBuffer.clear();
+    this.isFlush = false;
+    this.isClosed = false;
+  }
+
+  @Override
+  public void write(int b) {
+    byte[] singleBytes = new byte[1];
+    singleBytes[0] = (byte) b;
+    this.byteBuffer.put(singleBytes, 0, 1);
+    this.isFlush = false;
+  }
+
+  @Override
+  public void flush() {
+    if (this.isFlush) {
+      return;
+    }
+    this.isFlush = true;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (this.isClosed) {
+      return;
+    }
+    if (null == this.byteBuffer) {
+      throw new IOException("Can not close a null object");
+    }
+
+    this.flush();
+    this.byteBuffer.flip();
+    this.byteBuffer = null;
+    this.isFlush = false;
+    this.isClosed = true;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferWrapper.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferWrapper.java
new file mode 100644
index 0000000..a7d1c5f
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/ByteBufferWrapper.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.util.CleanerUtil;
+
+/**
+ * The wrapper for memory buffers and disk buffers.
+ */
+public class ByteBufferWrapper {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ByteBufferWrapper.class);
+  private ByteBuffer byteBuffer;
+  private File file;
+  private RandomAccessFile randomAccessFile;
+
+  ByteBufferWrapper(ByteBuffer byteBuffer) {
+    this(byteBuffer, null, null);
+  }
+
+  ByteBufferWrapper(ByteBuffer byteBuffer, RandomAccessFile randomAccessFile,
+      File file) {
+    this.byteBuffer = byteBuffer;
+    this.file = file;
+    this.randomAccessFile = randomAccessFile;
+  }
+
+  public ByteBuffer getByteBuffer() {
+    return this.byteBuffer;
+  }
+
+  boolean isDiskBuffer() {
+    return this.file != null && this.randomAccessFile != null;
+  }
+
+  private void munmap(MappedByteBuffer buffer) {
+    if (CleanerUtil.UNMAP_SUPPORTED) {
+      try {
+        CleanerUtil.getCleaner().freeBuffer(buffer);
+      } catch (IOException e) {
+        LOG.warn("Failed to unmap the buffer", e);
+      }
+    } else {
+      LOG.trace(CleanerUtil.UNMAP_NOT_SUPPORTED_REASON);
+    }
+  }
+
+  void close() throws IOException {
+    if (null != this.byteBuffer) {
+      this.byteBuffer.clear();
+    }
+
+    IOException exception = null;
+    // catch all exceptions, and try to free up resources that can be freed.
+    try {
+      if (null != randomAccessFile) {
+        this.randomAccessFile.close();
+      }
+    } catch (IOException e) {
+      LOG.error("Close the random access file occurs an exception.", e);
+      exception = e;
+    }
+
+    if (this.byteBuffer instanceof MappedByteBuffer) {
+      munmap((MappedByteBuffer) this.byteBuffer);
+    }
+
+    if (null != this.file && this.file.exists()) {
+      if (!this.file.delete()) {
+        LOG.warn("Delete the tmp file: [{}] failed.",
+            this.file.getAbsolutePath());
+      }
+    }
+
+    if (null != exception) {
+      throw exception;
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Constants.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Constants.java
new file mode 100644
index 0000000..f67e07e
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Constants.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+/**
+ * constant definition.
+ */
+public final class Constants {
+  private Constants() {
+  }
+
+  public static final String BLOCK_TMP_FILE_PREFIX = "cos_";
+  public static final String BLOCK_TMP_FILE_SUFFIX = "_local_block";
+
+  // The maximum number of files listed in a single COS list request.
+  public static final int COS_MAX_LISTING_LENGTH = 999;
+
+  // The maximum number of parts supported by a multipart uploading.
+  public static final int MAX_PART_NUM = 10000;
+
+  // The maximum size of a part
+  public static final long MAX_PART_SIZE = (long) 2 * Unit.GB;
+  // The minimum size of a part
+  public static final long MIN_PART_SIZE = (long) Unit.MB;
+
+  public static final String COSN_SECRET_ID_ENV = "COSN_SECRET_ID";
+  public static final String COSN_SECRET_KEY_ENV = "COSN_SECRET_KEY";
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosN.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosN.java
new file mode 100644
index 0000000..990fcbd
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosN.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+
+/**
+ * CosN implementation for the Hadoop's AbstractFileSystem.
+ * This implementation delegates to {@link CosNFileSystem}.
+ */
+public class CosN extends DelegateToFileSystem {
+  public CosN(URI theUri, Configuration conf)
+      throws IOException, URISyntaxException {
+    super(theUri, new CosNFileSystem(), conf, CosNFileSystem.SCHEME, false);
+  }
+
+  @Override
+  public int getUriDefaultPort() {
+    return -1;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNConfigKeys.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNConfigKeys.java
new file mode 100644
index 0000000..4d98d5f
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNConfigKeys.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+/**
+ * This class contains constants for configuration keys used in COS.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CosNConfigKeys extends CommonConfigurationKeys {
+  public static final String USER_AGENT = "fs.cosn.user.agent";
+  public static final String DEFAULT_USER_AGENT = "cos-hadoop-plugin-v5.3";
+
+  public static final String COSN_CREDENTIALS_PROVIDER =
+      "fs.cosn.credentials.provider";
+  public static final String COSN_SECRET_ID_KEY = "fs.cosn.userinfo.secretId";
+  public static final String COSN_SECRET_KEY_KEY = "fs.cosn.userinfo.secretKey";
+  public static final String COSN_REGION_KEY = "fs.cosn.bucket.region";
+  public static final String COSN_ENDPOINT_SUFFIX_KEY =
+      "fs.cosn.bucket.endpoint_suffix";
+
+  public static final String COSN_USE_HTTPS_KEY = "fs.cosn.useHttps";
+  public static final boolean DEFAULT_USE_HTTPS = false;
+
+  public static final String COSN_BUFFER_DIR_KEY = "fs.cosn.tmp.dir";
+  public static final String DEFAULT_BUFFER_DIR = "/tmp/hadoop_cos";
+
+  public static final String COSN_UPLOAD_BUFFER_SIZE_KEY =
+      "fs.cosn.buffer.size";
+  public static final long DEFAULT_UPLOAD_BUFFER_SIZE = 32 * Unit.MB;
+
+  public static final String COSN_BLOCK_SIZE_KEY = "fs.cosn.block.size";
+  public static final long DEFAULT_BLOCK_SIZE = 8 * Unit.MB;
+
+  public static final String COSN_MAX_RETRIES_KEY = "fs.cosn.maxRetries";
+  public static final int DEFAULT_MAX_RETRIES = 3;
+  public static final String COSN_RETRY_INTERVAL_KEY =
+      "fs.cosn.retry.interval.seconds";
+  public static final long DEFAULT_RETRY_INTERVAL = 3;
+
+  public static final String UPLOAD_THREAD_POOL_SIZE_KEY =
+      "fs.cosn.upload_thread_pool";
+  public static final int DEFAULT_UPLOAD_THREAD_POOL_SIZE = 1;
+
+  public static final String COPY_THREAD_POOL_SIZE_KEY =
+      "fs.cosn.copy_thread_pool";
+  public static final int DEFAULT_COPY_THREAD_POOL_SIZE = 1;
+
+  /**
+   * This is the maximum time that excess idle threads will wait for new tasks
+   * before terminating. The time unit for it is second.
+   */
+  public static final String THREAD_KEEP_ALIVE_TIME_KEY =
+      "fs.cosn.threads.keep_alive_time";
+  // The default keep_alive_time is 60 seconds.
+  public static final long DEFAULT_THREAD_KEEP_ALIVE_TIME = 60L;
+
+  public static final String READ_AHEAD_BLOCK_SIZE_KEY =
+      "fs.cosn.read.ahead.block.size";
+  public static final long DEFAULT_READ_AHEAD_BLOCK_SIZE = 512 * Unit.KB;
+  public static final String READ_AHEAD_QUEUE_SIZE =
+      "fs.cosn.read.ahead.queue.size";
+  public static final int DEFAULT_READ_AHEAD_QUEUE_SIZE = 5;
+
+  public static final String MAX_CONNECTION_NUM = "fs.cosn.max.connection.num";
+  public static final int DEFAULT_MAX_CONNECTION_NUM = 2048;
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileContext.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileContext.java
new file mode 100644
index 0000000..39a2e91
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileContext.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * The context of a copy task, covering concurrency control and the
+ * asynchronous collection of copy results.
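+ * <p>
+ * Typical usage (sketch): the caller submits the copy tasks that share this
+ * context, then locks the context and calls {@link #awaitAllFinish(int)}
+ * with the number of submitted copies; each finished task increments the
+ * counter and signals the waiter under the same lock.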
+ */
+public class CosNCopyFileContext {
+
+  private final ReentrantLock lock = new ReentrantLock();
+  private Condition readyCondition = lock.newCondition();
+
+  private AtomicBoolean copySuccess = new AtomicBoolean(true);
+  private AtomicInteger copiesFinish = new AtomicInteger(0);
+
+  public void lock() {
+    this.lock.lock();
+  }
+
+  public void unlock() {
+    this.lock.unlock();
+  }
+
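+  /**
+   * Wait until the given number of copies have finished. The caller must
+   * hold the context lock, since this method waits on the lock's condition.
+   *
+   * @param waitCopiesFinish the number of copy tasks to wait for
+   * @throws InterruptedException if the waiting thread is interrupted
+   */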
+  public void awaitAllFinish(int waitCopiesFinish) throws InterruptedException {
+    while (this.copiesFinish.get() != waitCopiesFinish) {
+      this.readyCondition.await();
+    }
+  }
+
+  public void signalAll() {
+    this.readyCondition.signalAll();
+  }
+
+  public boolean isCopySuccess() {
+    return this.copySuccess.get();
+  }
+
+  public void setCopySuccess(boolean copySuccess) {
+    this.copySuccess.set(copySuccess);
+  }
+
+  public void incCopiesFinish() {
+    this.copiesFinish.addAndGet(1);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileTask.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileTask.java
new file mode 100644
index 0000000..33d38b8
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNCopyFileTask.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Used by {@link CosNFileSystem} as a task that is submitted
+ * to the thread pool to accelerate the copy process.
+ * Each task is responsible for copying the source key to the destination.
+ */
+public class CosNCopyFileTask implements Runnable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CosNCopyFileTask.class);
+
+  private NativeFileSystemStore store;
+  private String srcKey;
+  private String dstKey;
+  private CosNCopyFileContext cosCopyFileContext;
+
+  public CosNCopyFileTask(NativeFileSystemStore store, String srcKey,
+      String dstKey, CosNCopyFileContext cosCopyFileContext) {
+    this.store = store;
+    this.srcKey = srcKey;
+    this.dstKey = dstKey;
+    this.cosCopyFileContext = cosCopyFileContext;
+  }
+
+  @Override
+  public void run() {
+    boolean fail = false;
+    LOG.info("{} is copying...", Thread.currentThread().getName());
+    try {
+      this.store.copy(srcKey, dstKey);
+    } catch (IOException e) {
+      LOG.warn("Exception thrown when copy from {} to {}, exception:{}",
+          this.srcKey, this.dstKey, e);
+      fail = true;
+    } finally {
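+      // Update the shared context under its lock: record a failure if the
+      // copy threw, count this copy as finished, and wake up the waiter.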
+      this.cosCopyFileContext.lock();
+      if (fail) {
+        cosCopyFileContext.setCopySuccess(false);
+      }
+      cosCopyFileContext.incCopiesFinish();
+      cosCopyFileContext.signalAll();
+      this.cosCopyFileContext.unlock();
+    }
+  }
+
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java
new file mode 100644
index 0000000..a5dcdda
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileReadTask.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+
+/**
+ * Used by {@link CosNInputStream} as an asynchronous task
+ * submitted to the thread pool.
+ * Each task is responsible for reading a part of a large file.
+ * It is used to pre-read data from COS in order to accelerate the file
+ * reading process.
+ */
+public class CosNFileReadTask implements Runnable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CosNFileReadTask.class);
+
+  private final String key;
+  private final NativeFileSystemStore store;
+  private final CosNInputStream.ReadBuffer readBuffer;
+
+  private RetryPolicy retryPolicy;
+
+  public CosNFileReadTask(
+      Configuration conf,
+      String key, NativeFileSystemStore store,
+      CosNInputStream.ReadBuffer readBuffer) {
+    this.key = key;
+    this.store = store;
+    this.readBuffer = readBuffer;
+
+    RetryPolicy defaultPolicy =
+        RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+            conf.getInt(
+                CosNConfigKeys.COSN_MAX_RETRIES_KEY,
+                CosNConfigKeys.DEFAULT_MAX_RETRIES),
+            conf.getLong(
+                CosNConfigKeys.COSN_RETRY_INTERVAL_KEY,
+                CosNConfigKeys.DEFAULT_RETRY_INTERVAL),
+            TimeUnit.SECONDS);
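+    // Retry IOExceptions with the fixed-sleep policy defined above, but
+    // fail immediately on programming errors such as IndexOutOfBounds or
+    // NullPointerException.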
+    Map<Class<? extends Exception>, RetryPolicy> retryPolicyMap =
+        new HashMap<>();
+    retryPolicyMap.put(IOException.class, defaultPolicy);
+    retryPolicyMap.put(
+        IndexOutOfBoundsException.class, RetryPolicies.TRY_ONCE_THEN_FAIL);
+    retryPolicyMap.put(
+        NullPointerException.class, RetryPolicies.TRY_ONCE_THEN_FAIL);
+
+    this.retryPolicy = RetryPolicies.retryByException(
+        defaultPolicy, retryPolicyMap);
+  }
+
+  @Override
+  public void run() {
+    int retries = 0;
+    RetryPolicy.RetryAction retryAction;
+    LOG.info("{} is reading...", Thread.currentThread().getName());
+    try {
+      this.readBuffer.lock();
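+      // The whole read runs under the buffer's lock; the consumer in
+      // CosNInputStream#reopen waits on the buffer's condition until a
+      // terminal status has been set and signalled below.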
+      do {
+        try {
+          InputStream inputStream = this.store.retrieveBlock(this.key,
+              this.readBuffer.getStart(), this.readBuffer.getEnd());
+          IOUtils.readFully(inputStream, this.readBuffer.getBuffer(), 0,
+              readBuffer.getBuffer().length);
+          inputStream.close();
+          this.readBuffer.setStatus(CosNInputStream.ReadBuffer.SUCCESS);
+          break;
+        } catch (IOException e) {
+          this.readBuffer.setStatus(CosNInputStream.ReadBuffer.ERROR);
+          LOG.warn("Exception occurred when retrieving the block range "
+              + "start: {}, end: {}.",
+              this.readBuffer.getStart(), this.readBuffer.getEnd());
+          try {
+            retryAction = this.retryPolicy.shouldRetry(
+                e, retries++, 0, true);
+            if (retryAction.action
+                == RetryPolicy.RetryAction.RetryDecision.RETRY) {
+              Thread.sleep(retryAction.delayMillis);
+            }
+          } catch (Exception e1) {
+            String errMsg = String.format("Exception occurs when retry[%s] "
+                    + "to retrieve the block range start: %s, end:%s",
+                this.retryPolicy.toString(),
+                String.valueOf(this.readBuffer.getStart()),
+                String.valueOf(this.readBuffer.getEnd()));
+            LOG.error(errMsg, e1);
+            break;
+          }
+        }
+      } while (retryAction.action ==
+          RetryPolicy.RetryAction.RetryDecision.RETRY);
+      this.readBuffer.signalAll();
+    } finally {
+      this.readBuffer.unLock();
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java
new file mode 100644
index 0000000..333b349
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNFileSystem.java
@@ -0,0 +1,814 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.util.concurrent.ListeningExecutorService;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BufferedFSInputStream;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
+import org.apache.hadoop.util.Progressable;
+
+/**
+ * The core CosN Filesystem implementation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class CosNFileSystem extends FileSystem {
+  static final Logger LOG = LoggerFactory.getLogger(CosNFileSystem.class);
+
+  public static final String SCHEME = "cosn";
+  public static final String PATH_DELIMITER = Path.SEPARATOR;
+
+  private URI uri;
+  private String bucket;
+  private NativeFileSystemStore store;
+  private Path workingDir;
+  private String owner = "Unknown";
+  private String group = "Unknown";
+
+  private ListeningExecutorService boundedIOThreadPool;
+  private ListeningExecutorService boundedCopyThreadPool;
+
+  public CosNFileSystem() {
+  }
+
+  public CosNFileSystem(NativeFileSystemStore store) {
+    this.store = store;
+  }
+
+  /**
+   * Return the protocol scheme for the FileSystem.
+   *
+   * @return <code>cosn</code>
+   */
+  @Override
+  public String getScheme() {
+    return CosNFileSystem.SCHEME;
+  }
+
+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    super.initialize(name, conf);
+    this.bucket = name.getHost();
+    if (this.store == null) {
+      this.store = createDefaultStore(conf);
+    }
+    this.store.initialize(name, conf);
+    setConf(conf);
+    this.uri = URI.create(name.getScheme() + "://" + name.getAuthority());
+    this.workingDir = new Path("/user",
+        System.getProperty("user.name")).makeQualified(
+        this.uri,
+        this.getWorkingDirectory());
+    this.owner = getOwnerId();
+    this.group = getGroupId();
+    LOG.debug("owner:" + owner + ", group:" + group);
+
+    BufferPool.getInstance().initialize(this.getConf());
+
+    // initialize the thread pool
+    int uploadThreadPoolSize = this.getConf().getInt(
+        CosNConfigKeys.UPLOAD_THREAD_POOL_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_UPLOAD_THREAD_POOL_SIZE
+    );
+    int readAheadPoolSize = this.getConf().getInt(
+        CosNConfigKeys.READ_AHEAD_QUEUE_SIZE,
+        CosNConfigKeys.DEFAULT_READ_AHEAD_QUEUE_SIZE
+    );
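+    // The shared IO pool serves both uploads and read-ahead; its size is
+    // derived from the upload thread pool size plus a third of the
+    // read-ahead queue size.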
+    int ioThreadPoolSize = uploadThreadPoolSize + readAheadPoolSize / 3;
+    long threadKeepAlive = this.getConf().getLong(
+        CosNConfigKeys.THREAD_KEEP_ALIVE_TIME_KEY,
+        CosNConfigKeys.DEFAULT_THREAD_KEEP_ALIVE_TIME
+    );
+    this.boundedIOThreadPool = BlockingThreadPoolExecutorService.newInstance(
+        ioThreadPoolSize / 2, ioThreadPoolSize,
+        threadKeepAlive, TimeUnit.SECONDS,
+        "cos-transfer-thread-pool");
+    int copyThreadPoolSize = this.getConf().getInt(
+        CosNConfigKeys.COPY_THREAD_POOL_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_COPY_THREAD_POOL_SIZE
+    );
+    this.boundedCopyThreadPool = BlockingThreadPoolExecutorService.newInstance(
+        CosNConfigKeys.DEFAULT_COPY_THREAD_POOL_SIZE, copyThreadPoolSize,
+        60L, TimeUnit.SECONDS,
+        "cos-copy-thread-pool");
+  }
+
+  private static NativeFileSystemStore createDefaultStore(Configuration conf) {
+    NativeFileSystemStore store = new CosNativeFileSystemStore();
+    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+        conf.getInt(CosNConfigKeys.COSN_MAX_RETRIES_KEY,
+            CosNConfigKeys.DEFAULT_MAX_RETRIES),
+        conf.getLong(CosNConfigKeys.COSN_RETRY_INTERVAL_KEY,
+            CosNConfigKeys.DEFAULT_RETRY_INTERVAL),
+        TimeUnit.SECONDS);
+    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+        new HashMap<>();
+
+    exceptionToPolicyMap.put(IOException.class, basePolicy);
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL,
+        exceptionToPolicyMap);
+    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
+    methodNameToPolicyMap.put("storeFile", methodPolicy);
+    methodNameToPolicyMap.put("rename", methodPolicy);
+
+    return (NativeFileSystemStore) RetryProxy.create(
+        NativeFileSystemStore.class, store, methodNameToPolicyMap);
+  }
+
+  private String getOwnerId() {
+    return System.getProperty("user.name");
+  }
+
+  private String getGroupId() {
+    return System.getProperty("user.name");
+  }
+
+  private String getOwnerInfo(boolean getOwnerId) {
+    String ownerInfoId = "";
+    try {
+      String userName = System.getProperty("user.name");
+      String command = "id -u " + userName;
+      if (!getOwnerId) {
+        command = "id -g " + userName;
+      }
+      Process child = Runtime.getRuntime().exec(command);
+      child.waitFor();
+
+      // Get the input stream and read from it
+      InputStream in = child.getInputStream();
+      StringBuilder strBuffer = new StringBuilder();
+      int c;
+      while ((c = in.read()) != -1) {
+        strBuffer.append((char) c);
+      }
+      in.close();
+      ownerInfoId = strBuffer.toString();
+    } catch (IOException | InterruptedException e) {
+      LOG.error("Getting owner info occurs a exception", e);
+    }
+    return ownerInfoId;
+  }
+
+  private static String pathToKey(Path path) {
+    if (path.toUri().getScheme() != null && path.toUri().getPath().isEmpty()) {
+      // allow uris without trailing slash after bucket to refer to root,
+      // like cosn://mybucket
+      return "";
+    }
+    if (!path.isAbsolute()) {
+      throw new IllegalArgumentException("Path must be absolute: " + path);
+    }
+    String ret = path.toUri().getPath();
+    if (ret.endsWith("/") && (ret.indexOf("/") != ret.length() - 1)) {
+      ret = ret.substring(0, ret.length() - 1);
+    }
+    return ret;
+  }
+
+  private static Path keyToPath(String key) {
+    if (!key.startsWith(PATH_DELIMITER)) {
+      return new Path("/" + key);
+    } else {
+      return new Path(key);
+    }
+  }
+
+  private Path makeAbsolute(Path path) {
+    if (path.isAbsolute()) {
+      return path;
+    }
+    return new Path(workingDir, path);
+  }
+
+  /**
+   * This optional operation is not yet supported.
+   */
+  @Override
+  public FSDataOutputStream append(Path f, int bufferSize,
+      Progressable progress) throws IOException {
+    throw new IOException("Not supported");
+  }
+
+  @Override
+  public FSDataOutputStream create(Path f, FsPermission permission,
+      boolean overwrite, int bufferSize, short replication, long blockSize,
+      Progressable progress) throws IOException {
+    FileStatus fileStatus;
+
+    try {
+      fileStatus = getFileStatus(f);
+      if (fileStatus.isDirectory()) {
+        throw new FileAlreadyExistsException(f + " is a directory");
+      }
+      if (!overwrite) {
+        // path references a file and overwrite is disabled
+        throw new FileAlreadyExistsException(f + " already exists");
+      }
+
+    } catch (FileNotFoundException e) {
+      LOG.debug("Creating a new file: [{}] in COS.", f);
+    }
+
+    Path absolutePath = makeAbsolute(f);
+    String key = pathToKey(absolutePath);
+    return new FSDataOutputStream(
+        new CosNOutputStream(getConf(), store, key, blockSize,
+            this.boundedIOThreadPool), statistics);
+  }
+
+  private boolean rejectRootDirectoryDelete(boolean isEmptyDir,
+      boolean recursive) throws PathIOException {
+    if (isEmptyDir) {
+      return true;
+    }
+    if (recursive) {
+      return false;
+    } else {
+      throw new PathIOException(this.bucket, "Can not delete root path");
+    }
+  }
+
+  @Override
+  public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+      EnumSet<CreateFlag> flags, int bufferSize, short replication,
+      long blockSize, Progressable progress) throws IOException {
+    Path parent = f.getParent();
+    if (null != parent) {
+      if (!getFileStatus(parent).isDirectory()) {
+        throw new FileAlreadyExistsException("Not a directory: " + parent);
+      }
+    }
+
+    return create(f, permission, flags.contains(CreateFlag.OVERWRITE),
+        bufferSize, replication, blockSize, progress);
+  }
+
+  @Override
+  public boolean delete(Path f, boolean recursive) throws IOException {
+    LOG.debug("Ready to delete path: [{}]. recursive: [{}].", f, recursive);
+    FileStatus status;
+    try {
+      status = getFileStatus(f);
+    } catch (FileNotFoundException e) {
+      LOG.debug("Ready to delete the file: [{}], but it does not exist.", f);
+      return false;
+    }
+    Path absolutePath = makeAbsolute(f);
+    String key = pathToKey(absolutePath);
+    if (key.compareToIgnoreCase("/") == 0) {
+      FileStatus[] fileStatuses = listStatus(f);
+      return this.rejectRootDirectoryDelete(
+          fileStatuses.length == 0, recursive);
+    }
+
+    if (status.isDirectory()) {
+      if (!key.endsWith(PATH_DELIMITER)) {
+        key += PATH_DELIMITER;
+      }
+      if (!recursive && listStatus(f).length > 0) {
+        String errMsg = String.format("Can not delete the directory: [%s], as"
+            + " it is not empty and option recursive is false.", f);
+        throw new IOException(errMsg);
+      }
+
+      createParent(f);
+
+      String priorLastKey = null;
+      do {
+        PartialListing listing = store.list(
+            key,
+            Constants.COS_MAX_LISTING_LENGTH,
+            priorLastKey,
+            true);
+        for (FileMetadata file : listing.getFiles()) {
+          store.delete(file.getKey());
+        }
+        for (FileMetadata commonPrefix : listing.getCommonPrefixes()) {
+          store.delete(commonPrefix.getKey());
+        }
+        priorLastKey = listing.getPriorLastKey();
+      } while (priorLastKey != null);
+      try {
+        store.delete(key);
+      } catch (Exception e) {
+        LOG.error("Deleting the COS key: [{}] occurs an exception.", key, e);
+      }
+
+    } else {
+      LOG.debug("Delete the file: {}", f);
+      createParent(f);
+      store.delete(key);
+    }
+    return true;
+  }
+
+  @Override
+  public FileStatus getFileStatus(Path f) throws IOException {
+    Path absolutePath = makeAbsolute(f);
+    String key = pathToKey(absolutePath);
+
+    if (key.length() == 0) {
+      // root always exists
+      return newDirectory(absolutePath);
+    }
+
+    LOG.debug("Call the getFileStatus to obtain the metadata for "
+        + "the file: [{}].", f);
+
+    FileMetadata meta = store.retrieveMetadata(key);
+    if (meta != null) {
+      if (meta.isFile()) {
+        LOG.debug("Path: [{}] is a file. COS key: [{}]", f, key);
+        return newFile(meta, absolutePath);
+      } else {
+        LOG.debug("Path: [{}] is a dir. COS key: [{}]", f, key);
+        return newDirectory(meta, absolutePath);
+      }
+    }
+
+    if (!key.endsWith(PATH_DELIMITER)) {
+      key += PATH_DELIMITER;
+    }
+
+    // Since a directory in the object store is just a common prefix of the
+    // object keys, check the existence of the path by listing the keys with
+    // this prefix.
+    LOG.debug("List COS key: [{}] to check the existence of the path.", key);
+    PartialListing listing = store.list(key, 1);
+    if (listing.getFiles().length > 0
+        || listing.getCommonPrefixes().length > 0) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Path: [{}] is a directory. COS key: [{}]", f, key);
+      }
+      return newDirectory(absolutePath);
+    }
+
+    throw new FileNotFoundException(
+        "No such file or directory '" + absolutePath + "'");
+  }
+
+  @Override
+  public URI getUri() {
+    return uri;
+  }
+
+  /**
+   * <p>
+   * If <code>f</code> is a file, this method will make a single call to COS.
+   * If <code>f</code> is a directory,
+   * this method will make a maximum of (<i>n</i> / 199) + 2 calls to COS,
+   * where <i>n</i> is the total number of files
+   * and directories contained directly in <code>f</code>.
+   * </p>
+   */
+  @Override
+  public FileStatus[] listStatus(Path f) throws IOException {
+    Path absolutePath = makeAbsolute(f);
+    String key = pathToKey(absolutePath);
+
+    if (key.length() > 0) {
+      FileStatus fileStatus = this.getFileStatus(f);
+      if (fileStatus.isFile()) {
+        return new FileStatus[]{fileStatus};
+      }
+    }
+
+    if (!key.endsWith(PATH_DELIMITER)) {
+      key += PATH_DELIMITER;
+    }
+
+    URI pathUri = absolutePath.toUri();
+    Set<FileStatus> status = new TreeSet<>();
+    String priorLastKey = null;
+    do {
+      PartialListing listing = store.list(
+          key, Constants.COS_MAX_LISTING_LENGTH, priorLastKey, false);
+      for (FileMetadata fileMetadata : listing.getFiles()) {
+        Path subPath = keyToPath(fileMetadata.getKey());
+        if (fileMetadata.getKey().equals(key)) {
+          // this is just the directory we have been asked to list.
+          LOG.debug("The file list contains the COS key [{}] to be listed.",
+              key);
+        } else {
+          status.add(newFile(fileMetadata, subPath));
+        }
+      }
+
+      for (FileMetadata commonPrefix : listing.getCommonPrefixes()) {
+        Path subPath = keyToPath(commonPrefix.getKey());
+        String relativePath = pathUri.relativize(subPath.toUri()).getPath();
+        status.add(
+            newDirectory(commonPrefix, new Path(absolutePath, relativePath)));
+      }
+      priorLastKey = listing.getPriorLastKey();
+    } while (priorLastKey != null);
+
+    return status.toArray(new FileStatus[status.size()]);
+  }
+
+  private FileStatus newFile(FileMetadata meta, Path path) {
+    return new FileStatus(meta.getLength(), false, 1, getDefaultBlockSize(),
+        meta.getLastModified(), 0, null, this.owner, this.group,
+        path.makeQualified(this.getUri(), this.getWorkingDirectory()));
+  }
+
+  private FileStatus newDirectory(Path path) {
+    return new FileStatus(0, true, 1, 0, 0, 0, null, this.owner, this.group,
+        path.makeQualified(this.getUri(), this.getWorkingDirectory()));
+  }
+
+  private FileStatus newDirectory(FileMetadata meta, Path path) {
+    if (meta == null) {
+      return newDirectory(path);
+    }
+    return new FileStatus(0, true, 1, 0, meta.getLastModified(),
+        0, null, this.owner, this.group,
+        path.makeQualified(this.getUri(), this.getWorkingDirectory()));
+  }
+
+  /**
+   * Validate the path from the bottom up.
+   *
+   * @param path The path to be validated
+   * @throws FileAlreadyExistsException The specified path is an existing file
+   * @throws IOException                Getting the file status of the
+   *                                    specified path throws an
+   *                                    IOException.
+   */
+  private void validatePath(Path path) throws IOException {
+    Path parent = path.getParent();
+    do {
+      try {
+        FileStatus fileStatus = getFileStatus(parent);
+        if (fileStatus.isDirectory()) {
+          break;
+        } else {
+          throw new FileAlreadyExistsException(String.format(
+              "Can't make directory for path '%s', it is a file.", parent));
+        }
+      } catch (FileNotFoundException e) {
+        LOG.debug("The Path: [{}] does not exist.", path);
+      }
+      parent = parent.getParent();
+    } while (parent != null);
+  }
+
+  @Override
+  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+    try {
+      FileStatus fileStatus = getFileStatus(f);
+      if (fileStatus.isDirectory()) {
+        return true;
+      } else {
+        throw new FileAlreadyExistsException("Path is a file: " + f);
+      }
+    } catch (FileNotFoundException e) {
+      validatePath(f);
+    }
+
+    return mkDirRecursively(f, permission);
+  }
+
+  /**
+   * Recursively create a directory.
+   *
+   * @param f          Absolute path to the directory.
+   * @param permission Directory permissions. Permission does not work for
+   *                   the CosN filesystem currently.
+   * @return true if the creation was successful.
+   * @throws IOException The specified path already exists or an error
+   *                     creating the path.
+   */
+  public boolean mkDirRecursively(Path f, FsPermission permission)
+      throws IOException {
+    Path absolutePath = makeAbsolute(f);
+    List<Path> paths = new ArrayList<>();
+    do {
+      paths.add(absolutePath);
+      absolutePath = absolutePath.getParent();
+    } while (absolutePath != null);
+
+    for (Path path : paths) {
+      if (path.equals(new Path(CosNFileSystem.PATH_DELIMITER))) {
+        break;
+      }
+      try {
+        FileStatus fileStatus = getFileStatus(path);
+        if (fileStatus.isFile()) {
+          throw new FileAlreadyExistsException(
+              String.format("Can't make directory for path: %s, "
+                  + "since it is a file.", f));
+        }
+        if (fileStatus.isDirectory()) {
+          break;
+        }
+      } catch (FileNotFoundException e) {
+        LOG.debug("Making dir: [{}] in COS", f);
+
+        String folderPath = pathToKey(makeAbsolute(f));
+        if (!folderPath.endsWith(PATH_DELIMITER)) {
+          folderPath += PATH_DELIMITER;
+        }
+        store.storeEmptyFile(folderPath);
+      }
+    }
+    return true;
+  }
+
+  private boolean mkdir(Path f) throws IOException {
+    try {
+      FileStatus fileStatus = getFileStatus(f);
+      if (fileStatus.isFile()) {
+        throw new FileAlreadyExistsException(
+            String.format(
+                "Can't make directory for path '%s' since it is a file.", f));
+      }
+    } catch (FileNotFoundException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Make directory: [{}] in COS.", f);
+      }
+
+      String folderPath = pathToKey(makeAbsolute(f));
+      if (!folderPath.endsWith(PATH_DELIMITER)) {
+        folderPath += PATH_DELIMITER;
+      }
+      store.storeEmptyFile(folderPath);
+    }
+    return true;
+  }
+
+  @Override
+  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+    FileStatus fs = getFileStatus(f); // will throw if the file doesn't
+    // exist
+    if (fs.isDirectory()) {
+      throw new FileNotFoundException("'" + f + "' is a directory");
+    }
+    LOG.info("Open the file: [{}] for reading.", f);
+    Path absolutePath = makeAbsolute(f);
+    String key = pathToKey(absolutePath);
+    long fileSize = store.getFileLength(key);
+    return new FSDataInputStream(new BufferedFSInputStream(
+        new CosNInputStream(this.getConf(), store, statistics, key, fileSize,
+            this.boundedIOThreadPool), bufferSize));
+  }
+
+  @Override
+  public boolean rename(Path src, Path dst) throws IOException {
+    LOG.debug("Rename source path: [{}] to dest path: [{}].", src, dst);
+
+    // Renaming the root directory is not allowed
+    if (src.isRoot()) {
+      LOG.debug("Cannot rename the root directory of a filesystem.");
+      return false;
+    }
+
+    // Check whether the source path exists.
+    FileStatus srcFileStatus = this.getFileStatus(src);
+
+    // Source path and destination path are not allowed to be the same
+    if (src.equals(dst)) {
+      LOG.debug("Source path and dest path refer to "
+          + "the same file or directory: [{}].", dst);
+      throw new IOException("Source path and dest path refer "
+          + "the same file or directory");
+    }
+
+    // It is not allowed to rename a parent directory to its subdirectory
+    Path dstParentPath;
+    for (dstParentPath = dst.getParent();
+         null != dstParentPath && !src.equals(dstParentPath);
+         dstParentPath = dstParentPath.getParent()) {
+      // Recursively find the common parent path of the source and
+      // destination paths.
+      LOG.debug("Recursively find the common parent directory of the source "
+              + "and destination paths. The currently found parent path: {}",
+          dstParentPath);
+    }
+
+    if (null != dstParentPath) {
+      LOG.debug("It is not allowed to rename a parent directory:[{}] "
+          + "to its subdirectory:[{}].", src, dst);
+      throw new IOException(String.format(
+          "It is not allowed to rename a parent directory: %s "
+              + "to its subdirectory: %s", src, dst));
+    }
+
+    FileStatus dstFileStatus;
+    try {
+      dstFileStatus = this.getFileStatus(dst);
+
+      // The destination path exists and is a file,
+      // and the rename operation is not allowed.
+      if (dstFileStatus.isFile()) {
+        throw new FileAlreadyExistsException(String.format(
+            "File: %s already exists", dstFileStatus.getPath()));
+      } else {
+        // The destination path is an existing directory,
+        // and it is checked whether there is a file or directory
+        // with the same name as the source path under the destination path
+        dst = new Path(dst, src.getName());
+        FileStatus[] statuses;
+        try {
+          statuses = this.listStatus(dst);
+        } catch (FileNotFoundException e) {
+          statuses = null;
+        }
+        if (null != statuses && statuses.length > 0) {
+          LOG.debug("Cannot rename source file: [{}] to dest file: [{}], "
+              + "because the file already exists.", src, dst);
+          throw new FileAlreadyExistsException(
+              String.format(
+                  "File: %s already exists", dst
+              )
+          );
+        }
+      }
+    } catch (FileNotFoundException e) {
+      // destination path not exists
+      Path tempDstParentPath = dst.getParent();
+      FileStatus dstParentStatus = this.getFileStatus(tempDstParentPath);
+      if (!dstParentStatus.isDirectory()) {
+        throw new IOException(String.format(
+            "Cannot rename %s to %s, %s is a file", src, dst, dst.getParent()
+        ));
+      }
+      // The default root directory is definitely there.
+    }
+
+    boolean result;
+    if (srcFileStatus.isDirectory()) {
+      result = this.copyDirectory(src, dst);
+    } else {
+      result = this.copyFile(src, dst);
+    }
+
+    if (!result) {
+      //Since rename is a non-atomic operation, after copy fails,
+      // it is not allowed to delete the data of the original path.
+      return false;
+    } else {
+      return this.delete(src, true);
+    }
+  }
+
+  private boolean copyFile(Path srcPath, Path dstPath) throws IOException {
+    String srcKey = pathToKey(srcPath);
+    String dstKey = pathToKey(dstPath);
+    this.store.copy(srcKey, dstKey);
+    return true;
+  }
+
+  private boolean copyDirectory(Path srcPath, Path dstPath) throws IOException {
+    String srcKey = pathToKey(srcPath);
+    if (!srcKey.endsWith(PATH_DELIMITER)) {
+      srcKey += PATH_DELIMITER;
+    }
+    String dstKey = pathToKey(dstPath);
+    if (!dstKey.endsWith(PATH_DELIMITER)) {
+      dstKey += PATH_DELIMITER;
+    }
+
+    if (dstKey.startsWith(srcKey)) {
+      throw new IOException(
+          "can not copy a directory to a subdirectory of self");
+    }
+
+    this.store.storeEmptyFile(dstKey);
+    CosNCopyFileContext copyFileContext = new CosNCopyFileContext();
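+    // Submit one asynchronous copy task per listed object, then wait on the
+    // shared context until all submitted copies have reported completion.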
+
+    int copiesToFinishes = 0;
+    String priorLastKey = null;
+    do {
+      PartialListing objectList = this.store.list(
+          srcKey, Constants.COS_MAX_LISTING_LENGTH, priorLastKey, true);
+      for (FileMetadata file : objectList.getFiles()) {
+        this.boundedCopyThreadPool.execute(new CosNCopyFileTask(
+            this.store,
+            file.getKey(),
+            dstKey.concat(file.getKey().substring(srcKey.length())),
+            copyFileContext));
+        copiesToFinishes++;
+        if (!copyFileContext.isCopySuccess()) {
+          break;
+        }
+      }
+      priorLastKey = objectList.getPriorLastKey();
+    } while (null != priorLastKey);
+
+    copyFileContext.lock();
+    try {
+      copyFileContext.awaitAllFinish(copiesToFinishes);
+    } catch (InterruptedException e) {
+      LOG.warn("interrupted when wait copies to finish");
+    } finally {
+      copyFileContext.lock();
+    }
+
+    return copyFileContext.isCopySuccess();
+  }
+
+  private void createParent(Path path) throws IOException {
+    Path parent = path.getParent();
+    if (parent != null) {
+      String parentKey = pathToKey(parent);
+      LOG.debug("Create parent key: {}", parentKey);
+      if (!parentKey.equals(PATH_DELIMITER)) {
+        String key = pathToKey(makeAbsolute(parent));
+        if (key.length() > 0) {
+          try {
+            store.storeEmptyFile(key + PATH_DELIMITER);
+          } catch (IOException e) {
+            LOG.debug("Store a empty file in COS failed.", e);
+            throw e;
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getDefaultBlockSize() {
+    return getConf().getLong(
+        CosNConfigKeys.COSN_BLOCK_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_BLOCK_SIZE);
+  }
+
+  /**
+   * Set the working directory to the given directory.
+   */
+  @Override
+  public void setWorkingDirectory(Path newDir) {
+    workingDir = newDir;
+  }
+
+  @Override
+  public Path getWorkingDirectory() {
+    return workingDir;
+  }
+
+  @Override
+  public String getCanonicalServiceName() {
+    // Does not support Token
+    return null;
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      this.store.close();
+      this.boundedIOThreadPool.shutdown();
+      this.boundedCopyThreadPool.shutdown();
+    } finally {
+      super.close();
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNInputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNInputStream.java
new file mode 100644
index 0000000..e759b55
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNInputStream.java
@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Queue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * The input stream for the COS blob store.
+ * It optimizes the sequential read flow with a forward read-ahead queue.
+ */
+public class CosNInputStream extends FSInputStream {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CosNInputStream.class);
+
+  /**
+   * This class is used by {@link CosNInputStream}
+   * and {@link CosNFileReadTask} to buffer data read from the COS blob store.
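+   * <p>
+   * A buffer starts in the INIT status and is moved to SUCCESS or ERROR by
+   * the read task; consumers wait on the buffer's condition while its
+   * status is still INIT.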
+   */
+  public static class ReadBuffer {
+    public static final int INIT = 1;
+    public static final int SUCCESS = 0;
+    public static final int ERROR = -1;
+
+    private final ReentrantLock lock = new ReentrantLock();
+    private Condition readyCondition = lock.newCondition();
+
+    private byte[] buffer;
+    private int status;
+    private long start;
+    private long end;
+
+    public ReadBuffer(long start, long end) {
+      this.start = start;
+      this.end = end;
+      this.buffer = new byte[(int) (this.end - this.start) + 1];
+      this.status = INIT;
+    }
+
+    public void lock() {
+      this.lock.lock();
+    }
+
+    public void unLock() {
+      this.lock.unlock();
+    }
+
+    public void await(int waitStatus) throws InterruptedException {
+      while (this.status == waitStatus) {
+        readyCondition.await();
+      }
+    }
+
+    public void signalAll() {
+      readyCondition.signalAll();
+    }
+
+    public byte[] getBuffer() {
+      return this.buffer;
+    }
+
+    public int getStatus() {
+      return this.status;
+    }
+
+    public void setStatus(int status) {
+      this.status = status;
+    }
+
+    public long getStart() {
+      return start;
+    }
+
+    public long getEnd() {
+      return end;
+    }
+  }
+
+  private FileSystem.Statistics statistics;
+  private final Configuration conf;
+  private final NativeFileSystemStore store;
+  private final String key;
+  private long position = 0;
+  private long nextPos = 0;
+  private long fileSize;
+  private long partRemaining;
+  private final long preReadPartSize;
+  private final int maxReadPartNumber;
+  private byte[] buffer;
+  private boolean closed;
+
+  private final ExecutorService readAheadExecutorService;
+  private final Queue<ReadBuffer> readBufferQueue;
+
+  public CosNInputStream(Configuration conf, NativeFileSystemStore store,
+      FileSystem.Statistics statistics, String key, long fileSize,
+      ExecutorService readAheadExecutorService) {
+    super();
+    this.conf = conf;
+    this.store = store;
+    this.statistics = statistics;
+    this.key = key;
+    this.fileSize = fileSize;
+    this.preReadPartSize = conf.getLong(
+        CosNConfigKeys.READ_AHEAD_BLOCK_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_READ_AHEAD_BLOCK_SIZE);
+    this.maxReadPartNumber = conf.getInt(
+        CosNConfigKeys.READ_AHEAD_QUEUE_SIZE,
+        CosNConfigKeys.DEFAULT_READ_AHEAD_QUEUE_SIZE);
+
+    this.readAheadExecutorService = readAheadExecutorService;
+    this.readBufferQueue = new ArrayDeque<>(this.maxReadPartNumber);
+    this.closed = false;
+  }
+
+  private synchronized void reopen(long pos) throws IOException {
+    long partSize;
+
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+    } else if (pos > this.fileSize) {
+      throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+    } else {
+      if (pos + this.preReadPartSize > this.fileSize) {
+        partSize = this.fileSize - pos;
+      } else {
+        partSize = this.preReadPartSize;
+      }
+    }
+
+    this.buffer = null;
+
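+    // A request at the expected next position is treated as sequential IO
+    // and keeps the read-ahead queue; any other position is random IO, and
+    // prefetched buffers that do not start at the requested position are
+    // discarded.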
+    boolean isRandomIO = true;
+    if (pos == this.nextPos) {
+      isRandomIO = false;
+    } else {
+      while (this.readBufferQueue.size() != 0) {
+        if (this.readBufferQueue.element().getStart() != pos) {
+          this.readBufferQueue.poll();
+        } else {
+          break;
+        }
+      }
+    }
+
+    this.nextPos = pos + partSize;
+
+    int currentBufferQueueSize = this.readBufferQueue.size();
+    long lastByteStart;
+    if (currentBufferQueueSize == 0) {
+      lastByteStart = pos - partSize;
+    } else {
+      ReadBuffer[] readBuffers =
+          this.readBufferQueue.toArray(
+              new ReadBuffer[currentBufferQueueSize]);
+      lastByteStart = readBuffers[currentBufferQueueSize - 1].getStart();
+    }
+
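+    // Schedule new read-ahead tasks following the last queued buffer, up to
+    // the remaining queue capacity; for random IO only a single block is
+    // prefetched.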
+    int maxLen = this.maxReadPartNumber - currentBufferQueueSize;
+    for (int i = 0; i < maxLen && i < (currentBufferQueueSize + 1) * 2; i++) {
+      if (lastByteStart + partSize * (i + 1) > this.fileSize) {
+        break;
+      }
+
+      long byteStart = lastByteStart + partSize * (i + 1);
+      long byteEnd = byteStart + partSize - 1;
+      if (byteEnd >= this.fileSize) {
+        byteEnd = this.fileSize - 1;
+      }
+
+      ReadBuffer readBuffer = new ReadBuffer(byteStart, byteEnd);
+      if (readBuffer.getBuffer().length == 0) {
+        readBuffer.setStatus(ReadBuffer.SUCCESS);
+      } else {
+        this.readAheadExecutorService.execute(
+            new CosNFileReadTask(
+                this.conf, this.key, this.store, readBuffer));
+      }
+
+      this.readBufferQueue.add(readBuffer);
+      if (isRandomIO) {
+        break;
+      }
+    }
+
+    ReadBuffer readBuffer = this.readBufferQueue.poll();
+    if (null != readBuffer) {
+      readBuffer.lock();
+      try {
+        readBuffer.await(ReadBuffer.INIT);
+        if (readBuffer.getStatus() == ReadBuffer.ERROR) {
+          this.buffer = null;
+        } else {
+          this.buffer = readBuffer.getBuffer();
+        }
+      } catch (InterruptedException e) {
+        LOG.warn("An interrupted exception occurred "
+            + "when waiting a read buffer.");
+      } finally {
+        readBuffer.unLock();
+      }
+    }
+
+    if (null == this.buffer) {
+      throw new IOException("Null IO stream");
+    }
+
+    this.position = pos;
+    this.partRemaining = partSize;
+  }
+
+  @Override
+  public void seek(long pos) throws IOException {
+    if (pos < 0) {
+      throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+    }
+    if (pos > this.fileSize) {
+      throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+    }
+
+    if (this.position == pos) {
+      return;
+    }
+    if (pos > position && pos < this.position + partRemaining) {
+      long len = pos - this.position;
+      this.position = pos;
+      this.partRemaining -= len;
+    } else {
+      this.reopen(pos);
+    }
+  }
+
+  @Override
+  public long getPos() {
+    return this.position;
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) {
+    // Seeking to a new source is not currently supported.
+    return false;
+  }
+
+  @Override
+  public int read() throws IOException {
+    if (this.closed) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+
+    if (this.partRemaining <= 0 && this.position < this.fileSize) {
+      this.reopen(this.position);
+    }
+
+    int byteRead = -1;
+    if (this.partRemaining != 0) {
+      byteRead = this.buffer[
+          (int) (this.buffer.length - this.partRemaining)] & 0xff;
+    }
+    if (byteRead >= 0) {
+      this.position++;
+      this.partRemaining--;
+      if (null != this.statistics) {
+        this.statistics.incrementBytesRead(byteRead);
+      }
+    }
+
+    return byteRead;
+  }
+
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    if (this.closed) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+
+    if (len == 0) {
+      return 0;
+    }
+
+    if (off < 0 || len < 0 || off + len > b.length) {
+      throw new IndexOutOfBoundsException();
+    }
+
+    int bytesRead = 0;
+    while (position < fileSize && bytesRead < len) {
+      if (partRemaining <= 0) {
+        reopen(position);
+      }
+
+      int bytes = 0;
+      for (int i = this.buffer.length - (int) partRemaining;
+           i < this.buffer.length; i++) {
+        b[off + bytesRead] = this.buffer[i];
+        bytes++;
+        bytesRead++;
+        if (bytesRead >= len) {
+          break;
+        }
+      }
+
+      if (bytes > 0) {
+        this.position += bytes;
+        this.partRemaining -= bytes;
+      } else if (this.partRemaining != 0) {
+        throw new IOException(
+            "Failed to read from stream. Remaining: " + this.partRemaining);
+      }
+    }
+    if (null != this.statistics && bytesRead > 0) {
+      this.statistics.incrementBytesRead(bytesRead);
+    }
+
+    return bytesRead == 0 ? -1 : bytesRead;
+  }
+
+  @Override
+  public int available() throws IOException {
+    if (this.closed) {
+      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+    }
+
+    long remaining = this.fileSize - this.position;
+    if (remaining > Integer.MAX_VALUE) {
+      return Integer.MAX_VALUE;
+    }
+    return (int)remaining;
+  }
+
+  @Override
+  public void close() {
+    if (this.closed) {
+      return;
+    }
+    this.closed = true;
+    this.buffer = null;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java
new file mode 100644
index 0000000..c437dde
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNOutputStream.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.qcloud.cos.model.PartETag;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * The output stream for the COS blob store.
+ * It implements streaming upload to COS based on the multipart upload
+ * feature, where the maximum size of each part is 5GB.
+ * A single file of up to 40TB can be uploaded in this way.
+ * The upload performance of large files is improved by using byte buffers
+ * and a fixed thread pool.
+ */
+public class CosNOutputStream extends OutputStream {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CosNOutputStream.class);
+
+  private final Configuration conf;
+  private final NativeFileSystemStore store;
+  private MessageDigest digest;
+  private long blockSize;
+  private String key;
+  private int currentBlockId = 0;
+  private Set<ByteBufferWrapper> blockCacheBuffers = new HashSet<>();
+  private ByteBufferWrapper currentBlockBuffer;
+  private OutputStream currentBlockOutputStream;
+  private String uploadId = null;
+  private ListeningExecutorService executorService;
+  private List<ListenableFuture<PartETag>> etagList = new LinkedList<>();
+  private int blockWritten = 0;
+  private boolean closed = false;
+
+  public CosNOutputStream(Configuration conf, NativeFileSystemStore store,
+      String key, long blockSize, ExecutorService executorService)
+      throws IOException {
+    this.conf = conf;
+    this.store = store;
+    this.key = key;
+    this.blockSize = blockSize;
+    if (this.blockSize < Constants.MIN_PART_SIZE) {
+      LOG.warn(
+          String.format(
+              "The minimum size of a single block is limited to %d.",
+              Constants.MIN_PART_SIZE));
+      this.blockSize = Constants.MIN_PART_SIZE;
+    }
+    if (this.blockSize > Constants.MAX_PART_SIZE) {
+      LOG.warn(
+          String.format(
+              "The maximum size of a single block is limited to %d.",
+              Constants.MAX_PART_SIZE));
+      this.blockSize = Constants.MAX_PART_SIZE;
+    }
+
+    // Use a blocking thread pool with fair scheduling
+    this.executorService = MoreExecutors.listeningDecorator(executorService);
+
+    try {
+      this.currentBlockBuffer =
+          BufferPool.getInstance().getBuffer((int) this.blockSize);
+    } catch (IOException e) {
+      throw new IOException("Getting a buffer size: "
+          + String.valueOf(this.blockSize)
+          + " from buffer pool occurs an exception: ", e);
+    }
+
+    try {
+      this.digest = MessageDigest.getInstance("MD5");
+      this.currentBlockOutputStream = new DigestOutputStream(
+          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer()),
+          this.digest);
+    } catch (NoSuchAlgorithmException e) {
+      this.digest = null;
+      this.currentBlockOutputStream =
+          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer());
+    }
+  }
+
+  @Override
+  public void flush() throws IOException {
+    this.currentBlockOutputStream.flush();
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (this.closed) {
+      return;
+    }
+    this.currentBlockOutputStream.flush();
+    this.currentBlockOutputStream.close();
+    LOG.info("The output stream has been close, and "
+        + "begin to upload the last block: [{}].", this.currentBlockId);
+    this.blockCacheBuffers.add(this.currentBlockBuffer);
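+    // If the whole file fits into a single block, upload it with a simple
+    // put; otherwise upload the last (possibly partial) part and complete
+    // the multipart upload with all of the collected part ETags.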
+    if (this.blockCacheBuffers.size() == 1) {
+      byte[] md5Hash = this.digest == null ? null : this.digest.digest();
+      store.storeFile(this.key,
+          new ByteBufferInputStream(this.currentBlockBuffer.getByteBuffer()),
+          md5Hash, this.currentBlockBuffer.getByteBuffer().remaining());
+    } else {
+      PartETag partETag = null;
+      if (this.blockWritten > 0) {
+        LOG.info("Upload the last part..., blockId: [{}], written bytes: [{}]",
+            this.currentBlockId, this.blockWritten);
+        partETag = store.uploadPart(
+            new ByteBufferInputStream(currentBlockBuffer.getByteBuffer()),
+            key, uploadId, currentBlockId + 1,
+            currentBlockBuffer.getByteBuffer().remaining());
+      }
+      final List<PartETag> futurePartETagList = this.waitForFinishPartUploads();
+      if (null == futurePartETagList) {
+        throw new IOException("Failed to multipart upload to cos, abort it.");
+      }
+      List<PartETag> tmpPartEtagList = new LinkedList<>(futurePartETagList);
+      if (null != partETag) {
+        tmpPartEtagList.add(partETag);
+      }
+      store.completeMultipartUpload(this.key, this.uploadId, tmpPartEtagList);
+    }
+    try {
+      BufferPool.getInstance().returnBuffer(this.currentBlockBuffer);
+    } catch (InterruptedException e) {
+      LOG.error("An exception occurred "
+          + "while returning the buffer to the buffer pool.", e);
+    }
+    LOG.info("The outputStream for key: [{}] has been uploaded.", key);
+    this.blockWritten = 0;
+    this.closed = true;
+  }
+
+  private List<PartETag> waitForFinishPartUploads() throws IOException {
+    try {
+      LOG.info("Wait for all parts to finish their uploading.");
+      return Futures.allAsList(this.etagList).get();
+    } catch (InterruptedException e) {
+      LOG.error("Interrupt the part upload.", e);
+      return null;
+    } catch (ExecutionException e) {
+      LOG.error("Cancelling futures.");
+      for (ListenableFuture<PartETag> future : this.etagList) {
+        future.cancel(true);
+      }
+      (store).abortMultipartUpload(this.key, this.uploadId);
+      LOG.error("Multipart upload with id: [{}] to COS key: [{}]",
+          this.uploadId, this.key, e);
+      throw new IOException("Multipart upload with id: "
+          + this.uploadId + " to " + this.key, e);
+    }
+  }
+
+  private void uploadPart() throws IOException {
+    this.currentBlockOutputStream.flush();
+    this.currentBlockOutputStream.close();
+    this.blockCacheBuffers.add(this.currentBlockBuffer);
+
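+    // The multipart upload is initiated lazily when the first full block is
+    // ready to be uploaded.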
+    if (this.currentBlockId == 0) {
+      uploadId = (store).getUploadId(key);
+    }
+
+    ListenableFuture<PartETag> partETagListenableFuture =
+        this.executorService.submit(
+            new Callable<PartETag>() {
+              private final ByteBufferWrapper buf = currentBlockBuffer;
+              private final String localKey = key;
+              private final String localUploadId = uploadId;
+              private final int blockId = currentBlockId;
+
+              @Override
+              public PartETag call() throws Exception {
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug("{} is uploading a part.",
+                      Thread.currentThread().getName());
+                }
+                PartETag partETag = (store).uploadPart(
+                    new ByteBufferInputStream(this.buf.getByteBuffer()),
+                    this.localKey, this.localUploadId,
+                    this.blockId + 1, this.buf.getByteBuffer().remaining());
+                BufferPool.getInstance().returnBuffer(this.buf);
+                return partETag;
+              }
+            });
+    this.etagList.add(partETagListenableFuture);
+    try {
+      this.currentBlockBuffer =
+          BufferPool.getInstance().getBuffer((int) this.blockSize);
+    } catch (IOException e) {
+      String errMsg = String.format("Getting a buffer [size:%d] from "
+          + "the buffer pool failed.", this.blockSize);
+      throw new IOException(errMsg, e);
+    }
+    this.currentBlockId++;
+    if (null != this.digest) {
+      this.digest.reset();
+      this.currentBlockOutputStream = new DigestOutputStream(
+          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer()),
+          this.digest);
+    } else {
+      this.currentBlockOutputStream =
+          new ByteBufferOutputStream(this.currentBlockBuffer.getByteBuffer());
+    }
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    if (this.closed) {
+      throw new IOException("block stream has been closed.");
+    }
+
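+    // Fill the current block buffer; once a block reaches blockSize it is
+    // uploaded as a single part and a fresh buffer is taken from the pool.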
+    while (len > 0) {
+      long writeBytes;
+      if (this.blockWritten + len > this.blockSize) {
+        writeBytes = this.blockSize - this.blockWritten;
+      } else {
+        writeBytes = len;
+      }
+
+      this.currentBlockOutputStream.write(b, off, (int) writeBytes);
+      this.blockWritten += writeBytes;
+      if (this.blockWritten >= this.blockSize) {
+        this.uploadPart();
+        this.blockWritten = 0;
+      }
+      len -= writeBytes;
+      off += writeBytes;
+    }
+  }
+
+  @Override
+  public void write(byte[] b) throws IOException {
+    this.write(b, 0, b.length);
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    if (this.closed) {
+      throw new IOException("block stream has been closed.");
+    }
+
+    byte[] singleBytes = new byte[1];
+    singleBytes[0] = (byte) b;
+    this.currentBlockOutputStream.write(singleBytes, 0, 1);
+    this.blockWritten += 1;
+    if (this.blockWritten >= this.blockSize) {
+      this.uploadPart();
+      this.blockWritten = 0;
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java
new file mode 100644
index 0000000..39981ca
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNUtils.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList;
+import org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider;
+import org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider;
+
+/**
+ * Utility methods for CosN code.
+ */
+public final class CosNUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(CosNUtils.class);
+
+  static final String INSTANTIATION_EXCEPTION
+      = "instantiation exception";
+  static final String NOT_COS_CREDENTIAL_PROVIDER
+      = "is not cos credential provider";
+  static final String ABSTRACT_CREDENTIAL_PROVIDER
+      = "is abstract and therefore cannot be created";
+
+  private CosNUtils() {
+  }
+
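+  /**
+   * Build the ordered list of credential providers. When no provider classes
+   * are configured, a default chain of {@code SimpleCredentialProvider} and
+   * {@code EnvironmentVariableCredentialProvider} is used.
+   *
+   * @param conf Hadoop configuration with COS configuration options.
+   * @return the credential providers to query in order.
+   * @throws IOException if a provider class cannot be loaded or instantiated.
+   */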
+  public static COSCredentialProviderList createCosCredentialsProviderSet(
+      Configuration conf) throws IOException {
+    COSCredentialProviderList credentialProviderList =
+        new COSCredentialProviderList();
+
+    Class<?>[] cosClasses = CosNUtils.loadCosProviderClasses(
+        conf,
+        CosNConfigKeys.COSN_CREDENTIALS_PROVIDER);
+    if (0 == cosClasses.length) {
+      credentialProviderList.add(new SimpleCredentialProvider(conf));
+      credentialProviderList.add(new EnvironmentVariableCredentialProvider());
+    } else {
+      for (Class<?> credClass : cosClasses) {
+        credentialProviderList.add(createCOSCredentialProvider(
+            conf,
+            credClass));
+      }
+    }
+
+    return credentialProviderList;
+  }
+
+  public static Class<?>[] loadCosProviderClasses(
+      Configuration conf,
+      String key,
+      Class<?>... defaultValue) throws IOException {
+    try {
+      return conf.getClasses(key, defaultValue);
+    } catch (RuntimeException e) {
+      Throwable c = e.getCause() != null ? e.getCause() : e;
+      throw new IOException("From option " + key + ' ' + c, c);
+    }
+  }
+
+  public static COSCredentialsProvider createCOSCredentialProvider(
+      Configuration conf,
+      Class<?> credClass) throws IOException {
+    COSCredentialsProvider credentialsProvider;
+    if (!COSCredentialsProvider.class.isAssignableFrom(credClass)) {
+      throw new IllegalArgumentException(
+          "class " + credClass + " " + NOT_COS_CREDENTIAL_PROVIDER);
+    }
+    if (Modifier.isAbstract(credClass.getModifiers())) {
+      throw new IllegalArgumentException(
+          "class " + credClass + " " + ABSTRACT_CREDENTIAL_PROVIDER);
+    }
+    LOG.debug("Credential Provider class: " + credClass.getName());
+
+    try {
+      // new credClass()
+      Constructor constructor = getConstructor(credClass);
+      if (constructor != null) {
+        credentialsProvider =
+            (COSCredentialsProvider) constructor.newInstance();
+        return credentialsProvider;
+      }
+      // new credClass(conf)
+      constructor = getConstructor(credClass, Configuration.class);
+      if (null != constructor) {
+        credentialsProvider =
+            (COSCredentialsProvider) constructor.newInstance(conf);
+        return credentialsProvider;
+      }
+
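+      // public static COSCredentialsProvider getInstance()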
+      Method factory = getFactoryMethod(
+          credClass, COSCredentialsProvider.class, "getInstance");
+      if (null != factory) {
+        credentialsProvider = (COSCredentialsProvider) factory.invoke(null);
+        return credentialsProvider;
+      }
+
+      throw new IllegalArgumentException(
+          "Not supported constructor or factory method found"
+      );
+
+    } catch (IllegalAccessException e) {
+      throw new IOException(
+          credClass.getName() + " " + INSTANTIATION_EXCEPTION + ": " + e, e);
+    } catch (InstantiationException e) {
+      throw new IOException(
+          credClass.getName() + " " + INSTANTIATION_EXCEPTION + ": " + e, e);
+    } catch (InvocationTargetException e) {
+      Throwable targetException = e.getTargetException();
+      if (targetException == null) {
+        targetException = e;
+      }
+      throw new IOException(
+          credClass.getName() + " " + INSTANTIATION_EXCEPTION + ": "
+              + targetException, targetException);
+    }
+  }
+
+  private static Constructor<?> getConstructor(Class<?> cl, Class<?>... args) {
+    try {
+      Constructor constructor = cl.getDeclaredConstructor(args);
+      return Modifier.isPublic(constructor.getModifiers()) ? constructor : null;
+    } catch (NoSuchMethodException e) {
+      return null;
+    }
+  }
+
+  private static Method getFactoryMethod(
+      Class<?> cl, Class<?> returnType, String methodName) {
+    try {
+      Method m = cl.getDeclaredMethod(methodName);
+      if (Modifier.isPublic(m.getModifiers())
+          && Modifier.isStatic(m.getModifiers())
+          && returnType.isAssignableFrom(m.getReturnType())) {
+        return m;
+      } else {
+        return null;
+      }
+    } catch (NoSuchMethodException e) {
+      return null;
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java
new file mode 100644
index 0000000..833f42d
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/CosNativeFileSystemStore.java
@@ -0,0 +1,768 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import com.qcloud.cos.COSClient;
+import com.qcloud.cos.ClientConfig;
+import com.qcloud.cos.auth.BasicCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.exception.CosClientException;
+import com.qcloud.cos.exception.CosServiceException;
+import com.qcloud.cos.http.HttpProtocol;
+import com.qcloud.cos.model.AbortMultipartUploadRequest;
+import com.qcloud.cos.model.COSObject;
+import com.qcloud.cos.model.COSObjectSummary;
+import com.qcloud.cos.model.CompleteMultipartUploadRequest;
+import com.qcloud.cos.model.CompleteMultipartUploadResult;
+import com.qcloud.cos.model.CopyObjectRequest;
+import com.qcloud.cos.model.DeleteObjectRequest;
+import com.qcloud.cos.model.GetObjectMetadataRequest;
+import com.qcloud.cos.model.GetObjectRequest;
+import com.qcloud.cos.model.InitiateMultipartUploadRequest;
+import com.qcloud.cos.model.InitiateMultipartUploadResult;
+import com.qcloud.cos.model.ListObjectsRequest;
+import com.qcloud.cos.model.ObjectListing;
+import com.qcloud.cos.model.ObjectMetadata;
+import com.qcloud.cos.model.PartETag;
+import com.qcloud.cos.model.PutObjectRequest;
+import com.qcloud.cos.model.PutObjectResult;
+import com.qcloud.cos.model.UploadPartRequest;
+import com.qcloud.cos.model.UploadPartResult;
+import com.qcloud.cos.region.Region;
+import com.qcloud.cos.utils.Base64;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.cosn.auth.COSCredentialProviderList;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.http.HttpStatus;
+
+/**
+ * The class actually performs access operation to the COS blob store.
+ * It provides the bridging logic for the Hadoop's abstract filesystem and COS.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class CosNativeFileSystemStore implements NativeFileSystemStore {
+  private COSClient cosClient;
+  private String bucketName;
+  private int maxRetryTimes;
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CosNativeFileSystemStore.class);
+
+  /**
+   * Initialize the client to access COS blob storage.
+   *
+   * @param conf Hadoop configuration with COS configuration options.
+   * @throws IOException If initializing the COS client fails,
+   *                     typically because of incorrect options.
+   */
+  private void initCOSClient(Configuration conf) throws IOException {
+    COSCredentialProviderList credentialProviderList =
+        CosNUtils.createCosCredentialsProviderSet(conf);
+    String region = conf.get(CosNConfigKeys.COSN_REGION_KEY);
+    String endpointSuffix = conf.get(
+        CosNConfigKeys.COSN_ENDPOINT_SUFFIX_KEY);
+    if (null == region && null == endpointSuffix) {
+      String exceptionMsg = String.format("config %s and %s at least one",
+          CosNConfigKeys.COSN_REGION_KEY,
+          CosNConfigKeys.COSN_ENDPOINT_SUFFIX_KEY);
+      throw new IOException(exceptionMsg);
+    }
+
+    COSCredentials cosCred;
+    cosCred = new BasicCOSCredentials(
+        credentialProviderList.getCredentials().getCOSAccessKeyId(),
+        credentialProviderList.getCredentials().getCOSSecretKey());
+
+    boolean useHttps = conf.getBoolean(CosNConfigKeys.COSN_USE_HTTPS_KEY,
+        CosNConfigKeys.DEFAULT_USE_HTTPS);
+
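+    // Either a region or an endpoint suffix must be present (checked above);
+    // the endpoint suffix is only used when no region is configured.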
+    ClientConfig config;
+    if (null == region) {
+      config = new ClientConfig(new Region(""));
+      config.setEndPointSuffix(endpointSuffix);
+    } else {
+      config = new ClientConfig(new Region(region));
+    }
+    if (useHttps) {
+      config.setHttpProtocol(HttpProtocol.https);
+    }
+
+    config.setUserAgent(conf.get(CosNConfigKeys.USER_AGENT,
+        CosNConfigKeys.DEFAULT_USER_AGENT) + " For " + " Hadoop "
+        + VersionInfo.getVersion());
+
+    this.maxRetryTimes = conf.getInt(CosNConfigKeys.COSN_MAX_RETRIES_KEY,
+        CosNConfigKeys.DEFAULT_MAX_RETRIES);
+
+    config.setMaxConnectionsCount(
+        conf.getInt(CosNConfigKeys.MAX_CONNECTION_NUM,
+            CosNConfigKeys.DEFAULT_MAX_CONNECTION_NUM));
+
+    this.cosClient = new COSClient(cosCred, config);
+  }
+
+  /**
+   * Initialize the CosNativeFileSystemStore object, including
+   * its COS client and default COS bucket.
+   *
+   * @param uri  The URI of the COS bucket accessed by default.
+   * @param conf Hadoop configuration with COS configuration options.
+   * @throws IOException If initializing the COS client fails.
+   */
+  @Override
+  public void initialize(URI uri, Configuration conf) throws IOException {
+    try {
+      initCOSClient(conf);
+      this.bucketName = uri.getHost();
+    } catch (Exception e) {
+      handleException(e, "");
+    }
+  }
+
+  /**
+   * Store a file into COS from the specified input stream. The upload is
+   * retried until it succeeds or the maximum number of retries is reached.
+   *
+   * @param key         COS object key.
+   * @param inputStream Input stream to be uploaded into COS.
+   * @param md5Hash     MD5 value of the content to be uploaded.
+   * @param length      Length of uploaded content.
+   * @throws IOException If the upload fails.
+   */
+  private void storeFileWithRetry(String key, InputStream inputStream,
+      byte[] md5Hash, long length) throws IOException {
+    try {
+      ObjectMetadata objectMetadata = new ObjectMetadata();
+      objectMetadata.setContentMD5(Base64.encodeAsString(md5Hash));
+      objectMetadata.setContentLength(length);
+      PutObjectRequest putObjectRequest =
+          new PutObjectRequest(bucketName, key, inputStream, objectMetadata);
+
+      PutObjectResult putObjectResult =
+          (PutObjectResult) callCOSClientWithRetry(putObjectRequest);
+      LOG.debug("Store file successfully. COS key: [{}], ETag: [{}], "
+          + "MD5: [{}].", key, putObjectResult.getETag(), new String(md5Hash));
+    } catch (Exception e) {
+      String errMsg = String.format("Store file failed. COS key: [%s], "
+          + "exception: [%s]", key, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+    }
+  }
+
+  /**
+   * Store a local file into COS.
+   *
+   * @param key     COS object key.
+   * @param file    The local file to be uploaded.
+   * @param md5Hash The MD5 value of the file to be uploaded.
+   * @throws IOException If the upload fails.
+   */
+  @Override
+  public void storeFile(String key, File file, byte[] md5Hash)
+      throws IOException {
+    LOG.info("Store file from local path: [{}]. file length: [{}] COS key: " +
+            "[{}] MD5: [{}].", file.getCanonicalPath(), file.length(), key,
+        Base64.encodeAsString(md5Hash));
+    storeFileWithRetry(key, new BufferedInputStream(new FileInputStream(file)),
+        md5Hash, file.length());
+  }
+
+  /**
+   * Store a file into COS from the specified input stream.
+   *
+   * @param key           COS object key.
+   * @param inputStream   The Input stream to be uploaded.
+   * @param md5Hash       The MD5 value of the content to be uploaded.
+   * @param contentLength Length of uploaded content.
+   * @throws IOException If the upload fails.
+   */
+  @Override
+  public void storeFile(
+      String key,
+      InputStream inputStream,
+      byte[] md5Hash,
+      long contentLength) throws IOException {
+    LOG.info("Store file from input stream. COS key: [{}], "
+        + "length: [{}], MD5: [{}].", key, contentLength, md5Hash);
+    storeFileWithRetry(key, inputStream, md5Hash, contentLength);
+  }
+
+  // For COS, storeEmptyFile means creating a directory: a zero-length
+  // object whose key ends with the path delimiter.
+  @Override
+  public void storeEmptyFile(String key) throws IOException {
+    if (!key.endsWith(CosNFileSystem.PATH_DELIMITER)) {
+      key = key + CosNFileSystem.PATH_DELIMITER;
+    }
+
+    ObjectMetadata objectMetadata = new ObjectMetadata();
+    objectMetadata.setContentLength(0);
+    InputStream input = new ByteArrayInputStream(new byte[0]);
+    PutObjectRequest putObjectRequest =
+        new PutObjectRequest(bucketName, key, input, objectMetadata);
+    try {
+      PutObjectResult putObjectResult =
+          (PutObjectResult) callCOSClientWithRetry(putObjectRequest);
+      LOG.debug("Store empty file successfully. COS key: [{}], ETag: [{}].",
+          key, putObjectResult.getETag());
+    } catch (Exception e) {
+      String errMsg = String.format("Store empty file failed. "
+          + "COS key: [%s], exception: [%s]", key, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+    }
+  }
+
+  public PartETag uploadPart(File file, String key, String uploadId,
+      int partNum) throws IOException {
+    try (InputStream inputStream = new FileInputStream(file)) {
+      return uploadPart(inputStream, key, uploadId, partNum, file.length());
+    }
+  }
+
+  @Override
+  public PartETag uploadPart(InputStream inputStream, String key,
+      String uploadId, int partNum, long partSize) throws IOException {
+    UploadPartRequest uploadPartRequest = new UploadPartRequest();
+    uploadPartRequest.setBucketName(this.bucketName);
+    uploadPartRequest.setUploadId(uploadId);
+    uploadPartRequest.setInputStream(inputStream);
+    uploadPartRequest.setPartNumber(partNum);
+    uploadPartRequest.setPartSize(partSize);
+    uploadPartRequest.setKey(key);
+
+    try {
+      UploadPartResult uploadPartResult =
+          (UploadPartResult) callCOSClientWithRetry(uploadPartRequest);
+      return uploadPartResult.getPartETag();
+    } catch (Exception e) {
+      String errMsg = String.format("Current thread: [%d], COS key: [%s], "
+              + "upload id: [%s], part num: [%d], exception: [%s]",
+          Thread.currentThread().getId(), key, uploadId, partNum, e.toString());
+      handleException(new Exception(errMsg), key);
+    }
+
+    return null;
+  }
+
+  public void abortMultipartUpload(String key, String uploadId) {
+    LOG.info("Abort the multipart upload. COS key: [{}], upload id: [{}].",
+        key, uploadId);
+    AbortMultipartUploadRequest abortMultipartUploadRequest =
+        new AbortMultipartUploadRequest(bucketName, key, uploadId);
+    cosClient.abortMultipartUpload(abortMultipartUploadRequest);
+  }
+
+  /**
+   * Initialize a multipart upload and return the upload id.
+   *
+   * @param key The COS object key initialized to multipart upload.
+   * @return The multipart upload id.
+   */
+  public String getUploadId(String key) {
+    if (null == key || key.length() == 0) {
+      return "";
+    }
+
+    LOG.info("Initiate a multipart upload. bucket: [{}], COS key: [{}].",
+        bucketName, key);
+    InitiateMultipartUploadRequest initiateMultipartUploadRequest =
+        new InitiateMultipartUploadRequest(bucketName, key);
+    InitiateMultipartUploadResult initiateMultipartUploadResult =
+        cosClient.initiateMultipartUpload(initiateMultipartUploadRequest);
+    return initiateMultipartUploadResult.getUploadId();
+  }
+
+  /**
+   * Finish a multipart upload process, which will merge all parts uploaded.
+   *
+   * @param key          The COS object key to be finished.
+   * @param uploadId     The upload id of the multipart upload to be finished.
+   * @param partETagList The etag list of the part that has been uploaded.
+   * @return The result object of completing the multipart upload process.
+   */
+  public CompleteMultipartUploadResult completeMultipartUpload(
+      String key, String uploadId, List<PartETag> partETagList) {
+    Collections.sort(partETagList,
+        Comparator.comparingInt(PartETag::getPartNumber));
+    LOG.info("Complete the multipart upload. bucket: [{}], COS key: [{}], "
+        + "upload id: [{}].", bucketName, key, uploadId);
+    CompleteMultipartUploadRequest completeMultipartUploadRequest =
+        new CompleteMultipartUploadRequest(
+            bucketName, key, uploadId, partETagList);
+    return cosClient.completeMultipartUpload(completeMultipartUploadRequest);
+  }
+
+  private FileMetadata queryObjectMetadata(String key) throws IOException {
+    GetObjectMetadataRequest getObjectMetadataRequest =
+        new GetObjectMetadataRequest(bucketName, key);
+    try {
+      ObjectMetadata objectMetadata =
+          (ObjectMetadata) callCOSClientWithRetry(getObjectMetadataRequest);
+      long mtime = 0;
+      if (objectMetadata.getLastModified() != null) {
+        mtime = objectMetadata.getLastModified().getTime();
+      }
+      long fileSize = objectMetadata.getContentLength();
+      FileMetadata fileMetadata = new FileMetadata(key, fileSize, mtime,
+          !key.endsWith(CosNFileSystem.PATH_DELIMITER));
+      LOG.debug("Retrieve file metadata. COS key: [{}], ETag: [{}], "
+              + "length: [{}].", key, objectMetadata.getETag(),
+          objectMetadata.getContentLength());
+      return fileMetadata;
+    } catch (CosServiceException e) {
+      if (e.getStatusCode() != HttpStatus.SC_NOT_FOUND) {
+        String errorMsg = String.format("Retrieve file metadata file failed. "
+            + "COS key: [%s], CosServiceException: [%s].", key, e.toString());
+        LOG.error(errorMsg);
+        handleException(new Exception(errorMsg), key);
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public FileMetadata retrieveMetadata(String key) throws IOException {
+    if (key.endsWith(CosNFileSystem.PATH_DELIMITER)) {
+      key = key.substring(0, key.length() - 1);
+    }
+
+    if (!key.isEmpty()) {
+      FileMetadata fileMetadata = queryObjectMetadata(key);
+      if (fileMetadata != null) {
+        return fileMetadata;
+      }
+    }
+
+    // If the key is a directory.
+    key = key + CosNFileSystem.PATH_DELIMITER;
+    return queryObjectMetadata(key);
+  }
+
+  /**
+   * Download a COS object and return the input stream associated with it.
+   *
+   * @param key The object key that is being retrieved from the COS bucket.
+   * @return The input stream of the object content.
+   * @throws IOException If the download fails.
+   */
+  @Override
+  public InputStream retrieve(String key) throws IOException {
+    LOG.debug("Retrieve object key: [{}].", key);
+    GetObjectRequest getObjectRequest =
+        new GetObjectRequest(this.bucketName, key);
+    try {
+      COSObject cosObject =
+          (COSObject) callCOSClientWithRetry(getObjectRequest);
+      return cosObject.getObjectContent();
+    } catch (Exception e) {
+      String errMsg = String.format("Retrieving key: [%s] occurs "
+          + "an exception: [%s].", key, e.toString());
+      LOG.error("Retrieving COS key: [{}] occurs an exception: [{}].", key, e);
+      handleException(new Exception(errMsg), key);
+    }
+    // Unreachable: handleException always throws.
+    return null;
+  }
+
+  /**
+   * Retrieve a part of a COS object, starting at the specified position.
+   *
+   * @param key            The object key that is being retrieved from
+   *                       the COS bucket.
+   * @param byteRangeStart The start position of the part to be retrieved in
+   *                       the object.
+   * @return The input stream associated with the retrieved object.
+   * @throws IOException if failed to retrieve.
+   */
+  @Override
+  public InputStream retrieve(String key, long byteRangeStart)
+      throws IOException {
+    try {
+      LOG.debug("Retrieve COS key:[{}]. range start:[{}].",
+          key, byteRangeStart);
+      long fileSize = getFileLength(key);
+      long byteRangeEnd = fileSize - 1;
+      GetObjectRequest getObjectRequest =
+          new GetObjectRequest(this.bucketName, key);
+      if (byteRangeEnd >= byteRangeStart) {
+        getObjectRequest.setRange(byteRangeStart, fileSize - 1);
+      }
+      COSObject cosObject =
+          (COSObject) callCOSClientWithRetry(getObjectRequest);
+      return cosObject.getObjectContent();
+    } catch (Exception e) {
+      String errMsg =
+          String.format("Retrieving COS key: [%s] occurs an exception. " +
+                  "byte range start: [%s], exception: [%s].",
+              key, byteRangeStart, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+    }
+
+    // never will get here
+    return null;
+  }
+
+  /**
+   * Download a part of a COS object, delimited by the specified start and
+   * end positions.
+   *
+   * @param key            The object key that is being downloaded
+   * @param byteRangeStart The start position of the part to be retrieved in
+   *                       the object.
+   * @param byteRangeEnd   The end position of the part to be retrieved in
+   *                       the object.
+   * @return The input stream associated with the retrieved objects.
+   * @throws IOException If failed to retrieve.
+   */
+  @Override
+  public InputStream retrieveBlock(String key, long byteRangeStart,
+      long byteRangeEnd) throws IOException {
+    try {
+      GetObjectRequest request = new GetObjectRequest(this.bucketName, key);
+      request.setRange(byteRangeStart, byteRangeEnd);
+      COSObject cosObject = (COSObject) this.callCOSClientWithRetry(request);
+      return cosObject.getObjectContent();
+    } catch (CosServiceException e) {
+      String errMsg =
+          String.format("Retrieving key [%s] with byteRangeStart [%d] occurs " +
+                  "an CosServiceException: [%s].",
+              key, byteRangeStart, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+      return null;
+    } catch (CosClientException e) {
+      String errMsg =
+          String.format("Retrieving key [%s] with byteRangeStart [%d] "
+                  + "occurs an exception: [%s].",
+              key, byteRangeStart, e.toString());
+      LOG.error("Retrieving COS key: [{}] with byteRangeStart: [{}] " +
+          "occurs an exception: [{}].", key, byteRangeStart, e);
+      handleException(new Exception(errMsg), key);
+    }
+
+    return null;
+  }
+
+  @Override
+  public PartialListing list(String prefix, int maxListingLength)
+      throws IOException {
+    return list(prefix, maxListingLength, null, false);
+  }
+
+  @Override
+  public PartialListing list(String prefix, int maxListingLength,
+      String priorLastKey, boolean recurse) throws IOException {
+    return list(prefix, recurse ? null : CosNFileSystem.PATH_DELIMITER,
+        maxListingLength, priorLastKey);
+  }
+
+  /**
+   * List the metadata of all objects whose keys start with the specified
+   * prefix.
+   *
+   * @param prefix           The prefix to be listed.
+   * @param delimiter        The delimiter used to group keys that share the
+   *                         same path segment into a single common prefix.
+   * @param maxListingLength The maximum number of listed entries.
+   * @param priorLastKey     The last key in any previous search.
+   * @return A partial listing of the matching objects and common prefixes.
+   * @throws IOException If listing the objects fails.
+   */
+  private PartialListing list(String prefix, String delimiter,
+      int maxListingLength, String priorLastKey) throws IOException {
+    LOG.debug("List objects. prefix: [{}], delimiter: [{}], " +
+            "maxListLength: [{}], priorLastKey: [{}].",
+        prefix, delimiter, maxListingLength, priorLastKey);
+
+    if (!prefix.startsWith(CosNFileSystem.PATH_DELIMITER)) {
+      prefix += CosNFileSystem.PATH_DELIMITER;
+    }
+    ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
+    listObjectsRequest.setBucketName(bucketName);
+    listObjectsRequest.setPrefix(prefix);
+    listObjectsRequest.setDelimiter(delimiter);
+    listObjectsRequest.setMarker(priorLastKey);
+    listObjectsRequest.setMaxKeys(maxListingLength);
+    ObjectListing objectListing = null;
+    try {
+      objectListing =
+          (ObjectListing) callCOSClientWithRetry(listObjectsRequest);
+    } catch (Exception e) {
+      String errMsg = String.format("prefix: [%s], delimiter: [%s], "
+              + "maxListingLength: [%d], priorLastKey: [%s]. "
+              + "List objects occur an exception: [%s].", prefix,
+          (delimiter == null) ? "" : delimiter, maxListingLength, priorLastKey,
+          e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), prefix);
+    }
+    ArrayList<FileMetadata> fileMetadataArray = new ArrayList<>();
+    ArrayList<FileMetadata> commonPrefixArray = new ArrayList<>();
+
+    if (null == objectListing) {
+      String errMsg = String.format("List the prefix: [%s] failed. " +
+              "delimiter: [%s], max listing length:" +
+              " [%s], prior last key: [%s]",
+          prefix, delimiter, maxListingLength, priorLastKey);
+      handleException(new Exception(errMsg), prefix);
+    }
+
+    List<COSObjectSummary> summaries = objectListing.getObjectSummaries();
+    for (COSObjectSummary cosObjectSummary : summaries) {
+      String filePath = cosObjectSummary.getKey();
+      if (!filePath.startsWith(CosNFileSystem.PATH_DELIMITER)) {
+        filePath = CosNFileSystem.PATH_DELIMITER + filePath;
+      }
+      if (filePath.equals(prefix)) {
+        continue;
+      }
+      long mtime = 0;
+      if (cosObjectSummary.getLastModified() != null) {
+        mtime = cosObjectSummary.getLastModified().getTime();
+      }
+      long fileLen = cosObjectSummary.getSize();
+      fileMetadataArray.add(
+          new FileMetadata(filePath, fileLen, mtime, true));
+    }
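+    // Keys grouped under the delimiter come back as common prefixes and are
+    // surfaced as directory entries.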
+    List<String> commonPrefixes = objectListing.getCommonPrefixes();
+    for (String commonPrefix : commonPrefixes) {
+      if (!commonPrefix.startsWith(CosNFileSystem.PATH_DELIMITER)) {
+        commonPrefix = CosNFileSystem.PATH_DELIMITER + commonPrefix;
+      }
+      commonPrefixArray.add(
+          new FileMetadata(commonPrefix, 0, 0, false));
+    }
+
+    FileMetadata[] fileMetadata =
+        fileMetadataArray.toArray(new FileMetadata[0]);
+    FileMetadata[] commonPrefixMetaData =
+        commonPrefixArray.toArray(new FileMetadata[0]);
+    // when truncated is false, it means that listing is finished.
+    if (!objectListing.isTruncated()) {
+      return new PartialListing(
+          null, fileMetadata, commonPrefixMetaData);
+    } else {
+      return new PartialListing(
+          objectListing.getNextMarker(), fileMetadata, commonPrefixMetaData);
+    }
+  }
+
+  @Override
+  public void delete(String key) throws IOException {
+    LOG.debug("Delete object key: [{}] from bucket: {}.", key, this.bucketName);
+    try {
+      DeleteObjectRequest deleteObjectRequest =
+          new DeleteObjectRequest(bucketName, key);
+      callCOSClientWithRetry(deleteObjectRequest);
+    } catch (Exception e) {
+      String errMsg =
+          String.format("Delete key: [%s] occurs an exception: [%s].",
+              key, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+    }
+  }
+
+  public void rename(String srcKey, String dstKey) throws IOException {
+    LOG.debug("Rename source key: [{}] to dest key: [{}].", srcKey, dstKey);
+    try {
+      CopyObjectRequest copyObjectRequest =
+          new CopyObjectRequest(bucketName, srcKey, bucketName, dstKey);
+      callCOSClientWithRetry(copyObjectRequest);
+      DeleteObjectRequest deleteObjectRequest =
+          new DeleteObjectRequest(bucketName, srcKey);
+      callCOSClientWithRetry(deleteObjectRequest);
+    } catch (Exception e) {
+      String errMsg = String.format("Rename object unsuccessfully. "
+              + "source cos key: [%s], dest COS " +
+              "key: [%s], exception: [%s]",
+          srcKey,
+          dstKey, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), srcKey);
+    }
+  }
+
+  @Override
+  public void copy(String srcKey, String dstKey) throws IOException {
+    LOG.debug("Copy source key: [{}] to dest key: [{}].", srcKey, dstKey);
+    try {
+      CopyObjectRequest copyObjectRequest =
+          new CopyObjectRequest(bucketName, srcKey, bucketName, dstKey);
+      callCOSClientWithRetry(copyObjectRequest);
+    } catch (Exception e) {
+      String errMsg = String.format("Copy object unsuccessfully. "
+              + "source COS key: %s, dest COS key: " +
+              "%s, exception: %s",
+          srcKey,
+          dstKey, e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), srcKey);
+    }
+  }
+
+  @Override
+  public void purge(String prefix) throws IOException {
+    throw new IOException("purge not supported");
+  }
+
+  @Override
+  public void dump() throws IOException {
+    throw new IOException("dump not supported");
+  }
+
+  // Wrap the exception with the full COS path and rethrow it as an
+  // IOException.
+  private void handleException(Exception e, String key) throws IOException {
+    String cosPath = CosNFileSystem.SCHEME + "://" + bucketName + key;
+    String exceptInfo = String.format("%s : %s", cosPath, e.toString());
+    throw new IOException(exceptInfo);
+  }
+
+  @Override
+  public long getFileLength(String key) throws IOException {
+    LOG.debug("Get file length. COS key: {}", key);
+    GetObjectMetadataRequest getObjectMetadataRequest =
+        new GetObjectMetadataRequest(bucketName, key);
+    try {
+      ObjectMetadata objectMetadata =
+          (ObjectMetadata) callCOSClientWithRetry(getObjectMetadataRequest);
+      return objectMetadata.getContentLength();
+    } catch (Exception e) {
+      String errMsg = String.format("Getting file length occurs an exception." +
+              "COS key: %s, exception: %s", key,
+          e.toString());
+      LOG.error(errMsg);
+      handleException(new Exception(errMsg), key);
+      return 0; // unreachable, handleException always throws
+    }
+  }
+
+  private <X> Object callCOSClientWithRetry(X request)
+      throws CosServiceException, IOException {
+    String sdkMethod = "";
+    int retryIndex = 1;
+    while (true) {
+      try {
+        if (request instanceof PutObjectRequest) {
+          sdkMethod = "putObject";
+          return this.cosClient.putObject((PutObjectRequest) request);
+        } else if (request instanceof UploadPartRequest) {
+          sdkMethod = "uploadPart";
+          if (((UploadPartRequest) request).getInputStream()
+              instanceof ByteBufferInputStream) {
+            ((UploadPartRequest) request).getInputStream()
+                .mark((int) ((UploadPartRequest) request).getPartSize());
+          }
+          return this.cosClient.uploadPart((UploadPartRequest) request);
+        } else if (request instanceof GetObjectMetadataRequest) {
+          sdkMethod = "queryObjectMeta";
+          return this.cosClient.getObjectMetadata(
+              (GetObjectMetadataRequest) request);
+        } else if (request instanceof DeleteObjectRequest) {
+          sdkMethod = "deleteObject";
+          this.cosClient.deleteObject((DeleteObjectRequest) request);
+          return new Object();
+        } else if (request instanceof CopyObjectRequest) {
+          sdkMethod = "copyFile";
+          return this.cosClient.copyObject((CopyObjectRequest) request);
+        } else if (request instanceof GetObjectRequest) {
+          sdkMethod = "getObject";
+          return this.cosClient.getObject((GetObjectRequest) request);
+        } else if (request instanceof ListObjectsRequest) {
+          sdkMethod = "listObjects";
+          return this.cosClient.listObjects((ListObjectsRequest) request);
+        } else {
+          throw new IOException("no such method");
+        }
+      } catch (CosServiceException cse) {
+        String errMsg = String.format("Call cos sdk failed, "
+                + "retryIndex: [%d / %d], "
+                + "call method: %s, exception: %s",
+            retryIndex, this.maxRetryTimes, sdkMethod, cse.toString());
+        int statusCode = cse.getStatusCode();
+        // Retry all server errors
+        if (statusCode / 100 == 5) {
+          if (retryIndex <= this.maxRetryTimes) {
+            LOG.info(errMsg);
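+            // Randomized backoff that grows linearly with the retry index
+            // (between 300 ms and 500 ms per attempt).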
+            long sleepLeast = retryIndex * 300L;
+            long sleepBound = retryIndex * 500L;
+            try {
+              if (request instanceof UploadPartRequest) {
+                if (((UploadPartRequest) request).getInputStream()
+                    instanceof ByteBufferInputStream) {
+                  ((UploadPartRequest) request).getInputStream().reset();
+                }
+              }
+              Thread.sleep(
+                  ThreadLocalRandom.current().nextLong(sleepLeast, sleepBound));
+              ++retryIndex;
+            } catch (InterruptedException e) {
+              Thread.currentThread().interrupt();
+              throw new IOException(e.toString(), e);
+            }
+          } else {
+            LOG.error(errMsg);
+            throw new IOException(errMsg);
+          }
+        } else {
+          throw cse;
+        }
+      } catch (Exception e) {
+        String errMsg = String.format("Call cos sdk failed, "
+            + "call method: %s, exception: %s", sdkMethod, e.toString());
+        LOG.error(errMsg);
+        throw new IOException(errMsg);
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+    if (null != this.cosClient) {
+      this.cosClient.shutdown();
+    }
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/FileMetadata.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/FileMetadata.java
new file mode 100644
index 0000000..c11c887
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/FileMetadata.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * <p>
+ * Holds basic metadata for a file stored in a {@link NativeFileSystemStore}.
+ * </p>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class FileMetadata {
+  private final String key;
+  private final long length;
+  private final long lastModified;
+  private final boolean isFile;
+
+  FileMetadata(String key, long length, long lastModified) {
+    this(key, length, lastModified, true);
+  }
+
+  FileMetadata(String key, long length, long lastModified, boolean isFile) {
+    this.key = key;
+    this.length = length;
+    this.lastModified = lastModified;
+    this.isFile = isFile;
+  }
+
+  public String getKey() {
+    return key;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  public long getLastModified() {
+    return lastModified;
+  }
+
+  @Override
+  public String toString() {
+    return "FileMetadata[" + key + ", " + length + ", " + lastModified + ", "
+        + "file?" + isFile + "]";
+  }
+
+  public boolean isFile() {
+    return isFile;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/NativeFileSystemStore.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/NativeFileSystemStore.java
new file mode 100644
index 0000000..536c6de
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/NativeFileSystemStore.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.List;
+
+import com.qcloud.cos.model.CompleteMultipartUploadResult;
+import com.qcloud.cos.model.PartETag;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * <p>
+ * An abstraction for a key-based {@link File} store.
+ * </p>
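+ * <p>
+ * A minimal multipart upload sketch (assuming an initialized {@code store},
+ * an open {@code InputStream in} and a {@code partSize}); COS part numbers
+ * are 1-based:
+ * </p>
+ * <pre>{@code
+ *   String uploadId = store.getUploadId(key);
+ *   List<PartETag> etags = new ArrayList<>();
+ *   etags.add(store.uploadPart(in, key, uploadId, 1, partSize));
+ *   store.completeMultipartUpload(key, uploadId, etags);
+ * }</pre>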
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+interface NativeFileSystemStore {
+
+  void initialize(URI uri, Configuration conf) throws IOException;
+
+  void storeFile(String key, File file, byte[] md5Hash) throws IOException;
+
+  void storeFile(String key, InputStream inputStream, byte[] md5Hash,
+      long contentLength) throws IOException;
+
+  void storeEmptyFile(String key) throws IOException;
+
+  CompleteMultipartUploadResult completeMultipartUpload(
+      String key, String uploadId, List<PartETag> partETagList);
+
+  void abortMultipartUpload(String key, String uploadId);
+
+  String getUploadId(String key);
+
+  PartETag uploadPart(File file, String key, String uploadId, int partNum)
+      throws IOException;
+
+  PartETag uploadPart(InputStream inputStream, String key, String uploadId,
+      int partNum, long partSize) throws IOException;
+
+  FileMetadata retrieveMetadata(String key) throws IOException;
+
+  InputStream retrieve(String key) throws IOException;
+
+  InputStream retrieve(String key, long byteRangeStart) throws IOException;
+
+  InputStream retrieveBlock(String key, long byteRangeStart, long byteRangeEnd)
+      throws IOException;
+
+  long getFileLength(String key) throws IOException;
+
+  PartialListing list(String prefix, int maxListingLength) throws IOException;
+
+  PartialListing list(String prefix, int maxListingLength,
+      String priorLastKey, boolean recursive) throws IOException;
+
+  void delete(String key) throws IOException;
+
+  void copy(String srcKey, String dstKey) throws IOException;
+
+  /**
+   * Delete all keys with the given prefix. Used for testing.
+   *
+   * @throws IOException if purge is not supported
+   */
+  void purge(String prefix) throws IOException;
+
+  /**
+   * Diagnostic method to dump state to the console.
+   *
+   * @throws IOException if dump is not supported
+   */
+  void dump() throws IOException;
+
+  void close();
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/PartialListing.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/PartialListing.java
new file mode 100644
index 0000000..78cba38
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/PartialListing.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * <p>
+ * Holds information on a directory listing for a
+ * {@link NativeFileSystemStore}.
+ * This includes the {@link FileMetadata files} and directories
+ * (their names) contained in a directory.
+ * </p>
+ * <p>
+ * This listing may be returned in chunks, so a <code>priorLastKey</code>
+ * is provided so that the next chunk may be requested.
+ * </p>
+ *
+ * @see NativeFileSystemStore#list(String, int, String, boolean)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class PartialListing {
+
+  private final String priorLastKey;
+  private final FileMetadata[] files;
+  private final FileMetadata[] commonPrefixes;
+
+  PartialListing(String priorLastKey, FileMetadata[] files,
+                 FileMetadata[] commonPrefixes) {
+    this.priorLastKey = priorLastKey;
+    this.files = files;
+    this.commonPrefixes = commonPrefixes;
+  }
+
+  public FileMetadata[] getFiles() {
+    return files;
+  }
+
+  public FileMetadata[] getCommonPrefixes() {
+    return commonPrefixes;
+  }
+
+  public String getPriorLastKey() {
+    return priorLastKey;
+  }
+
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Unit.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Unit.java
new file mode 100644
index 0000000..5950ba7
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/Unit.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+/**
+ * Constant definition of storage unit.
+ */
+public final class Unit {
+  private Unit() {
+  }
+
+  public static final int KB = 1024;
+  public static final int MB = 1024 * KB;
+  public static final int GB = 1024 * MB;
+  public static final long TB = (long) 1024 * GB;
+  public static final long PB = (long) 1024 * TB;
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java
new file mode 100644
index 0000000..e900b99
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialProviderList.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.base.Preconditions;
+import com.qcloud.cos.auth.AnonymousCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.exception.CosClientException;
+import com.qcloud.cos.utils.StringUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An ordered list of COS credential providers.
+ */
+public class COSCredentialProviderList implements
+    COSCredentialsProvider, AutoCloseable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(COSCredentialProviderList.class);
+
+  private static final String NO_COS_CREDENTIAL_PROVIDERS =
+      "No COS Credential Providers";
+  private static final String CREDENTIALS_REQUESTED_WHEN_CLOSED =
+      "Credentials requested after provider list was closed";
+
+  private final List<COSCredentialsProvider> providers =
+      new ArrayList<>(1);
+  private boolean reuseLastProvider = true;
+  private COSCredentialsProvider lastProvider;
+
+  private final AtomicInteger refCount = new AtomicInteger(1);
+  private final AtomicBoolean isClosed = new AtomicBoolean(false);
+
+  public COSCredentialProviderList() {
+  }
+
+  public COSCredentialProviderList(
+      Collection<COSCredentialsProvider> providers) {
+    this.providers.addAll(providers);
+  }
+
+  public void add(COSCredentialsProvider provider) {
+    this.providers.add(provider);
+  }
+
+  public int getRefCount() {
+    return this.refCount.get();
+  }
+
+  public void checkNotEmpty() {
+    if (this.providers.isEmpty()) {
+      throw new NoAuthWithCOSException(NO_COS_CREDENTIAL_PROVIDERS);
+    }
+  }
+
+  public COSCredentialProviderList share() {
+    Preconditions.checkState(!this.closed(), "Provider list is closed");
+    this.refCount.incrementAndGet();
+    return this;
+  }
+
+  public boolean closed() {
+    return this.isClosed.get();
+  }
+
+  @Override
+  public COSCredentials getCredentials() {
+    if (this.closed()) {
+      throw new NoAuthWithCOSException(CREDENTIALS_REQUESTED_WHEN_CLOSED);
+    }
+
+    this.checkNotEmpty();
+
+    if (this.reuseLastProvider && this.lastProvider != null) {
+      return this.lastProvider.getCredentials();
+    }
+
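+    // Try each provider in order; the first one that yields usable (or
+    // anonymous) credentials is remembered and reused on later calls.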
+    for (COSCredentialsProvider provider : this.providers) {
+      try {
+        COSCredentials credentials = provider.getCredentials();
+        if (!StringUtils.isNullOrEmpty(credentials.getCOSAccessKeyId())
+            && !StringUtils.isNullOrEmpty(credentials.getCOSSecretKey())
+            || credentials instanceof AnonymousCOSCredentials) {
+          this.lastProvider = provider;
+          return credentials;
+        }
+      } catch (CosClientException e) {
+        LOG.warn("No credentials provided by {}: {}", provider, e.toString());
+      }
+    }
+
+    throw new NoAuthWithCOSException(
+        "No COS Credentials provided by " + this.providers.toString());
+  }
+
+  @Override
+  public void close() throws Exception {
+    if (this.closed()) {
+      return;
+    }
+
+    int remainder = this.refCount.decrementAndGet();
+    if (remainder != 0) {
+      return;
+    }
+    this.isClosed.set(true);
+
+    for (COSCredentialsProvider provider : this.providers) {
+      if (provider instanceof Closeable) {
+        ((Closeable) provider).close();
+      }
+    }
+  }
+
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java
new file mode 100644
index 0000000..0a7786b
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/EnvironmentVariableCredentialProvider.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.auth.BasicCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.exception.CosClientException;
+import com.qcloud.cos.utils.StringUtils;
+
+import org.apache.hadoop.fs.cosn.Constants;
+
+/**
+ * A provider that obtains the COS credentials from environment variables.
+ */
+public class EnvironmentVariableCredentialProvider
+    implements COSCredentialsProvider {
+  @Override
+  public COSCredentials getCredentials() {
+    String secretId = System.getenv(Constants.COSN_SECRET_ID_ENV);
+    String secretKey = System.getenv(Constants.COSN_SECRET_KEY_ENV);
+
+    secretId = StringUtils.trim(secretId);
+    secretKey = StringUtils.trim(secretKey);
+
+    if (!StringUtils.isNullOrEmpty(secretId)
+        && !StringUtils.isNullOrEmpty(secretKey)) {
+      return new BasicCOSCredentials(secretId, secretKey);
+    } else {
+      throw new CosClientException(
+          "Unable to load COS credentials from environment variables" +
+              "(COS_SECRET_ID or COS_SECRET_KEY)");
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "EnvironmentVariableCredentialProvider{}";
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/NoAuthWithCOSException.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/NoAuthWithCOSException.java
new file mode 100644
index 0000000..fa188bf
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/NoAuthWithCOSException.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.exception.CosClientException;
+
+/**
+ * Exception thrown when no credentials can be obtained.
+ */
+public class NoAuthWithCOSException extends CosClientException {
+  public NoAuthWithCOSException(String message, Throwable t) {
+    super(message, t);
+  }
+
+  public NoAuthWithCOSException(String message) {
+    super(message);
+  }
+
+  public NoAuthWithCOSException(Throwable t) {
+    super(t);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java
new file mode 100644
index 0000000..f0635fc
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/SimpleCredentialProvider.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
+
+import com.qcloud.cos.auth.BasicCOSCredentials;
+import com.qcloud.cos.auth.COSCredentials;
+import com.qcloud.cos.auth.COSCredentialsProvider;
+import com.qcloud.cos.exception.CosClientException;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.cosn.CosNConfigKeys;
+
+/**
+ * The credential provider that obtains the COS credentials from the Hadoop
+ * configuration.
+ */
+public class SimpleCredentialProvider implements COSCredentialsProvider {
+  private String secretId;
+  private String secretKey;
+
+  public SimpleCredentialProvider(Configuration conf) {
+    this.secretId = conf.get(
+        CosNConfigKeys.COSN_SECRET_ID_KEY
+    );
+    this.secretKey = conf.get(
+        CosNConfigKeys.COSN_SECRET_KEY_KEY
+    );
+  }
+
+  @Override
+  public COSCredentials getCredentials() {
+    if (!StringUtils.isEmpty(this.secretId)
+        && !StringUtils.isEmpty(this.secretKey)) {
+      return new BasicCOSCredentials(this.secretId, this.secretKey);
+    }
+    throw new CosClientException("secret id or secret key is unset");
+  }
+
+}
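As a usage illustration, SimpleCredentialProvider only needs a Configuration carrying the two secret properties. A minimal sketch, assuming the fs.cosn.userinfo.secretId / fs.cosn.userinfo.secretKey property names described in the test core-site.xml later in this patch; the key values below are placeholders.

import com.qcloud.cos.auth.COSCredentials;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider;

public final class SimpleCredentialProviderDemo {
  private SimpleCredentialProviderDemo() {
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Normally supplied via core-site.xml; set inline here for the demo only.
    conf.set("fs.cosn.userinfo.secretId", "AKIDEXAMPLE");
    conf.set("fs.cosn.userinfo.secretKey", "EXAMPLEKEY");

    COSCredentials credentials =
        new SimpleCredentialProvider(conf).getCredentials();
    System.out.println("Loaded secret id: " + credentials.getCOSAccessKeyId());
  }
}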
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/package-info.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/package-info.java
new file mode 100644
index 0000000..4b6f8cf
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.auth;
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/package-info.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/package-info.java
new file mode 100644
index 0000000..b466082
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestConfigKey.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestConfigKey.java
new file mode 100644
index 0000000..4d5ee48
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestConfigKey.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+/**
+ * Configuration options for the CosN file system for testing.
+ */
+public final class CosNTestConfigKey {
+  private CosNTestConfigKey() {
+  }
+
+  public static final String TEST_COS_FILESYSTEM_CONF_KEY =
+      "test.fs.cosn.name";
+  public static final String DEFAULT_TEST_COS_FILESYSTEM_CONF_VALUE =
+      "";
+  public static final String TEST_UNIQUE_FORK_ID_KEY =
+      "test.unique.fork.id";
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestUtils.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestUtils.java
new file mode 100644
index 0000000..8afce51
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/CosNTestUtils.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.junit.internal.AssumptionViolatedException;
+
+/**
+ * Utilities for the CosN tests.
+ */
+public final class CosNTestUtils {
+
+  private CosNTestUtils() {
+  }
+
+  /**
+   * Create the file system for test.
+   *
+   * @param configuration the Hadoop configuration
+   * @return the file system for testing
+   * @throws IOException if the file system cannot be created or initialized.
+   */
+  public static CosNFileSystem createTestFileSystem(
+      Configuration configuration) throws IOException {
+    String fsName = configuration.getTrimmed(
+        CosNTestConfigKey.TEST_COS_FILESYSTEM_CONF_KEY,
+        CosNTestConfigKey.DEFAULT_TEST_COS_FILESYSTEM_CONF_VALUE);
+
+    boolean liveTest = StringUtils.isNotEmpty(fsName);
+    URI testUri = liveTest ? URI.create(fsName) : null;
+    if (liveTest) {
+      liveTest = testUri.getScheme().equals(CosNFileSystem.SCHEME);
+    }
+    if (!liveTest) {
+      throw new AssumptionViolatedException("no test file system in " +
+          fsName);
+    }
+
+    CosNFileSystem cosFs = new CosNFileSystem();
+    cosFs.initialize(testUri, configuration);
+    return cosFs;
+  }
+
+  /**
+   * Create a dir path for test.
+   * The value of {@link CosNTestConfigKey#TEST_UNIQUE_FORK_ID_KEY}
+   * will be used if it is set.
+   *
+   * @param defVal default value
+   * @return The test path
+   */
+  public static Path createTestPath(Path defVal) {
+    String testUniqueForkId = System.getProperty(
+        CosNTestConfigKey.TEST_UNIQUE_FORK_ID_KEY);
+    return testUniqueForkId == null ? defVal :
+        new Path("/" + testUniqueForkId, "test");
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNInputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNInputStream.java
new file mode 100644
index 0000000..79884ba
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNInputStream.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Random;
+
+/**
+ * CosNInputStream Tester.
+ */
+public class TestCosNInputStream {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestCosNInputStream.class);
+
+  private FileSystem fs;
+
+  private Path testRootDir;
+
+  @Before
+  public void setUp() throws IOException {
+    Configuration configuration = new Configuration();
+    this.fs = CosNTestUtils.createTestFileSystem(configuration);
+    this.testRootDir = CosNTestUtils.createTestPath(new Path("/test"));
+    LOG.info("test root dir: " + this.testRootDir);
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (null != this.fs) {
+      this.fs.delete(this.testRootDir, true);
+    }
+  }
+
+  /**
+   * Method: seek(long pos).
+   */
+  @Test
+  public void testSeek() throws Exception {
+    Path seekTestFilePath = new Path(this.testRootDir + "/"
+        + "seekTestFile");
+    long fileSize = 5 * Unit.MB;
+
+    ContractTestUtils.generateTestFile(
+        this.fs, seekTestFilePath, fileSize, 256, 255);
+    LOG.info("5MB file for seek test has created.");
+
+    FSDataInputStream inputStream = this.fs.open(seekTestFilePath);
+    int seekTimes = 5;
+    for (int i = 0; i != seekTimes; i++) {
+      long pos = fileSize / (seekTimes - i) - 1;
+      inputStream.seek(pos);
+      assertTrue("expected position at: " +
+              pos + ", but got: " + inputStream.getPos(),
+          inputStream.getPos() == pos);
+      LOG.info("completed seeking at pos: " + inputStream.getPos());
+    }
+    LOG.info("begin to random position seeking test...");
+    Random random = new Random();
+    for (int i = 0; i < seekTimes; i++) {
+      long pos = Math.abs(random.nextLong()) % fileSize;
+      LOG.info("seeking for pos: " + pos);
+      inputStream.seek(pos);
+      assertTrue("expected position at: " +
+              pos + ", but got: " + inputStream.getPos(),
+          inputStream.getPos() == pos);
+      LOG.info("completed seeking at pos: " + inputStream.getPos());
+    }
+  }
+
+  /**
+   * Method: getPos().
+   */
+  @Test
+  public void testGetPos() throws Exception {
+    Path seekTestFilePath = new Path(this.testRootDir + "/" +
+        "seekTestFile");
+    long fileSize = 5 * Unit.MB;
+    ContractTestUtils.generateTestFile(
+        this.fs, seekTestFilePath, fileSize, 256, 255);
+    LOG.info("5MB file for getPos test has created.");
+
+    FSDataInputStream inputStream = this.fs.open(seekTestFilePath);
+    Random random = new Random();
+    long pos = Math.abs(random.nextLong()) % fileSize;
+    inputStream.seek(pos);
+    assertTrue("expected position at: " +
+            pos + ", but got: " + inputStream.getPos(),
+        inputStream.getPos() == pos);
+    LOG.info("completed get pos tests.");
+  }
+
+  /**
+   * Method: seekToNewSource(long targetPos).
+   */
+  @Ignore("Not ready yet")
+  public void testSeekToNewSource() throws Exception {
+    LOG.info("Currently it is not supported to " +
+        "seek the offset in a new source.");
+  }
+
+  /**
+   * Method: read().
+   */
+  @Test
+  public void testRead() throws Exception {
+    final int bufLen = 256;
+    Path readTestFilePath = new Path(this.testRootDir + "/"
+        + "testReadSmallFile.txt");
+    long fileSize = 5 * Unit.MB;
+
+    ContractTestUtils.generateTestFile(
+        this.fs, readTestFilePath, fileSize, 256, 255);
+    LOG.info("read test file: " + readTestFilePath + " has created.");
+
+    FSDataInputStream inputStream = this.fs.open(readTestFilePath);
+    byte[] buf = new byte[bufLen];
+    long bytesRead = 0;
+    while (bytesRead < fileSize) {
+      int bytes = 0;
+      if (fileSize - bytesRead < bufLen) {
+        int remaining = (int) (fileSize - bytesRead);
+        bytes = inputStream.read(buf, 0, remaining);
+      } else {
+        bytes = inputStream.read(buf, 0, bufLen);
+      }
+      bytesRead += bytes;
+
+      if (bytesRead % (1 * Unit.MB) == 0) {
+        int available = inputStream.available();
+        assertTrue("expected remaining: " + (fileSize - bytesRead) +
+            " but got: " + available, (fileSize - bytesRead) == available);
+        LOG.info("Bytes read: " +
+            Math.round((double) bytesRead / Unit.MB) + "MB");
+      }
+    }
+
+    assertTrue(inputStream.available() == 0);
+    IOUtils.closeStream(inputStream);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNOutputStream.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNOutputStream.java
new file mode 100644
index 0000000..7fd8897
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/TestCosNOutputStream.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+
+/**
+ * CosNOutputStream Tester.
+ * <p>
+ * If the test.fs.cosn.name property is not set, all test cases will fail.
+ */
+public class TestCosNOutputStream {
+  private FileSystem fs;
+  private Path testRootDir;
+
+  @Rule
+  public Timeout timeout = new Timeout(3600 * 1000);
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration configuration = new Configuration();
+    configuration.setInt(
+        CosNConfigKeys.COSN_BLOCK_SIZE_KEY, 2 * Unit.MB);
+    configuration.setLong(
+        CosNConfigKeys.COSN_UPLOAD_BUFFER_SIZE_KEY,
+        CosNConfigKeys.DEFAULT_UPLOAD_BUFFER_SIZE);
+    this.fs = CosNTestUtils.createTestFileSystem(configuration);
+    this.testRootDir = new Path("/test");
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testEmptyFileUpload() throws IOException {
+    ContractTestUtils.createAndVerifyFile(this.fs, this.testRootDir, 0);
+  }
+
+  @Test
+  public void testSingleFileUpload() throws IOException {
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 1 * Unit.MB - 1);
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 1 * Unit.MB);
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 2 * Unit.MB - 1);
+  }
+
+  @Test
+  public void testLargeFileUpload() throws IOException {
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 2 * Unit.MB);
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 2 * Unit.MB + 1);
+    ContractTestUtils.createAndVerifyFile(
+        this.fs, this.testRootDir, 100 * Unit.MB);
+    // In principle, a maximum boundary test (file size: 2MB * 10000 - 1)
+    // should be provided here, but it is skipped due to network bandwidth
+    // and test time constraints.
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/CosNContract.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/CosNContract.java
new file mode 100644
index 0000000..cd40979
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/CosNContract.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
+import org.apache.hadoop.fs.cosn.CosNFileSystem;
+import org.apache.hadoop.fs.cosn.CosNTestUtils;
+
+/**
+ * The contract of CosN: only enabled if the test bucket is provided.
+ */
+public class CosNContract extends AbstractBondedFSContract {
+  private static final String CONTRACT_XML = "contract/cosn.xml";
+
+  protected CosNContract(Configuration conf) {
+    super(conf);
+    addConfResource(CONTRACT_XML);
+  }
+
+  @Override
+  public String getScheme() {
+    return CosNFileSystem.SCHEME;
+  }
+
+  @Override
+  public Path getTestPath() {
+    return CosNTestUtils.createTestPath(super.getTestPath());
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractCreate.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractCreate.java
new file mode 100644
index 0000000..9488bd4
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractCreate.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for creating files.
+ */
+public class TestCosNContractCreate extends AbstractContractCreateTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDelete.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDelete.java
new file mode 100644
index 0000000..1c23ac2
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDelete.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for deleting files.
+ */
+public class TestCosNContractDelete extends AbstractContractDeleteTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDistCp.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDistCp.java
new file mode 100644
index 0000000..75ac53b
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractDistCp.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.cosn.CosNConfigKeys;
+import org.apache.hadoop.fs.cosn.Unit;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+
+/**
+ * Contract test suite covering CosN integration with DistCp.
+ */
+public class TestCosNContractDistCp extends AbstractContractDistCpTest {
+
+  private static final int MULTIPART_SETTING = 2 * Unit.MB;
+  private static final long UPLOAD_BUFFER_POOL_SIZE = 5 * 2 * Unit.MB;
+  private static final int UPLOAD_THREAD_POOL_SIZE = 5;
+  private static final int COPY_THREAD_POOL_SIZE = 3;
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new CosNContract(conf);
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration newConf = super.createConfiguration();
+    newConf.setInt(CosNConfigKeys.COSN_BLOCK_SIZE_KEY,
+        MULTIPART_SETTING);
+    newConf.setLong(CosNConfigKeys.COSN_UPLOAD_BUFFER_SIZE_KEY,
+        UPLOAD_BUFFER_POOL_SIZE);
+    newConf.setInt(CosNConfigKeys.UPLOAD_THREAD_POOL_SIZE_KEY,
+        UPLOAD_THREAD_POOL_SIZE);
+    newConf.setInt(CosNConfigKeys.COPY_THREAD_POOL_SIZE_KEY,
+        COPY_THREAD_POOL_SIZE);
+    return newConf;
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractGetFileStatus.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractGetFileStatus.java
new file mode 100644
index 0000000..9fba6ee
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractGetFileStatus.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests covering getFileStatus.
+ */
+public class TestCosNContractGetFileStatus
+    extends AbstractContractGetFileStatusTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new CosNContract(conf);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractMkdir.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractMkdir.java
new file mode 100644
index 0000000..e704e13
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractMkdir.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for making directories.
+ */
+public class TestCosNContractMkdir extends AbstractContractMkdirTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractOpen.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractOpen.java
new file mode 100644
index 0000000..1bb732b
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractOpen.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for opening files.
+ */
+public class TestCosNContractOpen extends AbstractContractOpenTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRename.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRename.java
new file mode 100644
index 0000000..f82c8df
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRename.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for renaming a file.
+ */
+public class TestCosNContractRename extends AbstractContractRenameTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRootDir.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRootDir.java
new file mode 100644
index 0000000..145aee9
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractRootDir.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * Root directory operations against a COS bucket.
+ */
+public class TestCosNContractRootDir
+    extends AbstractContractRootDirectoryTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractSeek.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractSeek.java
new file mode 100644
index 0000000..e391567
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/TestCosNContractSeek.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+
+/**
+ * CosN contract tests for seeking a position in a file.
+ */
+public class TestCosNContractSeek extends AbstractContractSeekTest {
+  @Override
+  protected AbstractFSContract createContract(Configuration configuration) {
+    return new CosNContract(configuration);
+  }
+}
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/package-info.java b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/package-info.java
new file mode 100644
index 0000000..97598b1
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/java/org/apache/hadoop/fs/cosn/contract/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.cosn.contract;
\ No newline at end of file
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/contract/cosn.xml b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/contract/cosn.xml
new file mode 100644
index 0000000..ac4f58c
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/contract/cosn.xml
@@ -0,0 +1,120 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<configuration>
+  <property>
+    <name>fs.contract.test.root-tests-enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-blobstore</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.create-overwrites-directory</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.create-visibility-delayed</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-case-sensitive</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-returns-false-if-source-missing</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-append</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-directory-delete</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-rename</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-block-locality</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-concat</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek-on-closed-file</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rejects-seek-past-eof</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-overwrites-dest</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-getfilestatus</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-unix-permissions</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-strict-exceptions</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.test.random-seek-count</name>
+    <value>10</value>
+  </property>
+
+</configuration>
\ No newline at end of file
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/core-site.xml b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/core-site.xml
new file mode 100644
index 0000000..fbd23bb
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/core-site.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>fs.cosn.credentials.provider</name>
+    <value>org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider</value>
+    <description>
+      This option allows the user to specify how to get the credentials:
+      a comma-separated list of class names of credential providers that
+      implement com.qcloud.cos.auth.COSCredentialsProvider.
+
+      1. org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider: obtains
+      the secret id and secret key from fs.cosn.userinfo.secretId and
+      fs.cosn.userinfo.secretKey in core-site.xml.
+      2. org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider:
+      obtains the secret id and secret key from the system environment
+      variables named COS_SECRET_ID and COS_SECRET_KEY.
+
+      If unspecified, the default order of credential providers is:
+      1. org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider
+      2. org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider
+    </description>
+  </property>
+  <property>
+    <name>fs.cosn.impl</name>
+    <value>org.apache.hadoop.fs.cosn.CosNFileSystem</value>
+    <description>
+      The implementation class of the CosN Filesystem.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.AbstractFileSystem.cosn.impl</name>
+    <value>org.apache.hadoop.fs.cosn.CosN</value>
+    <description>
+      The implementation class of the CosN AbstractFileSystem.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.cosn.tmp.dir</name>
+    <value>/tmp/hadoop_cos</value>
+    <description>
+      Temporary files will be placed here.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.cosn.block.size</name>
+    <value>8388608</value>
+    <description>
+      The block size used by the cosn filesystem, which is also the part
+      size for MultipartUpload. Since COS supports at most 10000 parts per
+      file, the user should estimate the maximum size of a single file:
+      for example, an 8 MB part size allows writing a single file of up to
+      8 MB * 10000, i.e. about 78 GB.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.cosn.upload.buffer.size</name>
+    <value>536870912</value>
+    <description>The total size of the memory buffer pool.</description>
+  </property>
+
+  <property>
+    <name>fs.cosn.read.ahead.block.size</name>
+    <value>1048576</value>
+    <description>
+      Bytes to read ahead during a seek() before closing and
+      re-opening the cosn HTTP connection.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.cosn.read.ahead.queue.size</name>
+    <value>64</value>
+    <description>
+      The length of the pre-read queue.
+    </description>
+  </property>
+
+  <include xmlns="http://www.w3.org/2001/XInclude" href="auth-keys.xml">
+    <fallback/>
+  </include>
+
+</configuration>
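The settings in this test core-site.xml can also be applied programmatically. A minimal sketch using only the property names defined above; the bucket name and credential values are placeholders, and a real deployment may need additional options (for example the bucket region) that are outside this file.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public final class CosNClientSketch {
  private CosNClientSketch() {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.cosn.impl", "org.apache.hadoop.fs.cosn.CosNFileSystem");
    conf.set("fs.cosn.credentials.provider",
        "org.apache.hadoop.fs.cosn.auth.SimpleCredentialProvider,"
            + "org.apache.hadoop.fs.cosn.auth.EnvironmentVariableCredentialProvider");
    conf.set("fs.cosn.userinfo.secretId", "AKIDEXAMPLE");
    conf.set("fs.cosn.userinfo.secretKey", "EXAMPLEKEY");

    // "examplebucket-125000000" is a placeholder; use a real bucket to run this.
    try (FileSystem fs = FileSystem.get(
        URI.create("cosn://examplebucket-125000000/"), conf)) {
      System.out.println("Working directory: " + fs.getWorkingDirectory());
    }
  }
}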
diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/log4j.properties b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/log4j.properties
new file mode 100644
index 0000000..1a6baae
--- /dev/null
+++ b/hadoop-cloud-storage-project/hadoop-cos/src/test/resources/log4j.properties
@@ -0,0 +1,18 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml
index a96431f..f39e8c3 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -31,6 +31,7 @@
 
   <modules>
     <module>hadoop-cloud-storage</module>
+    <module>hadoop-cos</module>
   </modules>
 
   <build>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index 80364ce..9e601e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -330,8 +330,8 @@
       throws IOException {
     checkStream();
     if (!(in instanceof PositionedReadable)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "positioned read.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support positioned read.");
     }
     final int n = ((PositionedReadable) in).read(position, buffer, offset,
         length);
@@ -351,8 +351,8 @@
       throws IOException {
     checkStream();
     if (!(in instanceof ByteBufferPositionedReadable)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "positioned reads with byte buffers.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support positioned reads with byte buffers.");
     }
     int bufPos = buf.position();
     final int n = ((ByteBufferPositionedReadable) in).read(position, buf);
@@ -363,7 +363,27 @@
 
     return n;
   }
-  
+
+  /**
+   * Positioned readFully using {@link ByteBuffer}s. This method is thread-safe.
+   */
+  @Override
+  public void readFully(long position, final ByteBuffer buf)
+      throws IOException {
+    checkStream();
+    if (!(in instanceof ByteBufferPositionedReadable)) {
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support positioned reads with byte buffers.");
+    }
+    int bufPos = buf.position();
+    ((ByteBufferPositionedReadable) in).readFully(position, buf);
+    final int n = buf.position() - bufPos;
+    if (n > 0) {
+      // This operation does not change the current offset of the file
+      decrypt(position, buf, n, bufPos);
+    }
+  }
+
   /**
    * Decrypt length bytes in buffer starting at offset. Output is also put 
    * into buffer starting at offset. It is thread-safe.
@@ -480,8 +500,8 @@
       throws IOException {
     checkStream();
     if (!(in instanceof PositionedReadable)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "positioned readFully.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support positioned readFully.");
     }
     ((PositionedReadable) in).readFully(position, buffer, offset, length);
     if (length > 0) {
@@ -513,8 +533,8 @@
       }
     } else {
       if (!(in instanceof Seekable)) {
-        throw new UnsupportedOperationException("This stream does not " +
-                "support seek.");
+        throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+            + " does not support seek.");
       }
       ((Seekable) in).seek(pos);
       resetStreamOffset(pos);
@@ -672,8 +692,8 @@
         "Cannot seek to negative offset.");
     checkStream();
     if (!(in instanceof Seekable)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "seekToNewSource.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support seekToNewSource.");
     }
     boolean result = ((Seekable) in).seekToNewSource(targetPos);
     resetStreamOffset(targetPos);
@@ -687,16 +707,16 @@
     checkStream();
     if (outBuffer.remaining() > 0) {
       if (!(in instanceof Seekable)) {
-        throw new UnsupportedOperationException("This stream does not " +
-                "support seek.");
+        throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+            + " does not support seek.");
       }
       // Have some decrypted data unread, need to reset.
       ((Seekable) in).seek(getPos());
       resetStreamOffset(getPos());
     }
     if (!(in instanceof HasEnhancedByteBufferAccess)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "enhanced byte buffer access.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support enhanced byte buffer access.");
     }
     final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
         read(bufferPool, maxLength, opts);
@@ -714,8 +734,8 @@
   @Override
   public void releaseBuffer(ByteBuffer buffer) {
     if (!(in instanceof HasEnhancedByteBufferAccess)) {
-      throw new UnsupportedOperationException("This stream does not support " + 
-          "release buffer.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support release buffer.");
     }
     ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
   }
@@ -724,8 +744,8 @@
   public void setReadahead(Long readahead) throws IOException,
       UnsupportedOperationException {
     if (!(in instanceof CanSetReadahead)) {
-      throw new UnsupportedOperationException("This stream does not support " +
-          "setting the readahead caching strategy.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not support setting the readahead caching strategy.");
     }
     ((CanSetReadahead) in).setReadahead(readahead);
   }
@@ -734,8 +754,9 @@
   public void setDropBehind(Boolean dropCache) throws IOException,
       UnsupportedOperationException {
     if (!(in instanceof CanSetReadahead)) {
-      throw new UnsupportedOperationException("This stream does not " +
-          "support setting the drop-behind caching setting.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " stream does not support setting the drop-behind caching"
+          + " setting.");
     }
     ((CanSetDropBehind) in).setDropBehind(dropCache);
   }
@@ -842,8 +863,8 @@
     case StreamCapabilities.READBYTEBUFFER:
     case StreamCapabilities.PREADBYTEBUFFER:
       if (!(in instanceof StreamCapabilities)) {
-        throw new UnsupportedOperationException("This stream does not expose " +
-          "its stream capabilities.");
+        throw new UnsupportedOperationException(in.getClass().getCanonicalName()
+          + " does not expose its stream capabilities.");
       }
       return ((StreamCapabilities) in).hasCapability(capability);
     default:
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 6e82543..0453ca1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -60,6 +60,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /**
  * This class provides an interface for implementors of a Hadoop file system
  * (analogous to the VFS of Unix). Applications do not access this class;
@@ -72,7 +74,7 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public abstract class AbstractFileSystem {
+public abstract class AbstractFileSystem implements PathCapabilities {
   static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
 
   /** Recording statistics per a file system class. */
@@ -1371,4 +1373,16 @@
         new CompletableFuture<>(), () -> open(path, bufferSize));
   }
 
+  public boolean hasPathCapability(final Path path,
+      final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+    case CommonPathCapabilities.FS_SYMLINKS:
+      // delegate to the existing supportsSymlinks() call.
+      return supportsSymlinks();
+    default:
+      // the feature is not implemented.
+      return false;
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
index d99ee16..f8282d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
@@ -55,6 +56,8 @@
    * <p>
    * Implementations should treat 0-length requests as legitimate, and must not
    * signal an error upon their receipt.
+   * <p>
+   * This does not change the current offset of a file, and is thread-safe.
    *
    * @param position position within file
    * @param buf the ByteBuffer to receive the results of the read operation.
@@ -63,4 +66,25 @@
    * @throws IOException if there is some error performing the read
    */
   int read(long position, ByteBuffer buf) throws IOException;
+
+  /**
+   * Reads {@code buf.remaining()} bytes into buf from a given position in
+   * the file; if the end of the data is reached before the read completes,
+   * an {@link EOFException} is raised. Callers should use
+   * {@code buf.limit(...)} to control the size of the desired read and
+   * {@code buf.position(...)} to control the offset into the buffer the data
+   * should be written to.
+   * <p>
+   * This operation provides similar semantics to
+   * {@link #read(long, ByteBuffer)}, the difference is that this method is
+   * guaranteed to read data until the {@link ByteBuffer} is full, or until
+   * the end of the data stream is reached.
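+   * <p>
+   * A sketch of possible usage; the {@code in} variable is assumed to be an
+   * open {@code FSDataInputStream} or any other implementation of this
+   * interface:
+   * <pre>{@code
+   *   // read exactly 16 bytes starting at offset 1024,
+   *   // or fail with an EOFException if the data ends first.
+   *   ByteBuffer header = ByteBuffer.allocate(16);
+   *   in.readFully(1024, header);
+   *   header.flip(); // prepare the buffer for reading the 16 bytes
+   * }</pre>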
+   *
+   * @param position position within file
+   * @param buf the ByteBuffer to receive the results of the read operation.
+   * @throws IOException if there is some error performing the read
+   * @throws EOFException the end of the data was reached before
+   * the read operation completed
+   * @see #read(long, ByteBuffer)
+   */
+  void readFully(long position, ByteBuffer buf) throws IOException;
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 99aa5d2..5e5d29a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -27,6 +27,7 @@
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 
@@ -42,6 +43,8 @@
 import org.apache.hadoop.util.LambdaUtils;
 import org.apache.hadoop.util.Progressable;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /****************************************************************
  * Abstract Checksumed FileSystem.
  * It provide a basic implementation of a Checksumed FileSystem,
@@ -872,4 +875,23 @@
   public FSDataOutputStreamBuilder appendFile(Path path) {
     return createDataOutputStreamBuilder(this, path).append();
   }
+
+  /**
+   * Disable those operations which the checksummed FS blocks.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    // query the superclass, which triggers argument validation.
+    final Path p = makeQualified(path);
+    switch (validatePathCapabilityArgs(p, capability)) {
+    case CommonPathCapabilities.FS_APPEND:
+    case CommonPathCapabilities.FS_CONCAT:
+      return false;
+    default:
+      return super.hasPathCapability(p, capability);
+    }
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
new file mode 100644
index 0000000..31e6bac
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonPathCapabilities.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+/**
+ * Common path capabilities.
+ */
+public final class CommonPathCapabilities {
+
+  private CommonPathCapabilities() {
+  }
+
+  /**
+   * Does the store support
+   * {@code FileSystem.setAcl(Path, List)},
+   * {@code FileSystem.getAclStatus(Path)}
+   * and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_ACLS = "fs.capability.paths.acls";
+
+  /**
+   * Does the store support {@code FileSystem.append(Path)}?
+   * Value: {@value}.
+   */
+  public static final String FS_APPEND = "fs.capability.paths.append";
+
+  /**
+   * Does the store support {@code FileSystem.getFileChecksum(Path)}?
+   * Value: {@value}.
+   */
+  public static final String FS_CHECKSUMS = "fs.capability.paths.checksums";
+
+  /**
+   * Does the store support {@code FileSystem.concat(Path, Path[])}?
+   * Value: {@value}.
+   */
+  public static final String FS_CONCAT = "fs.capability.paths.concat";
+
+  /**
+   * Does the store support {@code FileSystem.listCorruptFileBlocks(Path)}?
+   * Value: {@value}.
+   */
+  public static final String FS_LIST_CORRUPT_FILE_BLOCKS =
+      "fs.capability.paths.list-corrupt-file-blocks";
+
+  /**
+   * Does the store support
+   * {@code FileSystem.createPathHandle(FileStatus, Options.HandleOpt...)}
+   * and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_PATHHANDLES = "fs.capability.paths.pathhandles";
+
+  /**
+   * Does the store support {@code FileSystem.setPermission(Path, FsPermission)}
+   * and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_PERMISSIONS = "fs.capability.paths.permissions";
+
+  /**
+   * Does this filesystem connector only support filesystem read operations?
+   * For example, the {@code HttpFileSystem} is always read-only.
+   * This is different from "is the specific instance and path read only?",
+   * which must be determined by checking permissions (where supported), or
+   * attempting write operations under a path.
+   * Value: {@value}.
+   */
+  public static final String FS_READ_ONLY_CONNECTOR =
+      "fs.capability.paths.read-only-connector";
+
+  /**
+   * Does the store support snapshots through
+   * {@code FileSystem.createSnapshot(Path)} and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_SNAPSHOTS = "fs.capability.paths.snapshots";
+
+  /**
+   * Does the store support {@code FileSystem.setStoragePolicy(Path, String)}
+   * and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_STORAGEPOLICY =
+      "fs.capability.paths.storagepolicy";
+
+  /**
+   * Does the store support symlinks through
+   * {@code FileSystem.createSymlink(Path, Path, boolean)} and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_SYMLINKS =
+      "fs.capability.paths.symlinks";
+
+  /**
+   * Does the store support {@code FileSystem.truncate(Path, long)}?
+   * Value: {@value}.
+   */
+  public static final String FS_TRUNCATE =
+      "fs.capability.paths.truncate";
+
+  /**
+   * Does the store support XAttributes through
+   * {@code FileSystem.setXAttr()} and related methods?
+   * Value: {@value}.
+   */
+  public static final String FS_XATTRS = "fs.capability.paths.xattrs";
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 165c56c..a8f294f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
@@ -281,4 +281,11 @@
       int bufferSize) throws IOException {
     return fsImpl.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path,
+      final String capability)
+      throws IOException {
+    return fsImpl.hasPathCapability(path, capability);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index 066cc3d..31f8297 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -52,8 +52,8 @@
   public FSDataInputStream(InputStream in) {
     super(in);
     if( !(in instanceof Seekable) || !(in instanceof PositionedReadable) ) {
-      throw new IllegalArgumentException(
-          "In is not an instance of Seekable or PositionedReadable");
+      throw new IllegalArgumentException(in.getClass().getCanonicalName() +
+          " is not an instance of Seekable or PositionedReadable");
     }
   }
   
@@ -150,7 +150,7 @@
     }
 
     throw new UnsupportedOperationException("Byte-buffer read unsupported " +
-            "by input stream");
+            "by " + in.getClass().getCanonicalName());
   }
 
   @Override
@@ -170,9 +170,8 @@
     try {
       ((CanSetReadahead)in).setReadahead(readahead);
     } catch (ClassCastException e) {
-      throw new UnsupportedOperationException(
-          "this stream does not support setting the readahead " +
-          "caching strategy.");
+      throw new UnsupportedOperationException(in.getClass().getCanonicalName() +
+          " does not support setting the readahead caching strategy.");
     }
   }
 
@@ -256,6 +255,16 @@
       return ((ByteBufferPositionedReadable) in).read(position, buf);
     }
     throw new UnsupportedOperationException("Byte-buffer pread unsupported " +
-        "by input stream");
+        "by " + in.getClass().getCanonicalName());
+  }
+
+  @Override
+  public void readFully(long position, ByteBuffer buf) throws IOException {
+    if (in instanceof ByteBufferPositionedReadable) {
+      ((ByteBufferPositionedReadable) in).readFully(position, buf);
+    } else {
+      throw new UnsupportedOperationException("Byte-buffer pread " +
+              "unsupported by " + in.getClass().getCanonicalName());
+    }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index f650748..b2c1369 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -46,6 +46,8 @@
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.impl.FutureDataInputStreamBuilderImpl;
+import org.apache.hadoop.fs.impl.FsLinkResolution;
+import org.apache.hadoop.fs.impl.PathCapabilitiesSupport;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -68,6 +70,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /**
  * The FileContext class provides an interface for users of the Hadoop
  * file system. It exposes a number of file system operations, e.g. create,
@@ -171,7 +175,7 @@
 
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileContext {
+public class FileContext implements PathCapabilities {
   
   public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
   /**
@@ -2934,4 +2938,21 @@
       }.resolve(FileContext.this, absF);
     }
   }
+
+  /**
+   * Return the path capabilities of the bonded {@code AbstractFileSystem}.
+   * @param path path to query the capability of.
+   * @param capability string to query the stream support for.
+   * @return true iff the capability is supported under that FS.
+   * @throws IOException path resolution or other IO failure
+   * @throws IllegalArgumentException invalid arguments
+   */
+  public boolean hasPathCapability(Path path, String capability)
+      throws IOException {
+    validatePathCapabilityArgs(path, capability);
+    return FsLinkResolution.resolve(this,
+        fixRelativePart(path),
+        (fs, p) -> fs.hasPathCapability(p, capability));
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index c3be2f2..2376c05 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -88,6 +88,7 @@
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 
 /****************************************************************
  * An abstract base class for a fairly generic filesystem.  It
@@ -134,7 +135,7 @@
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class FileSystem extends Configured
-    implements Closeable, DelegationTokenIssuer {
+    implements Closeable, DelegationTokenIssuer, PathCapabilities {
   public static final String FS_DEFAULT_NAME_KEY =
                    CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
   public static final String DEFAULT_FS =
@@ -720,6 +721,7 @@
    *
    */
   protected void checkPath(Path path) {
+    Preconditions.checkArgument(path != null, "null path");
     URI uri = path.toUri();
     String thatScheme = uri.getScheme();
     if (thatScheme == null)                // fs is relative
@@ -2062,7 +2064,12 @@
    * @throws IOException IO failure
    */
   public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    return new Globber(this, pathPattern, DEFAULT_FILTER).glob();
+    return Globber.createGlobber(this)
+        .withPathPattern(pathPattern)
+        .withPathFiltern(DEFAULT_FILTER)
+        .withResolveSymlinks(true)
+        .build()
+        .glob();
   }
 
   /**
@@ -3259,6 +3266,25 @@
     return ret;
   }
 
+  /**
+   * The base FileSystem implementation generally has no knowledge
+   * of the capabilities of actual implementations.
+   * Unless it has a way to explicitly determine the capabilities,
+   * this method returns false.
+   * {@inheritDoc}
+   */
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+    case CommonPathCapabilities.FS_SYMLINKS:
+      // delegate to the existing supportsSymlinks() call.
+      return supportsSymlinks() && areSymlinksEnabled();
+    default:
+      // the feature is not implemented.
+      return false;
+    }
+  }
+
   // making it volatile to be able to do a double checked locking
   private volatile static boolean FILE_SYSTEMS_LOADED = false;
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index e05c574..3bc3cb2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -729,4 +729,11 @@
     return fs.openFileWithOptions(pathHandle, mandatoryKeys, options,
         bufferSize);
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    return fs.hasPathCapability(path, capability);
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index f5430d6..731a52a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -446,4 +446,9 @@
     return myFs.openFileWithOptions(path, mandatoryKeys, options, bufferSize);
   }
 
+  public boolean hasPathCapability(final Path path,
+      final String capability)
+      throws IOException {
+    return myFs.hasPathCapability(path, capability);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index b241a94..f301f22 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -25,15 +25,24 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.DurationInfo;
 
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Implementation of {@link FileSystem#globStatus(Path, PathFilter)}.
+ * This has historically been package-private; it has been opened
+ * up for object stores within the {@code hadoop-*} codebase ONLY.
+ * It could be expanded for external store implementations in future.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class Globber {
+public class Globber {
   public static final Logger LOG =
       LoggerFactory.getLogger(Globber.class.getName());
 
@@ -42,21 +51,62 @@
   private final Path pathPattern;
   private final PathFilter filter;
   private final Tracer tracer;
-  
-  public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
+  private final boolean resolveSymlinks;
+
+  Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
     this.fs = fs;
     this.fc = null;
     this.pathPattern = pathPattern;
     this.filter = filter;
     this.tracer = FsTracer.get(fs.getConf());
+    this.resolveSymlinks = true;
   }
 
-  public Globber(FileContext fc, Path pathPattern, PathFilter filter) {
+  Globber(FileContext fc, Path pathPattern, PathFilter filter) {
     this.fs = null;
     this.fc = fc;
     this.pathPattern = pathPattern;
     this.filter = filter;
     this.tracer = fc.getTracer();
+    this.resolveSymlinks = true;
+  }
+
+  /**
+   * Filesystem constructor for use by {@link GlobBuilder}.
+   * @param fs filesystem
+   * @param pathPattern path pattern
+   * @param filter optional filter
+   * @param resolveSymlinks should symlinks be resolved.
+   */
+  private Globber(FileSystem fs, Path pathPattern, PathFilter filter,
+      boolean resolveSymlinks) {
+    this.fs = fs;
+    this.fc = null;
+    this.pathPattern = pathPattern;
+    this.filter = filter;
+    this.resolveSymlinks = resolveSymlinks;
+    this.tracer = FsTracer.get(fs.getConf());
+    LOG.debug("Created Globber for path={}, symlinks={}",
+        pathPattern, resolveSymlinks);
+  }
+
+  /**
+   * File Context constructor for use by {@link GlobBuilder}.
+   * @param fc file context
+   * @param pathPattern path pattern
+   * @param filter optional filter
+   * @param resolveSymlinks should symlinks be resolved.
+   */
+  private Globber(FileContext fc, Path pathPattern, PathFilter filter,
+      boolean resolveSymlinks) {
+    this.fs = null;
+    this.fc = fc;
+    this.pathPattern = pathPattern;
+    this.filter = filter;
+    this.resolveSymlinks = resolveSymlinks;
+    this.tracer = fc.getTracer();
+    LOG.debug("Created Globber path={}, symlinks={}",
+        pathPattern, resolveSymlinks);
   }
 
   private FileStatus getFileStatus(Path path) throws IOException {
@@ -67,6 +117,7 @@
         return fc.getFileStatus(path);
       }
     } catch (FileNotFoundException e) {
+      LOG.debug("getFileStatus({}) failed; returning null", path, e);
       return null;
     }
   }
@@ -79,6 +130,7 @@
         return fc.util().listStatus(path);
       }
     } catch (FileNotFoundException e) {
+      LOG.debug("listStatus({}) failed; returning empty array", path, e);
       return new FileStatus[0];
     }
   }
@@ -107,7 +159,7 @@
    */
   private static List<String> getPathComponents(String path)
       throws IOException {
-    ArrayList<String> ret = new ArrayList<String>();
+    ArrayList<String> ret = new ArrayList<>();
     for (String component : path.split(Path.SEPARATOR)) {
       if (!component.isEmpty()) {
         ret.add(component);
@@ -145,7 +197,8 @@
   public FileStatus[] glob() throws IOException {
     TraceScope scope = tracer.newScope("Globber#glob");
     scope.addKVAnnotation("pattern", pathPattern.toUri().getPath());
-    try {
+    try (DurationInfo ignored = new DurationInfo(LOG, false,
+        "glob %s", pathPattern)) {
       return doGlob();
     } finally {
       scope.close();
@@ -164,10 +217,11 @@
     String pathPatternString = pathPattern.toUri().getPath();
     List<String> flattenedPatterns = GlobExpander.expand(pathPatternString);
 
+    LOG.debug("Filesystem glob {}", pathPatternString);
     // Now loop over all flattened patterns.  In every case, we'll be trying to
     // match them to entries in the filesystem.
     ArrayList<FileStatus> results = 
-        new ArrayList<FileStatus>(flattenedPatterns.size());
+        new ArrayList<>(flattenedPatterns.size());
     boolean sawWildcard = false;
     for (String flatPattern : flattenedPatterns) {
       // Get the absolute path for this flattened pattern.  We couldn't do 
@@ -175,13 +229,14 @@
       // path you go down influences how the path must be made absolute.
       Path absPattern = fixRelativePart(new Path(
           flatPattern.isEmpty() ? Path.CUR_DIR : flatPattern));
+      LOG.debug("Pattern: {}", absPattern);
       // Now we break the flattened, absolute pattern into path components.
       // For example, /a/*/c would be broken into the list [a, *, c]
       List<String> components =
           getPathComponents(absPattern.toUri().getPath());
       // Starting out at the root of the filesystem, we try to match
       // filesystem entries against pattern components.
-      ArrayList<FileStatus> candidates = new ArrayList<FileStatus>(1);
+      ArrayList<FileStatus> candidates = new ArrayList<>(1);
       // To get the "real" FileStatus of root, we'd have to do an expensive
       // RPC to the NameNode.  So we create a placeholder FileStatus which has
       // the correct path, but defaults for the rest of the information.
@@ -206,12 +261,13 @@
       for (int componentIdx = 0; componentIdx < components.size();
           componentIdx++) {
         ArrayList<FileStatus> newCandidates =
-            new ArrayList<FileStatus>(candidates.size());
+            new ArrayList<>(candidates.size());
         GlobFilter globFilter = new GlobFilter(components.get(componentIdx));
         String component = unescapePathComponent(components.get(componentIdx));
         if (globFilter.hasPattern()) {
           sawWildcard = true;
         }
+        LOG.debug("Component {}, patterned={}", component, sawWildcard);
         if (candidates.isEmpty() && sawWildcard) {
           // Optimization: if there are no more candidates left, stop examining 
           // the path components.  We can only do this if we've already seen
@@ -245,19 +301,31 @@
               // incorrectly conclude that /a/b was a file and should not match
               // /a/*/*.  So we use getFileStatus of the path we just listed to
               // disambiguate.
-              Path path = candidate.getPath();
-              FileStatus status = getFileStatus(path);
-              if (status == null) {
-                // null means the file was not found
-                LOG.warn("File/directory {} not found:"
-                    + " it may have been deleted."
-                    + " If this is an object store, this can be a sign of"
-                    + " eventual consistency problems.",
-                    path);
-                continue;
-              }
-              if (!status.isDirectory()) {
-                continue;
+              if (resolveSymlinks) {
+                LOG.debug("listStatus found one entry; disambiguating {}",
+                    children[0]);
+                Path path = candidate.getPath();
+                FileStatus status = getFileStatus(path);
+                if (status == null) {
+                  // null means the file was not found
+                  LOG.warn("File/directory {} not found:"
+                      + " it may have been deleted."
+                      + " If this is an object store, this can be a sign of"
+                      + " eventual consistency problems.",
+                      path);
+                  continue;
+                }
+                if (!status.isDirectory()) {
+                  LOG.debug("Resolved entry is a file; skipping: {}", status);
+                  continue;
+                }
+              } else {
+                // there are no symlinks in this store, so no need to issue
+                // another call; just see if the result is a directory or a file
+                if (children[0].getPath().equals(candidate.getPath())) {
+                  // the listing status is of a file
+                  continue;
+                }
               }
             }
             for (FileStatus child : children) {
@@ -312,6 +380,8 @@
      */
     if ((!sawWildcard) && results.isEmpty() &&
         (flattenedPatterns.size() <= 1)) {
+      LOG.debug("No matches found and there was no wildcard in the path {}",
+          pathPattern);
       return null;
     }
     /*
@@ -324,4 +394,98 @@
     Arrays.sort(ret);
     return ret;
   }
+
+  /**
+   * Create a builder for a Globber, bonded to the specific filesystem.
+   * @param filesystem filesystem
+   * @return the builder to finish configuring.
+   */
+  public static GlobBuilder createGlobber(FileSystem filesystem) {
+    return new GlobBuilder(filesystem);
+  }
+
+  /**
+   * Create a builder for a Globber, bonded to the specific file
+   * context.
+   * @param fileContext file context.
+   * @return the builder to finish configuring.
+   */
+  public static GlobBuilder createGlobber(FileContext fileContext) {
+    return new GlobBuilder(fileContext);
+  }
+
+  /**
+   * Builder for Globber instances.
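+   * <p>
+   * A sketch of intended use (the {@code fs} and {@code pattern} variables
+   * are assumed to be in scope):
+   * <pre>{@code
+   *   FileStatus[] results = Globber.createGlobber(fs)
+   *       .withPathPattern(pattern)
+   *       .withResolveSymlinks(false)
+   *       .build()
+   *       .glob();
+   * }</pre>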
+   */
+  @InterfaceAudience.Private
+  public static class GlobBuilder {
+
+    private final FileSystem fs;
+
+    private final FileContext fc;
+
+    private Path pathPattern;
+
+    private PathFilter filter;
+
+    private boolean resolveSymlinks = true;
+
+    /**
+     * Construct bonded to a file context.
+     * @param fc file context.
+     */
+    public GlobBuilder(final FileContext fc) {
+      this.fs = null;
+      this.fc = checkNotNull(fc);
+    }
+
+    /**
+     * Construct bonded to a filesystem.
+     * @param fs file system.
+     */
+    public GlobBuilder(final FileSystem fs) {
+      this.fs = checkNotNull(fs);
+      this.fc = null;
+    }
+
+    /**
+     * Set the path pattern.
+     * @param pattern pattern to use.
+     * @return the builder
+     */
+    public GlobBuilder withPathPattern(Path pattern) {
+      pathPattern = pattern;
+      return this;
+    }
+
+    /**
+     * Set the path filter.
+     * @param pathFilter filter
+     * @return the builder
+     */
+    public GlobBuilder withPathFiltern(PathFilter pathFilter) {
+      filter = pathFilter;
+      return this;
+    }
+
+    /**
+     * Set the symlink resolution policy.
+     * @param resolve resolution flag.
+     * @return the builder
+     */
+    public GlobBuilder withResolveSymlinks(boolean resolve) {
+      resolveSymlinks = resolve;
+      return this;
+    }
+
+    /**
+     * Build the Globber.
+     * @return a new instance.
+     */
+    public Globber build() {
+      return fs != null
+          ? new Globber(fs, pathPattern, filter, resolveSymlinks)
+          : new Globber(fc, pathPattern, filter, resolveSymlinks);
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index f7da819..5f4c4a2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -36,6 +36,8 @@
 import java.net.URLDecoder;
 import java.util.*;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /**
  * This is an implementation of the Hadoop Archive 
  * Filesystem. This archive Filesystem has index files
@@ -899,7 +901,22 @@
     throws IOException {
     throw new IOException("Har: setPermission not allowed");
   }
-  
+
+  /**
+   * Declare that this filesystem connector is always read only.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(path, capability)) {
+    case CommonPathCapabilities.FS_READ_ONLY_CONNECTOR:
+      return true;
+    default:
+      return false;
+    }
+  }
+
   /**
    * Hadoop archives input stream. This input stream fakes EOF 
    * since archive files are part of bigger part files.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java
new file mode 100644
index 0000000..d349256
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathCapabilities.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+/**
+ * The Path counterpoint to {@link StreamCapabilities}; a query to see if
+ * a FileSystem/FileContext instance has a specific capability under the given
+ * path.
+ * Other classes may also implement the interface, as desired.
+ *
+ * See {@link CommonPathCapabilities} for the well-known capabilities.
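+ * <p>
+ * A sketch of typical use, where {@code fs} is assumed to be a
+ * {@code FileSystem} instance and {@code path} a path within it:
+ * <pre>{@code
+ *   if (fs.hasPathCapability(path, CommonPathCapabilities.FS_APPEND)) {
+ *     // append() is expected to be supported under this path.
+ *   }
+ * }</pre>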
+ */
+public interface PathCapabilities {
+
+  /**
+   * Probe for a specific capability under the given path.
+   * If the function returns {@code true}, this instance is explicitly
+   * declaring that the capability is available.
+   * If the function returns {@code false}, it can mean one of:
+   * <ul>
+   *   <li>The capability is not known.</li>
+   *   <li>The capability is known but it is not supported.</li>
+   *   <li>The capability is known but the filesystem does not know if it
+   *   is supported under the supplied path.</li>
+   * </ul>
+   * The core guarantee which a caller can rely on is: if the predicate
+   * returns true, then the specific operation/behavior can be expected to be
+   * supported. However a specific call may be rejected for permission reasons,
+   * the actual file/directory not being present, or some other failure during
+   * the attempted execution of the operation.
+   * <p>
+   * Implementors: {@link org.apache.hadoop.fs.impl.PathCapabilitiesSupport}
+   * can be used to help implement this method.
+   * @param path path to query the capability of.
+   * @param capability non-null, non-empty string to query the path for support.
+   * @return true if the capability is supported under that part of the FS.
+   * @throws IOException this should not be raised, except on problems
+   * resolving paths or relaying the call.
+   * @throws IllegalArgumentException invalid arguments
+   */
+  boolean hasPathCapability(Path path, String capability)
+      throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index bd003ae..cf22105 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -53,6 +53,8 @@
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /****************************************************************
  * Implement the FileSystem API for the raw local filesystem.
  *
@@ -1060,4 +1062,21 @@
     // return an unqualified symlink target
     return fi.getSymlink();
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+    case CommonPathCapabilities.FS_APPEND:
+    case CommonPathCapabilities.FS_CONCAT:
+    case CommonPathCapabilities.FS_PATHHANDLES:
+    case CommonPathCapabilities.FS_PERMISSIONS:
+    case CommonPathCapabilities.FS_TRUNCATE:
+      return true;
+    case CommonPathCapabilities.FS_SYMLINKS:
+      return FileSystem.areSymlinksEnabled();
+    default:
+      return super.hasPathCapability(path, capability);
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
index fa0b2cf..baf0a81 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
@@ -20,6 +20,7 @@
 
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -36,6 +37,8 @@
 import java.net.URI;
 import java.net.URLConnection;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 abstract class AbstractHttpFileSystem extends FileSystem {
   private static final long DEFAULT_BLOCK_SIZE = 4096;
   private static final Path WORKING_DIR = new Path("/");
@@ -111,6 +114,21 @@
     return new FileStatus(-1, false, 1, DEFAULT_BLOCK_SIZE, 0, path);
   }
 
+  /**
+   * Declare that this filesystem connector is always read only.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(path, capability)) {
+    case CommonPathCapabilities.FS_READ_ONLY_CONNECTOR:
+      return true;
+    default:
+      return super.hasPathCapability(path, capability);
+    }
+  }
+
   private static class HttpDataInputStream extends FilterInputStream
       implements Seekable, PositionedReadable {
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java
new file mode 100644
index 0000000..f5ef8c4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import java.io.IOException;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FSLinkResolver;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+
+/**
+ * Class to allow Lambda expressions to be used in {@link FileContext}
+ * link resolution.
+ * @param <T> type of the returned value.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FsLinkResolution<T> extends FSLinkResolver<T> {
+
+  /**
+   * The function to invoke in the {@link #next(AbstractFileSystem, Path)} call.
+   */
+  private final FsLinkResolutionFunction<T> fn;
+
+  /**
+   * Construct an instance with the given function.
+   * @param fn function to invoke.
+   */
+  public FsLinkResolution(final FsLinkResolutionFunction<T> fn) {
+    this.fn = Preconditions.checkNotNull(fn);
+  }
+
+  @Override
+  public T next(final AbstractFileSystem fs, final Path p)
+      throws UnresolvedLinkException, IOException {
+    return fn.apply(fs, p);
+  }
+
+  /**
+   * The signature of the function to invoke.
+   * @param <T> type resolved to
+   */
+  @FunctionalInterface
+  public interface FsLinkResolutionFunction<T> {
+
+    /**
+     * Apply the function to the given filesystem and path.
+     * @param fs filesystem to resolve against.
+     * @param path path to resolve
+     * @return a result of type T
+     * @throws UnresolvedLinkException link resolution failure
+     * @throws IOException other IO failure.
+     */
+    T apply(final AbstractFileSystem fs, final Path path)
+        throws IOException, UnresolvedLinkException;
+  }
+
+  /**
+   * Apply the given function to the resolved path under the supplied
+   * FileContext.
+   * @param fileContext file context to resolve under
+   * @param path path to resolve
+   * @param fn function to invoke
+   * @param <T> return type.
+   * @return the return value of the function as invoked against the resolved
+   * path.
+   * @throws UnresolvedLinkException link resolution failure
+   * @throws IOException other IO failure.
+   */
+  public static <T> T resolve(
+      final FileContext fileContext, final Path path,
+      final FsLinkResolutionFunction<T> fn)
+      throws UnresolvedLinkException, IOException {
+    return new FsLinkResolution<>(fn).resolve(fileContext, path);
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java
new file mode 100644
index 0000000..9332ac6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.impl;
+
+import java.util.Locale;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathCapabilities;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PathCapabilitiesSupport {
+
+  /**
+   * Validate the arguments to
+   * {@link PathCapabilities#hasPathCapability(Path, String)}.
+   * @param path path to query the capability of.
+   * @param capability non-null, non-empty string to query the path for support.
+   * @return the string to use in a switch statement.
+   * @throws IllegalArgumentException if an argument is invalid.
+   */
+  public static String validatePathCapabilityArgs(
+      final Path path, final String capability) {
+    checkArgument(path != null, "null path");
+    checkArgument(capability != null, "capability parameter is null");
+    checkArgument(!capability.isEmpty(),
+        "capability parameter is empty string");
+    return capability.toLowerCase(Locale.ENGLISH);
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 1780cda..6380d0ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -81,9 +81,7 @@
       }
 
       if (!item.fs.exists(itemParentPath)) {
-        throw new PathNotFoundException(String.format(
-            "mkdir failed for path: %s. Item parent path not found: %s.",
-        itemPath.toString(), itemParentPath.toString()));
+        throw new PathNotFoundException(itemParentPath.toString());
       }
     }
     if (!item.fs.mkdirs(item.path)) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index c93225f..773a7b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -491,4 +491,10 @@
       throws IOException, UnsupportedOperationException {
     return super.openFile(fullPath(path));
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    return super.hasPathCapability(fullPath(path), capability);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 6bc469c..faa374a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.viewfs;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -1027,6 +1029,36 @@
   }
 
   /**
+   * Reject the concat operation; forward the rest to the viewed FS.
+   * @param path path to query the capability of.
+   * @param capability string to query the stream support for.
+   * @return true if the capability is supported
+   * @throws IOException if there is no resolved FS, or it raises an IOE.
+   */
+  @Override
+  public boolean hasPathCapability(Path path, String capability)
+      throws IOException {
+    final Path p = makeQualified(path);
+    switch (validatePathCapabilityArgs(p, capability)) {
+    case CommonPathCapabilities.FS_CONCAT:
+      // concat is not supported, as it may be invoked across filesystems.
+      return false;
+    default:
+      // no break
+    }
+    // otherwise, check capabilities of mounted FS.
+    try {
+      InodeTree.ResolveResult<FileSystem> res
+          = fsState.resolve(getUriPath(p), true);
+      return res.targetFileSystem.hasPathCapability(res.remainingPath,
+          capability);
+    } catch (FileNotFoundException e) {
+      // no mount point, nothing will work.
+      throw new NotInMountpointException(p, "hasPathCapability");
+    }
+  }
+
+  /**
    * An instance of this class represents an internal dir of the viewFs
    * that is internal dir of the mount table.
    * It is a read only mount tables and create, mkdir or delete operations
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
index 7c46c5b..f2a5b24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -163,6 +163,7 @@
         setHealthStatus(false, exceptionStackTrace);
         break;
       case FAILED_WITH_EXIT_CODE:
+        // see the Javadoc above: we intentionally do not report bad health here
         setHealthStatus(true, "", now);
         break;
       case FAILED:
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index d693774..281e42d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -74,6 +74,10 @@
 | hadoop.pipes.java.reducer | mapreduce.pipes.isjavareducer |
 | hadoop.pipes.partitioner | mapreduce.pipes.partitioner |
 | heartbeat.recheck.interval | dfs.namenode.heartbeat.recheck-interval |
+| httpfs.authentication.kerberos.keytab | hadoop.http.authentication.kerberos.keytab |
+| httpfs.authentication.kerberos.principal | hadoop.http.authentication.kerberos.principal |
+| httpfs.authentication.signature.secret.file | hadoop.http.authentication.signature.secret.file |
+| httpfs.authentication.type | hadoop.http.authentication.type |
 | io.bytes.per.checksum | dfs.bytes-per-checksum |
 | io.sort.factor | mapreduce.task.io.sort.factor |
 | io.sort.mb | mapreduce.task.io.sort.mb |
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 0b09f02..a2458ee 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -526,7 +526,7 @@
    `getFileStatus(P).getBlockSize()`.
 1. By inference, it MUST be > 0 for any file of length > 0.
 
-## State Changing Operations
+## <a name="state_changing_operations"></a> State Changing Operations
 
 ### `boolean mkdirs(Path p, FsPermission permission)`
 
@@ -1479,7 +1479,7 @@
 
 ### `boolean hasCapability(capability)`
 
-Return true if the `OutputStream`, `InputStream`, or other FileSystem class
+Return true iff the `OutputStream`, `InputStream`, or other FileSystem class
 has the desired capability.
 
 The caller can query the capabilities of a stream using a string value.
@@ -1492,3 +1492,4 @@
 in:readahead | READAHEAD  | CanSetReadahead  | Set the readahead on the input stream.
 dropbehind   | DROPBEHIND | CanSetDropBehind | Drop the cache.
 in:unbuffer  | UNBUFFER   | CanUnbuffer      | Reduce the buffering on the input stream.
+
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
index 6b4399e..df538ee 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
@@ -33,6 +33,7 @@
 1. [Model](model.html)
 1. [FileSystem class](filesystem.html)
 1. [FSDataInputStream class](fsdatainputstream.html)
+1. [PathCapabilities interface](pathcapabilities.html)
 1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html)
 2. [Testing with the Filesystem specification](testing.html)
 2. [Extending the specification and its tests](extending.html)
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md
new file mode 100644
index 0000000..e053bfb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/pathcapabilities.md
@@ -0,0 +1,158 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# <a name="PathCapabilities"></a> interface `PathCapabilities`
+
+The `PathCapabilities` interface provides a way to programmatically query the
+operations offered under a given path by an instance of `FileSystem`, `FileContext`
+or other implementing class.
+
+```java
+public interface PathCapabilities {
+  boolean hasPathCapability(Path path, String capability)
+      throws IOException;
+}
+```
+
+There are a number of goals here:
+
+1. Allow callers to probe for optional filesystem operations without actually
+having to invoke them.
+1. Allow filesystems with their own optional per-instance features to declare
+whether or not they are active for the specific instance.
+1. Allow for filesystem connectors which work with object stores to expose the
+fundamental differences in semantics of these stores (e.g. files not visible
+until closed, file rename being `O(data)`, directory rename being non-atomic,
+etc.).
+
+### Available Capabilities
+
+Capabilities are defined as strings and split into "Common Capabilities"
+and non-standard ones for a specific store.
+
+The common capabilities are all defined under the prefix `fs.capability.`
+
+Consult the javadocs for `org.apache.hadoop.fs.CommonPathCapabilities` for these.
+
+
+Individual filesystems MAY offer their own set of capabilities which
+can be probed for. These MUST begin with `fs.` + the filesystem scheme +
+ `.capability`. For example: `fs.s3a.capability.select.sql`.
+
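+A sketch of how a caller might probe for these; the `fs` and `path` variables
+are assumed to be an instantiated `FileSystem` and a path within it:
+
+```java
+// a common capability, defined in CommonPathCapabilities
+boolean canAppend = fs.hasPathCapability(path,
+    CommonPathCapabilities.FS_APPEND);
+
+// a store-specific capability; unknown capabilities simply return false
+boolean hasSelect = fs.hasPathCapability(path,
+    "fs.s3a.capability.select.sql");
+```
+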
+### `boolean hasPathCapability(path, capability)`
+
+Probe for the instance offering a specific capability under the
+given path.
+
+#### Postconditions
+
+```python
+if fs_supports_the_feature(path, capability):
+  return True
+else:
+  return False
+```
+
+Return: `True`, iff the specific capability is available.
+
+A filesystem instance *MUST NOT* return `True` for any capability unless it is
+known to be supported by that specific instance. As a result, if a probe for
+a capability returns `True`, the caller can assume that the specific
+feature/semantics are available.
+
+If the probe returns `False` then it can mean one of:
+
+1. The capability is unknown.
+1. The capability is known, and known to be unavailable on this instance.
+1. The capability is known but this local class does not know if it is supported
+   under the supplied path.
+
+This predicate is intended to be low cost. If it requires remote calls other
+than path/link resolution, it SHOULD conclude that the availability
+of the feature is unknown and return `False`.
+
+The predicate MUST also be side-effect free.
+
+*Validity of paths*
+There is no requirement that the existence of the path must be checked;
+the parameter exists so that any filesystem which relays operations to other
+filesystems (e.g. `viewfs`) can resolve and relay it to the nested filesystem.
+Consider the call to be *relatively* lightweight.
+
+Because of this, it may be that while the filesystem declares that
+it supports a capability under a path, the actual invocation of the operation
+may fail for other reasons.
+
+As an example, while a filesystem may support `append()` under a path,
+if invoked on a directory, the call may fail.
+
+That is, for a path `root = new Path("/")`, the capabilities call may succeed:
+
+```java
+fs.hasPathCapability(root, "fs.capability.append") == true
+```
+
+But a subsequent call to the operation on that specific path may fail,
+because the root path is a directory:
+
+```java
+fs.append(root)
+```
+
+
+Similarly, there is no checking that the caller has the permission to
+perform a specific operation: just because a feature is available on that
+path does not mean that the caller can execute the operation.
+
+The `hasPathCapability(path, capability)` probe is therefore declaring that
+the operation will not be rejected as unsupported, not that a specific invocation
+will be permitted on that path by the caller.
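+
+A hedged caller-side sketch of this distinction (assuming the same imports and
+capability string as the earlier sketch, plus `java.io.FileNotFoundException` and
+`org.apache.hadoop.security.AccessControlException`): treat a `true` probe as
+permission to attempt the operation, and still handle the failure of the
+invocation itself.
+
+```java
+// Sketch only: a successful probe does not guarantee a successful invocation.
+boolean tryAppend(FileSystem fs, Path path, byte[] record) throws IOException {
+  if (!fs.hasPathCapability(path, "fs.capability.append")) {
+    return false;   // capability unknown, or unavailable on this instance/path
+  }
+  try (FSDataOutputStream out = fs.append(path)) {
+    out.write(record);
+    return true;
+  } catch (FileNotFoundException | AccessControlException e) {
+    // The path may be missing or a directory, or the caller may lack permission;
+    // the capability probe checks none of these.
+    return false;
+  }
+}
+```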
+
+*Duration of availability*
+
+As the state of a remote store changes, so may path capabilities. This
+may be due to changes in the local state of the filesystem (e.g. symbolic links
+or mount points changing), or changes in its functionality (e.g. a feature
+becoming available/unavailable due to operational changes, system upgrades, etc.)
+
+*Capabilities which must be invoked to determine availability*
+
+Some operations may be known by the client connector, and believed to be available,
+but may actually fail when invoked due to the state and permissions of the remote
+store: state which cannot be determined except by attempting
+side-effecting operations.
+
+A key example of this is symbolic links and the local filesystem.
+The filesystem declares that it supports symbolic links unless they are explicitly
+disabled; even so, the operations may actually fail when invoked.
+
+### Implementors Notes
+
+Implementors *MUST NOT* return `true` for any capability which is not guaranteed
+to be supported. To return `true` indicates that the implementation/deployment
+of the filesystem does, to the best of the knowledge of the filesystem client,
+offer the desired operations *and semantics* queried for.
+
+For performance reasons, implementations *SHOULD NOT* check the path for
+existence, unless they need to resolve symbolic links in parts of the path
+to determine whether a feature is present. This is required of `FileContext`
+and `viewfs`.
+
+Individual filesystems *MUST NOT* unilaterally define new `fs.capability`-prefixed
+capabilities. Instead they *MUST* do one of the following:
+
+* Define and stabilize new cross-filesystem capability flags (preferred),
+and so formally add a new `fs.capability` value.
+* Use the scheme of the filesystem as a prefix for their own options,
+e.g. `fs.hdfs.` (see the sketch below).
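+
+A hypothetical implementor sketch of the second approach (the scheme `demo`, the
+capability name `fs.demo.capability.fast-copy` and the `fastCopyEnabled` field are
+all invented for illustration; this is a fragment of a `FileSystem` subclass, not a
+real connector):
+
+```java
+@Override
+public boolean hasPathCapability(Path path, String capability)
+    throws IOException {
+  switch (capability) {
+  case "fs.demo.capability.fast-copy":
+    // Only claim the capability when this specific instance offers it.
+    return fastCopyEnabled;
+  default:
+    // Common capabilities and unknown strings: defer to the superclass
+    // so that their default handling is preserved.
+    return super.hasPathCapability(path, capability);
+  }
+}
+```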
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/CHANGES.3.1.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/CHANGES.3.1.3.md
new file mode 100644
index 0000000..70187e9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/CHANGES.3.1.3.md
@@ -0,0 +1,336 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 3.1.3 - 2019-09-12
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15922](https://issues.apache.org/jira/browse/HADOOP-15922) | DelegationTokenAuthenticationFilter get wrong doAsUser since it does not decode URL |  Major | common, kms | He Xiaoqiao | He Xiaoqiao |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15950](https://issues.apache.org/jira/browse/HADOOP-15950) | Failover for LdapGroupsMapping |  Major | common, security | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15481](https://issues.apache.org/jira/browse/HADOOP-15481) | Emit FairCallQueue stats as metrics |  Major | metrics, rpc-server | Erik Krogen | Christopher Gregorian |
+| [HDFS-14213](https://issues.apache.org/jira/browse/HDFS-14213) | Remove Jansson from BUILDING.txt |  Minor | documentation | Akira Ajisaka | Dinesh Chitlangia |
+| [HDFS-14221](https://issues.apache.org/jira/browse/HDFS-14221) | Replace Guava Optional with Java Optional |  Major | . | Arpit Agarwal | Arpit Agarwal |
+| [HDFS-14222](https://issues.apache.org/jira/browse/HDFS-14222) | Make ThrottledAsyncChecker constructor public |  Major | . | Arpit Agarwal | Arpit Agarwal |
+| [HADOOP-16089](https://issues.apache.org/jira/browse/HADOOP-16089) | AliyunOSS: update oss-sdk version to 3.4.1 |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-14231](https://issues.apache.org/jira/browse/HDFS-14231) | DataXceiver#run() should not log exceptions caused by InvalidToken exception as an error |  Major | hdfs | Kitti Nanasi | Kitti Nanasi |
+| [YARN-7171](https://issues.apache.org/jira/browse/YARN-7171) | RM UI should sort memory / cores numerically |  Major | . | Eric Maynard | Ahmed Hussein |
+| [YARN-9282](https://issues.apache.org/jira/browse/YARN-9282) | Typo in javadoc of class LinuxContainerExecutor: hadoop.security.authetication should be 'authentication' |  Trivial | . | Szilard Nemeth | Charan Hebri |
+| [HADOOP-16108](https://issues.apache.org/jira/browse/HADOOP-16108) | Tail Follow Interval Should Allow To Specify The Sleep Interval To Save Unnecessary RPC's |  Major | . | Harshakiran Reddy | Ayush Saxena |
+| [YARN-8295](https://issues.apache.org/jira/browse/YARN-8295) | [UI2] Improve "Resource Usage" tab error message when there are no data available. |  Minor | yarn-ui-v2 | Gergely Novák | Charan Hebri |
+| [YARN-7824](https://issues.apache.org/jira/browse/YARN-7824) | [UI2] Yarn Component Instance page should include link to container logs |  Major | yarn-ui-v2 | Yesha Vora | Akhil PB |
+| [HADOOP-15281](https://issues.apache.org/jira/browse/HADOOP-15281) | Distcp to add no-rename copy option |  Major | tools/distcp | Steve Loughran | Andrew Olson |
+| [YARN-9309](https://issues.apache.org/jira/browse/YARN-9309) | Improve graph text in SLS to avoid overlapping |  Minor | . | Bilwa S T | Bilwa S T |
+| [YARN-9168](https://issues.apache.org/jira/browse/YARN-9168) | DistributedShell client timeout should be -1 by default |  Minor | . | Zhankun Tang | Zhankun Tang |
+| [YARN-9087](https://issues.apache.org/jira/browse/YARN-9087) | Improve logging for initialization of Resource plugins |  Major | yarn | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9121](https://issues.apache.org/jira/browse/YARN-9121) | Replace GpuDiscoverer.getInstance() to a readable object for easy access control |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9139](https://issues.apache.org/jira/browse/YARN-9139) | Simplify initializer code of GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [HDFS-14247](https://issues.apache.org/jira/browse/HDFS-14247) | Repeat adding node description into network topology |  Minor | datanode | HuangTao | HuangTao |
+| [YARN-9138](https://issues.apache.org/jira/browse/YARN-9138) | Improve test coverage for nvidia-smi binary execution of GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [MAPREDUCE-7191](https://issues.apache.org/jira/browse/MAPREDUCE-7191) | JobHistoryServer should log exception when loading/parsing history file failed |  Minor | mrv2 | Jiandan Yang | Jiandan Yang |
+| [HDFS-14346](https://issues.apache.org/jira/browse/HDFS-14346) | Better time precision in getTimeDuration |  Minor | namenode | Chao Sun | Chao Sun |
+| [HDFS-14366](https://issues.apache.org/jira/browse/HDFS-14366) | Improve HDFS append performance |  Major | hdfs | Chao Sun | Chao Sun |
+| [MAPREDUCE-7190](https://issues.apache.org/jira/browse/MAPREDUCE-7190) | Add SleepJob additional parameter to make parallel runs distinguishable |  Major | . | Adam Antal | Adam Antal |
+| [HADOOP-16208](https://issues.apache.org/jira/browse/HADOOP-16208) | Do Not Log InterruptedException in Client |  Minor | common | David Mollitor | David Mollitor |
+| [YARN-9463](https://issues.apache.org/jira/browse/YARN-9463) | Add queueName info when failing with queue capacity sanity check |  Trivial | capacity scheduler | Aihua Xu | Aihua Xu |
+| [HADOOP-16227](https://issues.apache.org/jira/browse/HADOOP-16227) | Upgrade checkstyle to 8.19 |  Major | build | Akira Ajisaka | Akira Ajisaka |
+| [HDFS-14432](https://issues.apache.org/jira/browse/HDFS-14432) | dfs.datanode.shared.file.descriptor.paths duplicated in hdfs-default.xml |  Minor | hdfs | puleya7 | puleya7 |
+| [HDFS-14463](https://issues.apache.org/jira/browse/HDFS-14463) | Add Log Level link under NameNode and DataNode Web UI Utilities dropdown |  Trivial | webhdfs | Siyao Meng | Siyao Meng |
+| [YARN-9529](https://issues.apache.org/jira/browse/YARN-9529) | Log correct cpu controller path on error while initializing CGroups. |  Major | nodemanager | Jonathan Hung | Jonathan Hung |
+| [HADOOP-16289](https://issues.apache.org/jira/browse/HADOOP-16289) | Allow extra jsvc startup option in hadoop\_start\_secure\_daemon in hadoop-functions.sh |  Major | scripts | Siyao Meng | Siyao Meng |
+| [HADOOP-16307](https://issues.apache.org/jira/browse/HADOOP-16307) | Intern User Name and Group Name in FileStatus |  Major | fs | David Mollitor | David Mollitor |
+| [HDFS-14507](https://issues.apache.org/jira/browse/HDFS-14507) | Document -blockingDecommission option for hdfs dfsadmin -listOpenFiles |  Minor | documentation | Siyao Meng | Siyao Meng |
+| [HDFS-14451](https://issues.apache.org/jira/browse/HDFS-14451) | Incorrect header or version mismatch log message |  Minor | ipc | David Mollitor | Shweta |
+| [HDFS-14502](https://issues.apache.org/jira/browse/HDFS-14502) | keepResults option in NNThroughputBenchmark should call saveNamespace() |  Major | benchmarks, hdfs | Konstantin Shvachko | Konstantin Shvachko |
+| [HADOOP-16323](https://issues.apache.org/jira/browse/HADOOP-16323) | https everywhere in Maven settings |  Minor | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9563](https://issues.apache.org/jira/browse/YARN-9563) | Resource report REST API could return NaN or Inf |  Minor | . | Ahmed Hussein | Ahmed Hussein |
+| [YARN-9545](https://issues.apache.org/jira/browse/YARN-9545) | Create healthcheck REST endpoint for ATSv2 |  Major | ATSv2 | Zoltan Siegl | Zoltan Siegl |
+| [HDFS-10659](https://issues.apache.org/jira/browse/HDFS-10659) | Namenode crashes after Journalnode re-installation in an HA cluster due to missing paxos directory |  Major | ha, journal-node | Amit Anand | star |
+| [HDFS-14513](https://issues.apache.org/jira/browse/HDFS-14513) | FSImage which is saving should be clean while NameNode shutdown |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [YARN-9543](https://issues.apache.org/jira/browse/YARN-9543) | [UI2] Handle ATSv2 server down or failures cases gracefully in YARN UI v2 |  Major | ATSv2, yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [HADOOP-16369](https://issues.apache.org/jira/browse/HADOOP-16369) | Fix zstandard shortname misspelled as zts |  Major | . | Jonathan Eagles | Jonathan Eagles |
+| [HDFS-14560](https://issues.apache.org/jira/browse/HDFS-14560) | Allow block replication parameters to be refreshable |  Major | namenode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12770](https://issues.apache.org/jira/browse/HDFS-12770) | Add doc about how to disable client socket cache |  Trivial | hdfs-client | Weiwei Yang | Weiwei Yang |
+| [HADOOP-9157](https://issues.apache.org/jira/browse/HADOOP-9157) | Better option for curl in hadoop-auth-examples |  Minor | documentation | Jingguo Yao | Andras Bokor |
+| [HDFS-14340](https://issues.apache.org/jira/browse/HDFS-14340) | Lower the log level when can't get postOpAttr |  Minor | nfs | Anuhan Torgonshar | Anuhan Torgonshar |
+| [HADOOP-15914](https://issues.apache.org/jira/browse/HADOOP-15914) | hadoop jar command has no help argument |  Major | common | Adam Antal | Adam Antal |
+| [HADOOP-16156](https://issues.apache.org/jira/browse/HADOOP-16156) | [Clean-up] Remove NULL check before instanceof and fix checkstyle in InnerNodeImpl |  Minor | . | Shweta | Shweta |
+| [HADOOP-14385](https://issues.apache.org/jira/browse/HADOOP-14385) | HttpExceptionUtils#validateResponse swallows exceptions |  Trivial | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
+| [HDFS-12564](https://issues.apache.org/jira/browse/HDFS-12564) | Add the documents of swebhdfs configurations on the client side |  Major | documentation, webhdfs | Takanobu Asanuma | Takanobu Asanuma |
+| [HDFS-14403](https://issues.apache.org/jira/browse/HDFS-14403) | Cost-Based RPC FairCallQueue |  Major | ipc, namenode | Erik Krogen | Christopher Gregorian |
+| [HADOOP-16266](https://issues.apache.org/jira/browse/HADOOP-16266) | Add more fine-grained processing time metrics to the RPC layer |  Minor | ipc | Christopher Gregorian | Erik Krogen |
+| [YARN-9629](https://issues.apache.org/jira/browse/YARN-9629) | Support configurable MIN\_LOG\_ROLLING\_INTERVAL |  Minor | log-aggregation, nodemanager, yarn | Adam Antal | Adam Antal |
+| [HDFS-13694](https://issues.apache.org/jira/browse/HDFS-13694) | Making md5 computing being in parallel with image loading |  Major | . | zhouyingchao | Lisheng Sun |
+| [HDFS-14632](https://issues.apache.org/jira/browse/HDFS-14632) | Reduce useless #getNumLiveDataNodes call in SafeModeMonitor |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [YARN-9573](https://issues.apache.org/jira/browse/YARN-9573) | DistributedShell cannot specify LogAggregationContext |  Major | distributed-shell, log-aggregation, yarn | Adam Antal | Adam Antal |
+| [YARN-9337](https://issues.apache.org/jira/browse/YARN-9337) | GPU auto-discovery script runs even when the resource is given by hand |  Major | yarn | Adam Antal | Adam Antal |
+| [YARN-9127](https://issues.apache.org/jira/browse/YARN-9127) | Create more tests to verify GpuDeviceInformationParser |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [HDFS-14547](https://issues.apache.org/jira/browse/HDFS-14547) | DirectoryWithQuotaFeature.quota costs additional memory even the storage type quota is not set. |  Major | . | Jinglun | Jinglun |
+| [HDFS-14697](https://issues.apache.org/jira/browse/HDFS-14697) | Backport HDFS-14513 to branch-2 |  Minor | namenode | He Xiaoqiao | He Xiaoqiao |
+| [YARN-8045](https://issues.apache.org/jira/browse/YARN-8045) | Reduce log output from container status calls |  Major | . | Shane Kumpf | Craig Condit |
+| [HDFS-14693](https://issues.apache.org/jira/browse/HDFS-14693) | NameNode should log a warning when EditLog IPC logger's pending size exceeds limit. |  Minor | namenode | Xudong Cao | Xudong Cao |
+| [YARN-9094](https://issues.apache.org/jira/browse/YARN-9094) | Remove unused interface method: NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM |  Trivial | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9096](https://issues.apache.org/jira/browse/YARN-9096) | Some GpuResourcePlugin and ResourcePluginManager methods are synchronized unnecessarily |  Major | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9092](https://issues.apache.org/jira/browse/YARN-9092) | Create an object for cgroups mount enable and cgroups mount path as they belong together |  Minor | . | Szilard Nemeth | Gergely Pollak |
+| [YARN-9124](https://issues.apache.org/jira/browse/YARN-9124) | Resolve contradiction in ResourceUtils: addMandatoryResources / checkMandatoryResources work differently |  Minor | . | Szilard Nemeth | Adam Antal |
+| [YARN-8199](https://issues.apache.org/jira/browse/YARN-8199) | Logging fileSize of log files under NM Local Dir |  Major | log-aggregation | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9729](https://issues.apache.org/jira/browse/YARN-9729) | [UI2] Fix error message for logs when ATSv2 is offline |  Major | yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [YARN-9135](https://issues.apache.org/jira/browse/YARN-9135) | NM State store ResourceMappings serialization are tested with Strings instead of real Device objects |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [HDFS-14370](https://issues.apache.org/jira/browse/HDFS-14370) | Edit log tailing fast-path should allow for backoff |  Major | namenode, qjm | Erik Krogen | Erik Krogen |
+| [YARN-9442](https://issues.apache.org/jira/browse/YARN-9442) | container working directory has group read permissions |  Minor | yarn | Jim Brennan | Jim Brennan |
+| [HADOOP-16459](https://issues.apache.org/jira/browse/HADOOP-16459) | Backport [HADOOP-16266] "Add more fine-grained processing time metrics to the RPC layer" to branch-2 |  Major | . | Erik Krogen | Erik Krogen |
+| [HDFS-14491](https://issues.apache.org/jira/browse/HDFS-14491) | More Clarity on Namenode UI Around Blocks and Replicas |  Minor | . | Alan Jackoway | Siyao Meng |
+| [YARN-9140](https://issues.apache.org/jira/browse/YARN-9140) | Code cleanup in ResourcePluginManager.initialize and in TestResourcePluginManager |  Trivial | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9488](https://issues.apache.org/jira/browse/YARN-9488) | Skip YARNFeatureNotEnabledException from ClientRMService |  Minor | resourcemanager | Prabhu Joseph | Prabhu Joseph |
+| [YARN-8586](https://issues.apache.org/jira/browse/YARN-8586) | Extract log aggregation related fields and methods from RMAppImpl |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [YARN-9100](https://issues.apache.org/jira/browse/YARN-9100) | Add tests for GpuResourceAllocator and do minor code cleanup |  Major | . | Szilard Nemeth | Peter Bacsko |
+| [HADOOP-15246](https://issues.apache.org/jira/browse/HADOOP-15246) | SpanReceiverInfo - Prefer ArrayList over LinkedList |  Trivial | common | David Mollitor | David Mollitor |
+| [HADOOP-16158](https://issues.apache.org/jira/browse/HADOOP-16158) | DistCp to support checksum validation when copy blocks in parallel |  Major | tools/distcp | Kai Xie | Kai Xie |
+| [HDFS-14746](https://issues.apache.org/jira/browse/HDFS-14746) | Trivial test code update after HDFS-14687 |  Trivial | ec | Wei-Chiu Chuang | kevin su |
+| [HDFS-13709](https://issues.apache.org/jira/browse/HDFS-13709) | Report bad block to NN when transfer block encounter EIO exception |  Major | datanode | Chen Zhang | Chen Zhang |
+| [HDFS-14665](https://issues.apache.org/jira/browse/HDFS-14665) | HttpFS: LISTSTATUS response is missing HDFS-specific fields |  Major | httpfs | Siyao Meng | Siyao Meng |
+| [HDFS-14276](https://issues.apache.org/jira/browse/HDFS-14276) | [SBN read] Reduce tailing overhead |  Major | ha, namenode | Wei-Chiu Chuang | Ayush Saxena |
+| [HDFS-14748](https://issues.apache.org/jira/browse/HDFS-14748) | Make DataNodePeerMetrics#minOutlierDetectionSamples configurable |  Major | . | Lisheng Sun | Lisheng Sun |
+| [HADOOP-15998](https://issues.apache.org/jira/browse/HADOOP-15998) | Ensure jar validation works on Windows. |  Blocker | build | Brian Grunkemeyer | Brian Grunkemeyer |
+| [HDFS-14633](https://issues.apache.org/jira/browse/HDFS-14633) | The StorageType quota and consume in QuotaFeature is not handled for rename |  Major | . | Jinglun | Jinglun |
+| [YARN-9795](https://issues.apache.org/jira/browse/YARN-9795) | ClusterMetrics to include AM allocation delay |  Minor | . | Fengnan Li | Fengnan Li |
+| [YARN-8995](https://issues.apache.org/jira/browse/YARN-8995) | Log events info in AsyncDispatcher when event queue size cumulatively reaches a certain number every time. |  Major | metrics, nodemanager, resourcemanager | zhuqi | zhuqi |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13642](https://issues.apache.org/jira/browse/HDFS-13642) | Creating a file with block size smaller than EC policy's cell size should fail |  Major | erasure-coding | Xiao Chen | Xiao Chen |
+| [HADOOP-15948](https://issues.apache.org/jira/browse/HADOOP-15948) | Inconsistency in get and put syntax if filename/dirname contains space |  Minor | fs | vivek kumar | Ayush Saxena |
+| [HDFS-13816](https://issues.apache.org/jira/browse/HDFS-13816) | dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException |  Major | namenode | Vinayakumar B | Vinayakumar B |
+| [HADOOP-15966](https://issues.apache.org/jira/browse/HADOOP-15966) | Hadoop Kerberos broken on macos as java.security.krb5.realm is reset: Null realm name (601) |  Major | scripts | Steve Loughran | Steve Loughran |
+| [HADOOP-16028](https://issues.apache.org/jira/browse/HADOOP-16028) | Fix NetworkTopology chooseRandom function to support excluded nodes |  Major | . | Sihai Ke | Sihai Ke |
+| [YARN-9162](https://issues.apache.org/jira/browse/YARN-9162) | Fix TestRMAdminCLI#testHelp |  Major | resourcemanager, test | Ayush Saxena | Ayush Saxena |
+| [HADOOP-16031](https://issues.apache.org/jira/browse/HADOOP-16031) | TestSecureLogins#testValidKerberosName fails |  Major | security | Akira Ajisaka | Akira Ajisaka |
+| [HADOOP-16016](https://issues.apache.org/jira/browse/HADOOP-16016) | TestSSLFactory#testServerWeakCiphers sporadically fails in precommit builds |  Major | security, test | Jason Lowe | Akira Ajisaka |
+| [HDFS-14198](https://issues.apache.org/jira/browse/HDFS-14198) | Upload and Create button doesn't get enabled after getting reset. |  Major | . | Ayush Saxena | Ayush Saxena |
+| [YARN-9203](https://issues.apache.org/jira/browse/YARN-9203) | Fix typos in yarn-default.xml |  Trivial | documentation | Rahul Padmanabhan | Rahul Padmanabhan |
+| [HDFS-14207](https://issues.apache.org/jira/browse/HDFS-14207) | ZKFC should catch exception when ha configuration missing |  Major | hdfs | Fei Hui | Fei Hui |
+| [HDFS-14218](https://issues.apache.org/jira/browse/HDFS-14218) | EC: Ls -e throw NPE when directory ec policy is disabled |  Major | . | Surendra Singh Lilhore | Ayush Saxena |
+| [YARN-9210](https://issues.apache.org/jira/browse/YARN-9210) | RM nodes web page can not display node info |  Blocker | yarn | Jiandan Yang | Jiandan Yang |
+| [YARN-8961](https://issues.apache.org/jira/browse/YARN-8961) | [UI2] Flow Run End Time shows 'Invalid date' |  Major | . | Charan Hebri | Akhil PB |
+| [YARN-7088](https://issues.apache.org/jira/browse/YARN-7088) | Add application launch time to Resource Manager REST API |  Major | . | Abdullah Yousufi | Kanwaljeet Sachdev |
+| [YARN-9222](https://issues.apache.org/jira/browse/YARN-9222) | Print launchTime in ApplicationSummary |  Major | . | Jonathan Hung | Jonathan Hung |
+| [YARN-8901](https://issues.apache.org/jira/browse/YARN-8901) | Restart "NEVER" policy does not work with component dependency |  Critical | . | Yesha Vora | Suma Shivaprasad |
+| [YARN-9237](https://issues.apache.org/jira/browse/YARN-9237) | NM should ignore sending finished apps to RM during RM fail-over |  Major | yarn | Jiandan Yang | Jiandan Yang |
+| [YARN-6616](https://issues.apache.org/jira/browse/YARN-6616) | YARN AHS shows submitTime for jobs same as startTime |  Minor | . | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9099](https://issues.apache.org/jira/browse/YARN-9099) | GpuResourceAllocator#getReleasingGpus calculates number of GPUs in a wrong way |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [HADOOP-16086](https://issues.apache.org/jira/browse/HADOOP-16086) | Backport HADOOP-15549 to branch-3.1 |  Major | metrics | Yuming Wang | Todd Lipcon |
+| [YARN-9206](https://issues.apache.org/jira/browse/YARN-9206) | RMServerUtils does not count SHUTDOWN as an accepted state |  Major | . | Kuhu Shukla | Kuhu Shukla |
+| [HADOOP-16096](https://issues.apache.org/jira/browse/HADOOP-16096) | HADOOP-15281/distcp -Xdirect needs to use commons-logging on 3.1 |  Critical | . | Eric Payne | Steve Loughran |
+| [HDFS-14140](https://issues.apache.org/jira/browse/HDFS-14140) | JournalNodeSyncer authentication is failing in secure cluster |  Major | journal-node, security | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-9257](https://issues.apache.org/jira/browse/YARN-9257) | Distributed Shell client throws a NPE for a non-existent queue |  Major | distributed-shell | Charan Hebri | Charan Hebri |
+| [YARN-8761](https://issues.apache.org/jira/browse/YARN-8761) | Service AM support for decommissioning component instances |  Major | . | Billie Rinaldi | Billie Rinaldi |
+| [HDFS-14266](https://issues.apache.org/jira/browse/HDFS-14266) | EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. |  Major | . | Harshakiran Reddy | Ayush Saxena |
+| [HDFS-14274](https://issues.apache.org/jira/browse/HDFS-14274) | EC: NPE While Listing EC Policy For A Directory Following Replication Policy. |  Major | erasure-coding | Souryakanta Dwivedy | Ayush Saxena |
+| [HDFS-14263](https://issues.apache.org/jira/browse/HDFS-14263) | Remove unnecessary block file exists check from FsDatasetImpl#getBlockInputStream() |  Major | datanode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [YARN-7761](https://issues.apache.org/jira/browse/YARN-7761) | [UI2] Clicking 'master container log' or 'Link' next to 'log' under application's appAttempt goes to Old UI's Log link |  Major | yarn-ui-v2 | Sumana Sathish | Akhil PB |
+| [YARN-9295](https://issues.apache.org/jira/browse/YARN-9295) | [UI2] Fix label typo in Cluster Overview page |  Trivial | yarn-ui-v2 | Charan Hebri | Charan Hebri |
+| [YARN-9284](https://issues.apache.org/jira/browse/YARN-9284) | Fix the unit of yarn.service.am-resource.memory in the document |  Minor | documentation, yarn-native-services | Masahiro Tanaka | Masahiro Tanaka |
+| [YARN-9283](https://issues.apache.org/jira/browse/YARN-9283) | Javadoc of LinuxContainerExecutor#addSchedPriorityCommand has a wrong property name as reference |  Minor | documentation | Szilard Nemeth | Adam Antal |
+| [YARN-9286](https://issues.apache.org/jira/browse/YARN-9286) | [Timeline Server] Sorting based on FinalStatus shows pop-up message |  Minor | timelineserver | Nallasivan | Bilwa S T |
+| [HDFS-14081](https://issues.apache.org/jira/browse/HDFS-14081) | hdfs dfsadmin -metasave metasave\_test results NPE |  Major | hdfs | Shweta | Shweta |
+| [HADOOP-15813](https://issues.apache.org/jira/browse/HADOOP-15813) | Enable more reliable SSL connection reuse |  Major | common | Daryn Sharp | Daryn Sharp |
+| [HADOOP-16105](https://issues.apache.org/jira/browse/HADOOP-16105) | WASB in secure mode does not set connectingUsingSAS |  Major | fs/azure | Steve Loughran | Steve Loughran |
+| [YARN-9238](https://issues.apache.org/jira/browse/YARN-9238) | Avoid allocating opportunistic containers to previous/removed/non-exist application attempt |  Critical | . | lujie | lujie |
+| [YARN-9118](https://issues.apache.org/jira/browse/YARN-9118) | Handle exceptions with parsing user defined GPU devices in GpuDiscoverer |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9317](https://issues.apache.org/jira/browse/YARN-9317) | Avoid repeated YarnConfiguration#timelineServiceV2Enabled check |  Major | . | Bibin A Chundatt | Prabhu Joseph |
+| [YARN-9213](https://issues.apache.org/jira/browse/YARN-9213) | RM Web UI v1 does not show custom resource allocations for containers page |  Major | . | Szilard Nemeth | Szilard Nemeth |
+| [YARN-9248](https://issues.apache.org/jira/browse/YARN-9248) | RMContainerImpl:Invalid event: ACQUIRED at KILLED |  Major | . | lujie | lujie |
+| [HADOOP-16018](https://issues.apache.org/jira/browse/HADOOP-16018) | DistCp won't reassemble chunks when blocks per chunk \> 0 |  Major | tools/distcp | Kai Xie | Kai Xie |
+| [YARN-9334](https://issues.apache.org/jira/browse/YARN-9334) | YARN Service Client does not work with SPNEGO when knox is configured |  Major | yarn-native-services | Tarun Parimi | Billie Rinaldi |
+| [HDFS-14305](https://issues.apache.org/jira/browse/HDFS-14305) | Serial number in BlockTokenSecretManager could overlap between different namenodes |  Major | namenode, security | Chao Sun | He Xiaoqiao |
+| [HDFS-14314](https://issues.apache.org/jira/browse/HDFS-14314) | fullBlockReportLeaseId should be reset after registering to NN |  Critical | datanode | star | star |
+| [YARN-8803](https://issues.apache.org/jira/browse/YARN-8803) | [UI2] Show flow runs in the order of recently created time in graph widgets |  Major | yarn-ui-v2 | Akhil PB | Akhil PB |
+| [HADOOP-16114](https://issues.apache.org/jira/browse/HADOOP-16114) | NetUtils#canonicalizeHost gives different value for same host |  Minor | net | Praveen Krishna | Praveen Krishna |
+| [HDFS-14317](https://issues.apache.org/jira/browse/HDFS-14317) | Standby does not trigger edit log rolling when in-progress edit log tailing is enabled |  Critical | . | Ekanth Sethuramalingam | Ekanth Sethuramalingam |
+| [HDFS-14333](https://issues.apache.org/jira/browse/HDFS-14333) | Datanode fails to start if any disk has errors during Namenode registration |  Major | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HADOOP-16192](https://issues.apache.org/jira/browse/HADOOP-16192) | CallQueue backoff bug fixes: doesn't perform backoff when add() is used, and doesn't update backoff when refreshed |  Major | ipc | Erik Krogen | Erik Krogen |
+| [HDFS-14037](https://issues.apache.org/jira/browse/HDFS-14037) | Fix SSLFactory truststore reloader thread leak in URLConnectionFactory |  Major | hdfs-client, webhdfs | Takanobu Asanuma | Takanobu Asanuma |
+| [HADOOP-16225](https://issues.apache.org/jira/browse/HADOOP-16225) | Fix links to the developer mailing lists in DownstreamDev.md |  Minor | documentation | Akira Ajisaka | Wanqiang Ji |
+| [HADOOP-16232](https://issues.apache.org/jira/browse/HADOOP-16232) | Fix errors in the checkstyle configration xmls |  Major | build | Akira Ajisaka | Wanqiang Ji |
+| [HDFS-14389](https://issues.apache.org/jira/browse/HDFS-14389) | getAclStatus returns incorrect permissions and owner when an iNodeAttributeProvider is configured |  Major | namenode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-14407](https://issues.apache.org/jira/browse/HDFS-14407) | Fix misuse of SLF4j logging API in DatasetVolumeChecker#checkAllVolumes |  Minor | . | Wanqiang Ji | Wanqiang Ji |
+| [YARN-9413](https://issues.apache.org/jira/browse/YARN-9413) | Queue resource leak after app fail for CapacityScheduler |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HADOOP-14544](https://issues.apache.org/jira/browse/HADOOP-14544) | DistCp documentation for command line options is misaligned. |  Minor | documentation | Chris Nauroth | Masatake Iwasaki |
+| [HDFS-10477](https://issues.apache.org/jira/browse/HDFS-10477) | Stop decommission a rack of DataNodes caused NameNode fail over to standby |  Major | namenode | yunjiong zhao | yunjiong zhao |
+| [YARN-6695](https://issues.apache.org/jira/browse/YARN-6695) | Race condition in RM for publishing container events vs appFinished events causes NPE |  Critical | . | Rohith Sharma K S | Prabhu Joseph |
+| [YARN-8622](https://issues.apache.org/jira/browse/YARN-8622) | NodeManager native build fails due to getgrouplist not found on macOS |  Major | nodemanager | Ewan Higgs | Siyao Meng |
+| [HADOOP-16265](https://issues.apache.org/jira/browse/HADOOP-16265) | Configuration#getTimeDuration is not consistent between default value and manual settings. |  Major | . | star | star |
+| [YARN-9307](https://issues.apache.org/jira/browse/YARN-9307) | node\_partitions constraint does not work |  Major | . | kyungwan nam | kyungwan nam |
+| [HDFS-13677](https://issues.apache.org/jira/browse/HDFS-13677) | Dynamic refresh Disk configuration results in overwriting VolumeMap |  Blocker | . | xuzq | xuzq |
+| [YARN-9285](https://issues.apache.org/jira/browse/YARN-9285) | RM UI progress column is of wrong type |  Minor | yarn | Ahmed Hussein | Ahmed Hussein |
+| [HADOOP-16278](https://issues.apache.org/jira/browse/HADOOP-16278) | With S3A Filesystem, Long Running services End up Doing lot of GC and eventually die |  Major | common, hadoop-aws, metrics | Rajat Khandelwal | Rajat Khandelwal |
+| [YARN-9504](https://issues.apache.org/jira/browse/YARN-9504) | [UI2] Fair scheduler queue view page does not show actual capacity |  Major | fairscheduler, yarn-ui-v2 | Zoltan Siegl | Zoltan Siegl |
+| [YARN-9519](https://issues.apache.org/jira/browse/YARN-9519) | TFile log aggregation file format is not working for yarn.log-aggregation.TFile.remote-app-log-dir config |  Major | log-aggregation | Adam Antal | Adam Antal |
+| [HADOOP-16247](https://issues.apache.org/jira/browse/HADOOP-16247) | NPE in FsUrlConnection |  Major | hdfs-client | Karthik Palanisamy | Karthik Palanisamy |
+| [HADOOP-16248](https://issues.apache.org/jira/browse/HADOOP-16248) | MutableQuantiles leak memory under heavy load |  Major | metrics | Alexis Daboville | Alexis Daboville |
+| [HDFS-14323](https://issues.apache.org/jira/browse/HDFS-14323) | Distcp fails in Hadoop 3.x when 2.x source webhdfs url has special characters in hdfs file path |  Major | webhdfs | Srinivasu Majeti | Srinivasu Majeti |
+| [MAPREDUCE-7205](https://issues.apache.org/jira/browse/MAPREDUCE-7205) | Treat container scheduler kill exit code as a task attempt killing event |  Major | applicationmaster, mr-am, mrv2 | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14500](https://issues.apache.org/jira/browse/HDFS-14500) | NameNode StartupProgress continues to report edit log segments after the LOADING\_EDITS phase is finished |  Major | namenode | Erik Krogen | Erik Krogen |
+| [HADOOP-16331](https://issues.apache.org/jira/browse/HADOOP-16331) | Fix ASF License check in pom.xml |  Major | . | Wanqiang Ji | Akira Ajisaka |
+| [YARN-9542](https://issues.apache.org/jira/browse/YARN-9542) | Fix LogsCLI guessAppOwner ignores custom file format suffix |  Minor | log-aggregation | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-14512](https://issues.apache.org/jira/browse/HDFS-14512) | ONE\_SSD policy will be violated while write data with DistributedFileSystem.create(....favoredNodes) |  Major | . | Shen Yinjie | Ayush Saxena |
+| [HADOOP-16334](https://issues.apache.org/jira/browse/HADOOP-16334) | Fix yetus-wrapper not working when HADOOP\_YETUS\_VERSION \>= 0.9.0 |  Major | yetus | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14521](https://issues.apache.org/jira/browse/HDFS-14521) | Suppress setReplication logging. |  Major | . | Kihwal Lee | Kihwal Lee |
+| [YARN-9507](https://issues.apache.org/jira/browse/YARN-9507) | Fix NPE in NodeManager#serviceStop on startup failure |  Minor | . | Bilwa S T | Bilwa S T |
+| [YARN-8947](https://issues.apache.org/jira/browse/YARN-8947) | [UI2] Active User info missing from UI2 |  Major | yarn-ui-v2 | Akhil PB | Akhil PB |
+| [YARN-8906](https://issues.apache.org/jira/browse/YARN-8906) | [UI2] NM hostnames not displayed correctly in Node Heatmap Chart |  Major | . | Charan Hebri | Akhil PB |
+| [YARN-8625](https://issues.apache.org/jira/browse/YARN-8625) | Aggregate Resource Allocation for each job is not present in ATS |  Major | ATSv2 | Prabhu Joseph | Prabhu Joseph |
+| [HADOOP-16345](https://issues.apache.org/jira/browse/HADOOP-16345) | Potential NPE when instantiating FairCallQueue metrics |  Major | ipc | Erik Krogen | Erik Krogen |
+| [YARN-9594](https://issues.apache.org/jira/browse/YARN-9594) | Fix missing break statement in ContainerScheduler#handle |  Major | . | lujie | lujie |
+| [YARN-9565](https://issues.apache.org/jira/browse/YARN-9565) | RMAppImpl#ranNodes not cleared on FinalTransition |  Major | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9547](https://issues.apache.org/jira/browse/YARN-9547) | ContainerStatusPBImpl default execution type is not returned |  Major | . | Bibin A Chundatt | Bilwa S T |
+| [HDFS-13231](https://issues.apache.org/jira/browse/HDFS-13231) | Extend visualization for Decommissioning, Maintenance Mode under Datanode tab in the NameNode UI |  Major | datanode, namenode | Haibo Yan | Stephen O'Donnell |
+| [YARN-9621](https://issues.apache.org/jira/browse/YARN-9621) | FIX TestDSWithMultipleNodeManager.testDistributedShellWithPlacementConstraint on branch-3.1 |  Major | distributed-shell, test | Peter Bacsko | Prabhu Joseph |
+| [HDFS-14535](https://issues.apache.org/jira/browse/HDFS-14535) | The default 8KB buffer in requestFileDescriptors#BufferedOutputStream is causing lots of heap allocation in HBase when using short-circut read |  Major | hdfs-client | Zheng Hu | Zheng Hu |
+| [HDFS-13730](https://issues.apache.org/jira/browse/HDFS-13730) | BlockReaderRemote.sendReadResult throws NPE |  Major | hdfs-client | Wei-Chiu Chuang | Yuanbo Liu |
+| [YARN-9584](https://issues.apache.org/jira/browse/YARN-9584) | Should put initializeProcessTrees method call before get pid |  Critical | nodemanager | Wanqiang Ji | Wanqiang Ji |
+| [HDFS-14010](https://issues.apache.org/jira/browse/HDFS-14010) | Pass correct DF usage to ReservedSpaceCalculator builder |  Minor | . | Lukas Majercak | Lukas Majercak |
+| [HDFS-14078](https://issues.apache.org/jira/browse/HDFS-14078) | Admin helper fails to prettify NullPointerExceptions |  Major | . | Elek, Marton | Elek, Marton |
+| [HDFS-14101](https://issues.apache.org/jira/browse/HDFS-14101) | Random failure of testListCorruptFilesCorruptedBlock |  Major | test | Kihwal Lee | Zsolt Venczel |
+| [HDFS-14465](https://issues.apache.org/jira/browse/HDFS-14465) | When the Block expected replications is larger than the number of DataNodes, entering maintenance will never exit. |  Major | . | Yicong Cai | Yicong Cai |
+| [HDFS-12487](https://issues.apache.org/jira/browse/HDFS-12487) | FsDatasetSpi.isValidBlock() lacks null pointer check inside and neither do the callers |  Major | balancer & mover, diskbalancer | liumi | liumi |
+| [HDFS-14074](https://issues.apache.org/jira/browse/HDFS-14074) | DataNode runs async disk checks  maybe  throws NullPointerException, and DataNode failed to register to NameSpace. |  Major | hdfs | guangyi lu | guangyi lu |
+| [HDFS-14541](https://issues.apache.org/jira/browse/HDFS-14541) |  When evictableMmapped or evictable size is zero, do not throw NoSuchElementException |  Major | hdfs-client, performance | Zheng Hu | Lisheng Sun |
+| [HDFS-14598](https://issues.apache.org/jira/browse/HDFS-14598) | Findbugs warning caused by HDFS-12487 |  Minor | diskbalancer | Wei-Chiu Chuang | He Xiaoqiao |
+| [YARN-9639](https://issues.apache.org/jira/browse/YARN-9639) | DecommissioningNodesWatcher cause memory leak |  Blocker | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9327](https://issues.apache.org/jira/browse/YARN-9327) | Improve synchronisation in ProtoUtils#convertToProtoFormat block |  Critical | . | Bibin A Chundatt | Bibin A Chundatt |
+| [YARN-9655](https://issues.apache.org/jira/browse/YARN-9655) | AllocateResponse in FederationInterceptor lost  applicationPriority |  Major | federation | hunshenshi | hunshenshi |
+| [HADOOP-16385](https://issues.apache.org/jira/browse/HADOOP-16385) | Namenode crashes with "RedundancyMonitor thread received Runtime exception" |  Major | . | krishna reddy | Ayush Saxena |
+| [YARN-9644](https://issues.apache.org/jira/browse/YARN-9644) | First RMContext object is always leaked during switch over |  Blocker | . | Bibin A Chundatt | Bibin A Chundatt |
+| [HDFS-14629](https://issues.apache.org/jira/browse/HDFS-14629) | Property value Hard Coded in DNConf.java |  Trivial | . | hemanthboyina | hemanthboyina |
+| [YARN-9557](https://issues.apache.org/jira/browse/YARN-9557) | Application fails in diskchecker when ReadWriteDiskValidator is configured. |  Critical | nodemanager | Anuruddh Nayak | Bilwa S T |
+| [HDFS-12703](https://issues.apache.org/jira/browse/HDFS-12703) | Exceptions are fatal to decommissioning monitor |  Critical | namenode | Daryn Sharp | He Xiaoqiao |
+| [HDFS-12748](https://issues.apache.org/jira/browse/HDFS-12748) | NameNode memory leak when accessing webhdfs GETHOMEDIRECTORY |  Major | hdfs | Jiandan Yang | Weiwei Yang |
+| [YARN-9625](https://issues.apache.org/jira/browse/YARN-9625) | UI2 - No link to a queue on the Queues page for Fair Scheduler |  Major | . | Charan Hebri | Zoltan Siegl |
+| [HDFS-14466](https://issues.apache.org/jira/browse/HDFS-14466) | Add a regression test for HDFS-14323 |  Minor | fs, test, webhdfs | Yuya Ebihara | Masatake Iwasaki |
+| [YARN-9235](https://issues.apache.org/jira/browse/YARN-9235) | If linux container executor is not set for a GPU cluster GpuResourceHandlerImpl is not initialized and NPE is thrown |  Major | yarn | Antal Bálint Steinbach | Adam Antal |
+| [YARN-9626](https://issues.apache.org/jira/browse/YARN-9626) | UI2 - Fair scheduler queue apps page issues |  Major | . | Charan Hebri | Zoltan Siegl |
+| [YARN-9682](https://issues.apache.org/jira/browse/YARN-9682) | Wrong log message when finalizing the upgrade |  Trivial | . | kyungwan nam | kyungwan nam |
+| [HADOOP-16440](https://issues.apache.org/jira/browse/HADOOP-16440) | Distcp can not preserve timestamp with -delete  option |  Major | . | ludun | ludun |
+| [MAPREDUCE-7076](https://issues.apache.org/jira/browse/MAPREDUCE-7076) | TestNNBench#testNNBenchCreateReadAndDelete failing in our internal build |  Minor | test | Rushabh S Shah | kevin su |
+| [YARN-9668](https://issues.apache.org/jira/browse/YARN-9668) | UGI conf doesn't read user overridden configurations on RM and NM startup |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HADOOP-9844](https://issues.apache.org/jira/browse/HADOOP-9844) | NPE when trying to create an error message response of SASL RPC |  Major | ipc | Steve Loughran | Steve Loughran |
+| [HADOOP-16245](https://issues.apache.org/jira/browse/HADOOP-16245) | Enabling SSL within LdapGroupsMapping can break system SSL configs |  Major | common, security | Erik Krogen | Erik Krogen |
+| [HDFS-14429](https://issues.apache.org/jira/browse/HDFS-14429) | Block remain in COMMITTED but not COMPLETE caused by Decommission |  Major | . | Yicong Cai | Yicong Cai |
+| [HADOOP-16435](https://issues.apache.org/jira/browse/HADOOP-16435) | RpcMetrics should not be retained forever |  Critical | rpc-server | Zoltan Haindrich | Zoltan Haindrich |
+| [YARN-9596](https://issues.apache.org/jira/browse/YARN-9596) | QueueMetrics has incorrect metrics when labelled partitions are involved |  Major | capacity scheduler | Muhammad Samir Khan | Muhammad Samir Khan |
+| [MAPREDUCE-7225](https://issues.apache.org/jira/browse/MAPREDUCE-7225) | Fix broken current folder expansion during MR job start |  Major | mrv2 | Adam Antal | Peter Bacsko |
+| [HDFS-13529](https://issues.apache.org/jira/browse/HDFS-13529) | Fix default trash policy emptier trigger time correctly |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HADOOP-15681](https://issues.apache.org/jira/browse/HADOOP-15681) | AuthenticationFilter should generate valid date format for Set-Cookie header regardless of default Locale |  Minor | security | Cao Manh Dat | Cao Manh Dat |
+| [HDFS-14685](https://issues.apache.org/jira/browse/HDFS-14685) | DefaultAuditLogger doesn't print CallerContext |  Major | hdfs | xuzq | xuzq |
+| [HDFS-14462](https://issues.apache.org/jira/browse/HDFS-14462) | WebHDFS throws "Error writing request body to server" instead of DSQuotaExceededException |  Major | webhdfs | Erik Krogen | Simbarashe Dzinamarira |
+| [HDFS-14557](https://issues.apache.org/jira/browse/HDFS-14557) | JournalNode error: Can't scan a pre-transactional edit log |  Major | ha | Wei-Chiu Chuang | Stephen O'Donnell |
+| [HDFS-14692](https://issues.apache.org/jira/browse/HDFS-14692) | Upload button should not encode complete url |  Major | . | Lokesh Jain | Lokesh Jain |
+| [HDFS-14631](https://issues.apache.org/jira/browse/HDFS-14631) | The DirectoryScanner doesn't fix the wrongly placed replica. |  Major | . | Jinglun | Jinglun |
+| [YARN-9685](https://issues.apache.org/jira/browse/YARN-9685) | NPE when rendering the info table of leaf queue in non-accessible partitions |  Major | capacityscheduler | Tao Yang | Tao Yang |
+| [HDFS-14459](https://issues.apache.org/jira/browse/HDFS-14459) | ClosedChannelException silently ignored in FsVolumeList.addBlockPool() |  Major | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13359](https://issues.apache.org/jira/browse/HDFS-13359) | DataXceiver hung due to the lock in FsDatasetImpl#getBlockInputStream |  Major | datanode | Yiqun Lin | Yiqun Lin |
+| [YARN-9451](https://issues.apache.org/jira/browse/YARN-9451) | AggregatedLogsBlock shows wrong NM http port |  Minor | nodemanager | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9723](https://issues.apache.org/jira/browse/YARN-9723) | ApplicationPlacementContext is not required for terminated jobs during recovery |  Major | resourcemanager | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-12914](https://issues.apache.org/jira/browse/HDFS-12914) | Block report leases cause missing blocks until next report |  Critical | namenode | Daryn Sharp | Santosh Marella |
+| [HDFS-14148](https://issues.apache.org/jira/browse/HDFS-14148) | HDFS OIV ReverseXML SnapshotSection parser throws exception when there are more than one snapshottable directory |  Major | hdfs | Siyao Meng | Siyao Meng |
+| [HDFS-14595](https://issues.apache.org/jira/browse/HDFS-14595) | HDFS-11848 breaks API compatibility |  Blocker | . | Wei-Chiu Chuang | Siyao Meng |
+| [HDFS-14423](https://issues.apache.org/jira/browse/HDFS-14423) | Percent (%) and plus (+) characters no longer work in WebHDFS |  Major | webhdfs | Jing Wang | Masatake Iwasaki |
+| [MAPREDUCE-7230](https://issues.apache.org/jira/browse/MAPREDUCE-7230) | TestHSWebApp.testLogsViewSingle fails |  Major | jobhistoryserver, test | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-14687](https://issues.apache.org/jira/browse/HDFS-14687) | Standby Namenode never come out of safemode when EC files are being written. |  Critical | ec, namenode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [HDFS-13101](https://issues.apache.org/jira/browse/HDFS-13101) | Yet another fsimage corruption related to snapshot |  Major | snapshots | Yongjun Zhang | Shashikant Banerjee |
+| [HDFS-13201](https://issues.apache.org/jira/browse/HDFS-13201) | Fix prompt message in testPolicyAndStateCantBeNull |  Minor | . | chencan | chencan |
+| [HDFS-14311](https://issues.apache.org/jira/browse/HDFS-14311) | Multi-threading conflict at layoutVersion when loading block pool storage |  Major | rolling upgrades | Yicong Cai | Yicong Cai |
+| [HDFS-14582](https://issues.apache.org/jira/browse/HDFS-14582) | Failed to start DN with ArithmeticException when NULL checksum used |  Major | datanode | Surendra Singh Lilhore | Surendra Singh Lilhore |
+| [HADOOP-16494](https://issues.apache.org/jira/browse/HADOOP-16494) | Add SHA-256 or SHA-512 checksum to release artifacts to comply with the release distribution policy |  Blocker | build | Akira Ajisaka | Akira Ajisaka |
+| [YARN-9774](https://issues.apache.org/jira/browse/YARN-9774) | Fix order of arguments for assertEquals in TestSLSUtils |  Minor | test | Nikhil Navadiya | Nikhil Navadiya |
+| [HDFS-13596](https://issues.apache.org/jira/browse/HDFS-13596) | NN restart fails after RollingUpgrade from 2.x to 3.x |  Blocker | hdfs | Hanisha Koneru | Fei Hui |
+| [HDFS-14396](https://issues.apache.org/jira/browse/HDFS-14396) | Failed to load image from FSImageFile when downgrade from 3.x to 2.x |  Blocker | rolling upgrades | Fei Hui | Fei Hui |
+| [YARN-9642](https://issues.apache.org/jira/browse/YARN-9642) | Fix Memory Leak in AbstractYarnScheduler caused by timer |  Blocker | resourcemanager | Bibin A Chundatt | Bibin A Chundatt |
+| [HDFS-13977](https://issues.apache.org/jira/browse/HDFS-13977) | NameNode can kill itself if it tries to send too many txns to a QJM simultaneously |  Major | namenode, qjm | Erik Krogen | Erik Krogen |
+| [YARN-9438](https://issues.apache.org/jira/browse/YARN-9438) | launchTime not written to state store for running applications |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HDFS-12212](https://issues.apache.org/jira/browse/HDFS-12212) | Options.Rename.To\_TRASH is considered even when Options.Rename.NONE is specified |  Major | namenode | Vinayakumar B | Vinayakumar B |
+| [HDFS-8178](https://issues.apache.org/jira/browse/HDFS-8178) | QJM doesn't move aside stale inprogress edits files |  Major | qjm | Zhe Zhang | Istvan Fajth |
+| [HDFS-14706](https://issues.apache.org/jira/browse/HDFS-14706) | Checksums are not checked if block meta file is less than 7 bytes |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [YARN-9797](https://issues.apache.org/jira/browse/YARN-9797) | LeafQueue#activateApplications should use resourceCalculator#fitsIn |  Blocker | . | Bibin A Chundatt | Bilwa S T |
+| [YARN-9785](https://issues.apache.org/jira/browse/YARN-9785) | Fix DominantResourceCalculator when one resource is zero |  Blocker | . | Bilwa S T | Bilwa S T |
+| [YARN-9817](https://issues.apache.org/jira/browse/YARN-9817) | Fix failing testcases due to not initialized AsyncDispatcher -  ArithmeticException: / by zero |  Major | test | Prabhu Joseph | Prabhu Joseph |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-9315](https://issues.apache.org/jira/browse/YARN-9315) | TestCapacitySchedulerMetrics fails intermittently |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9316](https://issues.apache.org/jira/browse/YARN-9316) | TestPlacementConstraintsUtil#testInterAppConstraintsByAppID fails intermittently |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9325](https://issues.apache.org/jira/browse/YARN-9325) | TestQueueManagementDynamicEditPolicy fails intermittent |  Minor | capacity scheduler | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-11950](https://issues.apache.org/jira/browse/HDFS-11950) | Disable libhdfs zerocopy test on Mac |  Minor | libhdfs | John Zhuge | Akira Ajisaka |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-16045](https://issues.apache.org/jira/browse/HADOOP-16045) | Don't run TestDU on Windows |  Trivial | common, test | Lukas Majercak | Lukas Majercak |
+| [HADOOP-16079](https://issues.apache.org/jira/browse/HADOOP-16079) | Token.toString faulting if any token listed can't load. |  Blocker | security | Steve Loughran | Steve Loughran |
+| [YARN-9253](https://issues.apache.org/jira/browse/YARN-9253) | Add UT to verify Placement Constraint in Distributed Shell |  Major | . | Prabhu Joseph | Prabhu Joseph |
+| [YARN-9293](https://issues.apache.org/jira/browse/YARN-9293) | Optimize MockAMLauncher event handling |  Major | . | Bibin A Chundatt | Bibin A Chundatt |
+| [HADOOP-16109](https://issues.apache.org/jira/browse/HADOOP-16109) | Parquet reading S3AFileSystem causes EOF |  Blocker | fs/s3 | Dave Christianson | Steve Loughran |
+| [HADOOP-16191](https://issues.apache.org/jira/browse/HADOOP-16191) | AliyunOSS: improvements for copyFile/copyDirectory and logging |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-9391](https://issues.apache.org/jira/browse/YARN-9391) | Disable PATH variable to be passed to Docker container |  Major | . | Eric Yang | Jim Brennan |
+| [HADOOP-16220](https://issues.apache.org/jira/browse/HADOOP-16220) | Add findbugs ignores for unjustified issues during update to guava to 27.0-jre in hadoop-project |  Major | . | Gabor Bota | Gabor Bota |
+| [HADOOP-16233](https://issues.apache.org/jira/browse/HADOOP-16233) | S3AFileStatus to declare that isEncrypted() is always true |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
+| [HADOOP-16306](https://issues.apache.org/jira/browse/HADOOP-16306) | AliyunOSS: Remove temporary files when upload small files to OSS |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-14553](https://issues.apache.org/jira/browse/HDFS-14553) | Make queue size of BlockReportProcessingThread configurable |  Major | namenode | He Xiaoqiao | He Xiaoqiao |
+| [HDFS-14034](https://issues.apache.org/jira/browse/HDFS-14034) | Support getQuotaUsage API in WebHDFS |  Major | fs, webhdfs | Erik Krogen | Chao Sun |
+| [YARN-9765](https://issues.apache.org/jira/browse/YARN-9765) | SLS runner crashes when run with metrics turned off. |  Major | . | Abhishek Modi | Abhishek Modi |
+| [HDFS-14674](https://issues.apache.org/jira/browse/HDFS-14674) | [SBN read] Got an unexpected txid when tail editlog |  Blocker | . | wangzhaohui | wangzhaohui |
+| [YARN-9775](https://issues.apache.org/jira/browse/YARN-9775) | RMWebServices /scheduler-conf GET returns all hadoop configurations for ZKConfigurationStore |  Major | restapi | Prabhu Joseph | Prabhu Joseph |
+| [HDFS-14779](https://issues.apache.org/jira/browse/HDFS-14779) | Fix logging error in TestEditLog#testMultiStreamsLoadEditWithConfMaxTxns |  Major | . | Jonathan Hung | Jonathan Hung |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-16025](https://issues.apache.org/jira/browse/HADOOP-16025) | Update the year to 2019 |  Major | build | Ayush Saxena | Ayush Saxena |
+| [HDFS-12729](https://issues.apache.org/jira/browse/HDFS-12729) | Document special paths in HDFS |  Major | documentation | Chris Douglas | Masatake Iwasaki |
+| [YARN-9191](https://issues.apache.org/jira/browse/YARN-9191) | Add cli option in DS to support enforceExecutionType in resource requests. |  Major | . | Abhishek Modi | Abhishek Modi |
+| [HADOOP-16263](https://issues.apache.org/jira/browse/HADOOP-16263) | Update BUILDING.txt with macOS native build instructions |  Minor | . | Siyao Meng | Siyao Meng |
+| [YARN-9559](https://issues.apache.org/jira/browse/YARN-9559) | Create AbstractContainersLauncher for pluggable ContainersLauncher logic |  Major | . | Jonathan Hung | Jonathan Hung |
+| [HADOOP-16551](https://issues.apache.org/jira/browse/HADOOP-16551) | The changelog\*.md seems not generated when create-release |  Blocker | . | Zhankun Tang |  |
+
+
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/RELEASENOTES.3.1.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/RELEASENOTES.3.1.3.md
new file mode 100644
index 0000000..c806810
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.3/RELEASENOTES.3.1.3.md
@@ -0,0 +1,59 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  3.1.3 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-15922](https://issues.apache.org/jira/browse/HADOOP-15922) | *Major* | **DelegationTokenAuthenticationFilter get wrong doAsUser since it does not decode URL**
+
+- Fix DelegationTokenAuthenticationFilter so that it no longer double-encodes the doAs user parameter.
+
+
+---
+
+* [YARN-8761](https://issues.apache.org/jira/browse/YARN-8761) | *Major* | **Service AM support for decommissioning component instances**
+
+- Component instance numbers no longer increase strictly linearly when the decommission feature is used. Applications that assume linearly incrementing component instance numbers may be impacted by the introduction of this feature.
+
+
+---
+
+* [HDFS-14305](https://issues.apache.org/jira/browse/HDFS-14305) | *Major* | **Serial number in BlockTokenSecretManager could overlap between different namenodes**
+
+NameNodes rely on independent block token key ranges to communicate block token identities to DataNodes and clients in a way that does not create conflicts between the tokens issued by multiple NameNodes. HDFS-6440 introduced the potential for overlaps in key ranges; this fixes the issue by creating 64 possible key ranges that NameNodes assign themselves to, allowing for up to 64 NameNodes to run safely. This limitation only applies within a single Namespace; there may be more than 64 NameNodes total spread among multiple federated Namespaces.
+
+
+---
+
+* [HADOOP-16114](https://issues.apache.org/jira/browse/HADOOP-16114) | *Minor* | **NetUtils#canonicalizeHost gives different value for same host**
+
+This patch resolves the race condition in NetUtils#canonicalizeHost that could return different values for the same host.
+
+
+---
+
+* [HDFS-14396](https://issues.apache.org/jira/browse/HDFS-14396) | *Blocker* | **Failed to load image from FSImageFile when downgrade from 3.x to 2.x**
+
+During a rolling upgrade from Hadoop 2.x to 3.x, the NameNode cannot persist erasure coding information, so users cannot start using the erasure coding feature until the upgrade is finalized.
+
+
+
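The HDFS-14305 note above describes partitioning the block token serial-number space into 64 disjoint ranges, one per NameNode. The following is a small, hypothetical sketch of that idea only (it is not the actual BlockTokenSecretManager code): each NameNode index maps to its own slice of the int space, so two NameNodes with the same local counter still issue distinct serial numbers.

```java
// Hypothetical illustration of 64 disjoint serial-number ranges,
// one per NameNode index (0..63). Sketch of the HDFS-14305 idea only.
public final class SerialRangeSketch {
  private static final int NUM_RANGES = 64;
  private static final int RANGE_SIZE = Integer.MAX_VALUE / NUM_RANGES;

  private SerialRangeSketch() {
  }

  /** Lowest serial number assigned to the given NameNode index. */
  static int rangeStart(int nnIndex) {
    return nnIndex * RANGE_SIZE;
  }

  /** Map a monotonically increasing counter into the NameNode's own range. */
  static int serialFor(int nnIndex, int counter) {
    return rangeStart(nnIndex) + (counter % RANGE_SIZE);
  }

  public static void main(String[] args) {
    // Two NameNodes with the same counter value still get distinct serials.
    System.out.println(serialFor(0, 42)); // 42
    System.out.println(serialFor(1, 42)); // 33554473 (one full range above)
  }
}
```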
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index 7463d6c..64bb966 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -388,42 +388,41 @@
     Assert.assertArrayEquals(readData, expectedData);
   }
   
-  /** Test read fully */
+  /** Test read fully. */
   @Test(timeout=120000)
   public void testReadFully() throws Exception {
     OutputStream out = getOutputStream(defaultBufferSize);
     writeData(out);
     
-    InputStream in = getInputStream(defaultBufferSize);
-    final int len1 = dataLen / 4;
-    // Read len1 bytes
-    byte[] readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    byte[] expectedData = new byte[len1];
-    System.arraycopy(data, 0, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    // Pos: 1/3 dataLen
-    readFullyCheck(in, dataLen / 3);
-    
-    // Read len1 bytes
-    readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    expectedData = new byte[len1];
-    System.arraycopy(data, len1, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    // Pos: 1/2 dataLen
-    readFullyCheck(in, dataLen / 2);
-    
-    // Read len1 bytes
-    readData = new byte[len1];
-    readAll(in, readData, 0, len1);
-    expectedData = new byte[len1];
-    System.arraycopy(data, 2 * len1, expectedData, 0, len1);
-    Assert.assertArrayEquals(readData, expectedData);
-    
-    in.close();
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      final int len1 = dataLen / 4;
+      // Read len1 bytes
+      byte[] readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      byte[] expectedData = new byte[len1];
+      System.arraycopy(data, 0, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/3 dataLen
+      readFullyCheck(in, dataLen / 3);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/2 dataLen
+      readFullyCheck(in, dataLen / 2);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+    }
   }
   
   private void readFullyCheck(InputStream in, int pos) throws Exception {
@@ -441,6 +440,60 @@
     } catch (EOFException e) {
     }
   }
+
+  /** Test byte buffer read fully. */
+  @Test(timeout=120000)
+  public void testByteBufferReadFully() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+
+    try (InputStream in = getInputStream(defaultBufferSize)) {
+      final int len1 = dataLen / 4;
+      // Read len1 bytes
+      byte[] readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      byte[] expectedData = new byte[len1];
+      System.arraycopy(data, 0, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/3 dataLen
+      byteBufferReadFullyCheck(in, dataLen / 3);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+
+      // Pos: 1/2 dataLen
+      byteBufferReadFullyCheck(in, dataLen / 2);
+
+      // Read len1 bytes
+      readData = new byte[len1];
+      readAll(in, readData, 0, len1);
+      expectedData = new byte[len1];
+      System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+      Assert.assertArrayEquals(readData, expectedData);
+    }
+  }
+
+  private void byteBufferReadFullyCheck(InputStream in, int pos)
+          throws Exception {
+    ByteBuffer result = ByteBuffer.allocate(dataLen - pos);
+    ((ByteBufferPositionedReadable) in).readFully(pos, result);
+
+    byte[] expectedData = new byte[dataLen - pos];
+    System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
+    Assert.assertArrayEquals(result.array(), expectedData);
+
+    result = ByteBuffer.allocate(dataLen); // Exceeds maximum length
+    try {
+      ((ByteBufferPositionedReadable) in).readFully(pos, result);
+      Assert.fail("readFully beyond the end of the stream should fail.");
+    } catch (EOFException e) {
+    }
+  }
   
   /** Test seek to different position. */
   @Test(timeout=120000)
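For context on what the new tests above exercise, here is a minimal caller-side sketch of ByteBufferPositionedReadable.readFully as exposed through FSDataInputStream; the file path, offset, and buffer size are illustrative assumptions, and the underlying stream must actually implement ByteBufferPositionedReadable or the call throws UnsupportedOperationException.

```java
import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: read 1 KB starting at offset 4096 without moving the
// stream's current position; readFully either fills the buffer
// completely or throws EOFException.
public class ReadFullyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    try (FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {
      ByteBuffer buf = ByteBuffer.allocate(1024);
      in.readFully(4096L, buf);
      buf.flip(); // prepare the buffer for reading
      // ... consume buf ...
    }
  }
}
```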
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index 8bcf46e..73c6249 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -331,6 +331,30 @@
     }
 
     @Override
+    public void readFully(long position, ByteBuffer buf) throws IOException {
+      if (buf == null) {
+        throw new NullPointerException();
+      } else if (!buf.hasRemaining()) {
+        return;
+      }
+
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+
+      checkStream();
+
+      if (position + buf.remaining() > length) {
+        throw new EOFException("Reach the end of stream.");
+      }
+
+      buf.put(data, (int) position, buf.remaining());
+    }
+
+    @Override
     public void readFully(long position, byte[] b, int off, int len)
         throws IOException {
       if (b == null) {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
index e7d922e..8453889 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -95,6 +95,11 @@
   @Override
   @Test(timeout=10000)
   public void testPositionedReadWithByteBuffer() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferReadFully() throws Exception {}
   
   @Ignore("ChecksumFSOutputSummer doesn't support Syncable")
   @Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
index 036706f..1bf1dd3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
@@ -96,6 +96,11 @@
   @Test(timeout=10000)
   public void testPositionedReadWithByteBuffer() throws IOException {}
 
+  @Ignore("Wrapped stream doesn't support ByteBufferPositionedReadable")
+  @Override
+  @Test(timeout=10000)
+  public void testByteBufferReadFully() throws Exception {}
+
   @Ignore("Wrapped stream doesn't support ReadFully")
   @Override
   @Test(timeout=10000)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index a9fb117..9b92dac 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.fs.contract;
 
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
@@ -155,4 +158,11 @@
                                                  dataset.length);
     ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
   }
+
+  @Test
+  public void testFileSystemDeclaresCapability() throws Throwable {
+    assertHasPathCapabilities(getFileSystem(), target,
+        CommonPathCapabilities.FS_APPEND);
+  }
+
 }
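As background for the new capability assertion above, this is a hedged sketch of how client code might probe PathCapabilities before appending, rather than catching UnsupportedOperationException after the fact; the path used here is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: check the declared capability before calling append().
public class AppendIfSupported {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/log.txt"); // illustrative path
    if (fs.hasPathCapability(file, CommonPathCapabilities.FS_APPEND)) {
      try (FSDataOutputStream out = fs.append(file)) {
        out.writeBytes("appended line\n");
      }
    } else {
      System.out.println("append not supported under " + file);
    }
  }
}
```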
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
index d30e0d6..d712369 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
@@ -24,7 +24,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.CommonPathCapabilities.FS_CONCAT;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.assertFileHasLength;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
@@ -93,4 +95,9 @@
         () -> getFileSystem().concat(target, new Path[]{target})));
   }
 
+  @Test
+  public void testFileSystemDeclaresCapability() throws Throwable {
+    assertHasPathCapabilities(getFileSystem(), target, FS_CONCAT);
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 789fb0a..f616349 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathCapabilities;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.io.IOUtils;
@@ -1491,23 +1492,62 @@
     assertTrue("Stream should be instanceof StreamCapabilities",
         stream instanceof StreamCapabilities);
 
-    if (shouldHaveCapabilities!=null) {
+    StreamCapabilities source = (StreamCapabilities) stream;
+    if (shouldHaveCapabilities != null) {
       for (String shouldHaveCapability : shouldHaveCapabilities) {
         assertTrue("Should have capability: " + shouldHaveCapability,
-            ((StreamCapabilities) stream).hasCapability(shouldHaveCapability));
+            source.hasCapability(shouldHaveCapability));
       }
     }
 
-    if (shouldNotHaveCapabilities!=null) {
+    if (shouldNotHaveCapabilities != null) {
       for (String shouldNotHaveCapability : shouldNotHaveCapabilities) {
         assertFalse("Should not have capability: " + shouldNotHaveCapability,
-            ((StreamCapabilities) stream)
-                .hasCapability(shouldNotHaveCapability));
+            source.hasCapability(shouldNotHaveCapability));
       }
     }
   }
 
   /**
+   * Custom assert to test {@link PathCapabilities}.
+   *
+   * @param source source (FS, FC, etc)
+   * @param path path to check
+   * @param capabilities The array of expected capabilities
+   */
+  public static void assertHasPathCapabilities(
+      final PathCapabilities source,
+      final Path path,
+      final String...capabilities) throws IOException {
+
+    for (String shouldHaveCapability: capabilities) {
+      assertTrue("Should have capability: " + shouldHaveCapability
+              + " under " + path,
+          source.hasPathCapability(path, shouldHaveCapability));
+    }
+  }
+
+  /**
+   * Custom assert to test that the named {@link PathCapabilities}
+   * are not supported.
+   *
+   * @param source source (FS, FC, etc)
+   * @param path path to check
+   * @param capabilities The array of unexpected capabilities
+   */
+  public static void assertLacksPathCapabilities(
+      final PathCapabilities source,
+      final Path path,
+      final String...capabilities) throws IOException {
+
+    for (String capability : capabilities) {
+      assertFalse("Path must not support capability: " + capability
+              + " under " + path,
+          source.hasPathCapability(path, capability));
+    }
+  }
+
+  /**
    * Function which calls {@code InputStream.read()} and
    * downgrades an IOE to a runtime exception.
    * @param in input
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index c1b6cc4..db36154 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -575,6 +575,9 @@
     if (o == null) {
       return NULL_RESULT;
     } else {
+      if (o instanceof String) {
+        return '"' + (String)o + '"';
+      }
       try {
         return o.toString();
       } catch (Exception e) {
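The quoting change above mainly sharpens failure diagnostics from LambdaTestUtils.intercept(): when the lambda unexpectedly returns instead of throwing, its result is embedded in the failure text, and quoting String results makes empty strings or stray whitespace visible. A short usage sketch follows; the helper method and message text are made up for illustration.

```java
import java.io.FileNotFoundException;

import org.apache.hadoop.test.LambdaTestUtils;

// Sketch: intercept() asserts that the callable throws the given
// exception type containing the given text.
public class InterceptExample {
  public static void main(String[] args) throws Exception {
    LambdaTestUtils.intercept(FileNotFoundException.class,
        "no such file",                            // expected message text
        () -> loadConfigOrThrow("missing.xml"));   // hypothetical helper
  }

  private static String loadConfigOrThrow(String name)
      throws FileNotFoundException {
    throw new FileNotFoundException("no such file: " + name);
  }
}
```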
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
index 8fc64d1..2748c0b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
@@ -94,6 +94,8 @@
     String timeOutScript =
       Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
       : "sleep 4\necho \"I am fine\"";
+    String exitCodeScript = "exit 127";
+
     Configuration conf = new Configuration();
     writeNodeHealthScriptFile(normalScript, true);
     NodeHealthScriptRunner nodeHealthScriptRunner = new NodeHealthScriptRunner(
@@ -132,5 +134,12 @@
     Assert.assertEquals(
             NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG,
             nodeHealthScriptRunner.getHealthReport());
+
+    // Exit code 127
+    writeNodeHealthScriptFile(exitCodeScript, true);
+    timerTask.run();
+    Assert.assertTrue("Node health status reported unhealthy",
+        nodeHealthScriptRunner.isHealthy());
+    Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
   }
 }
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 2a6d44a..9af807f 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -274,8 +274,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
         </configuration>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
new file mode 100644
index 0000000..b9d7bce
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.function;
+
+import com.google.protobuf.ServiceException;
+
+/**
+ * Functional interface like java.util.function.Function but with
+ * checked exception.
+ */
+@FunctionalInterface
+public interface FunctionWithServiceException<T, R> {
+
+  /**
+   * Applies this function to the given argument.
+   *
+   * @param t the function argument
+   * @return the function result
+   */
+  R apply(T t) throws ServiceException;
+}
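A hedged sketch of how such an interface is typically consumed, converting the checked ServiceException into an IOException at a single choke point; the adapter class shown here is illustrative and not part of the patch.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.function.FunctionWithServiceException;
import org.apache.hadoop.ipc.ProtobufHelper;

import com.google.protobuf.ServiceException;

// Illustrative helper: apply a proto-level call that may throw
// ServiceException and surface it to callers as an IOException.
public final class ServiceExceptionAdapter {
  private ServiceExceptionAdapter() {
  }

  public static <T, R> R call(FunctionWithServiceException<T, R> fn, T arg)
      throws IOException {
    try {
      return fn.apply(arg);
    } catch (ServiceException ex) {
      throw ProtobufHelper.getRemoteException(ex);
    }
  }
}
```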
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
new file mode 100644
index 0000000..915fe35
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional interfaces for ozone, similar to java.util.function.
+ */
+package org.apache.hadoop.hdds.function;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index d7d53a4..efe79a76 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -16,22 +16,29 @@
  */
 package org.apache.hadoop.hdds.protocolPB;
 
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.function.Consumer;
+
+import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto.Builder;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 import static org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
 
 /**
@@ -53,6 +60,28 @@
   }
 
   /**
+   * Helper method to wrap the request and send the message.
+   */
+  private SCMSecurityResponse submitRequest(
+      SCMSecurityProtocolProtos.Type type,
+      Consumer<Builder> builderConsumer) throws IOException {
+    final SCMSecurityResponse response;
+    try {
+
+      Builder builder = SCMSecurityRequest.newBuilder()
+          .setCmdType(type)
+          .setTraceID(TracingUtil.exportCurrentSpan());
+      builderConsumer.accept(builder);
+      SCMSecurityRequest wrapper = builder.build();
+
+      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    } catch (ServiceException ex) {
+      throw ProtobufHelper.getRemoteException(ex);
+    }
+    return response;
+  }
+
+  /**
    * Closes this stream and releases any system resources associated
    * with it. If the stream is already closed then invoking this
    * method has no effect.
@@ -87,8 +116,8 @@
   /**
    * Get SCM signed certificate for OM.
    *
-   * @param omDetails       - OzoneManager Details.
-   * @param certSignReq     - Certificate signing request.
+   * @param omDetails   - OzoneManager Details.
+   * @param certSignReq - Certificate signing request.
    * @return byte[]         - SCM signed certificate.
    */
   @Override
@@ -100,64 +129,61 @@
   /**
    * Get SCM signed certificate for OM.
    *
-   * @param omDetails       - OzoneManager Details.
-   * @param certSignReq     - Certificate signing request.
+   * @param omDetails   - OzoneManager Details.
+   * @param certSignReq - Certificate signing request.
    * @return byte[]         - SCM signed certificate.
    */
   public SCMGetCertResponseProto getOMCertChain(
       OzoneManagerDetailsProto omDetails, String certSignReq)
       throws IOException {
-    SCMGetOMCertRequestProto.Builder builder = SCMGetOMCertRequestProto
+    SCMGetOMCertRequestProto request = SCMGetOMCertRequestProto
         .newBuilder()
         .setCSR(certSignReq)
-        .setOmDetails(omDetails);
-    try {
-      return rpcProxy.getOMCertificate(NULL_RPC_CONTROLLER, builder.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+        .setOmDetails(omDetails)
+        .build();
+    return submitRequest(Type.GetOMCertificate,
+        builder -> builder.setGetOMCertRequest(request))
+        .getGetCertResponseProto();
   }
 
   /**
    * Get SCM signed certificate with given serial id. Throws exception if
    * certificate is not found.
    *
-   * @param certSerialId    - Certificate serial id.
+   * @param certSerialId - Certificate serial id.
    * @return string         - pem encoded certificate.
    */
   @Override
   public String getCertificate(String certSerialId) throws IOException {
-    Builder builder = SCMGetCertificateRequestProto
+    SCMGetCertificateRequestProto request = SCMGetCertificateRequestProto
         .newBuilder()
-        .setCertSerialId(certSerialId);
-    try {
-      return rpcProxy.getCertificate(NULL_RPC_CONTROLLER, builder.build())
-          .getX509Certificate();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+        .setCertSerialId(certSerialId)
+        .build();
+    return submitRequest(Type.GetCertificate,
+        builder -> builder.setGetCertificateRequest(request))
+        .getGetCertResponseProto()
+        .getX509Certificate();
   }
 
   /**
    * Get SCM signed certificate for Datanode.
    *
-   * @param dnDetails       - Datanode Details.
-   * @param certSignReq     - Certificate signing request.
+   * @param dnDetails   - Datanode Details.
+   * @param certSignReq - Certificate signing request.
    * @return byte[]         - SCM signed certificate.
    */
   public SCMGetCertResponseProto getDataNodeCertificateChain(
       DatanodeDetailsProto dnDetails, String certSignReq)
       throws IOException {
-    SCMGetDataNodeCertRequestProto.Builder builder =
+
+    SCMGetDataNodeCertRequestProto request =
         SCMGetDataNodeCertRequestProto.newBuilder()
             .setCSR(certSignReq)
-            .setDatanodeDetails(dnDetails);
-    try {
-      return rpcProxy.getDataNodeCertificate(NULL_RPC_CONTROLLER,
-          builder.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+            .setDatanodeDetails(dnDetails)
+            .build();
+    return submitRequest(Type.GetDataNodeCertificate,
+        builder -> builder.setGetDataNodeCertRequest(request))
+        .getGetCertResponseProto();
   }
 
   /**
@@ -169,12 +195,10 @@
   public String getCACertificate() throws IOException {
     SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto
         .getDefaultInstance();
-    try {
-      return rpcProxy.getCACertificate(NULL_RPC_CONTROLLER, protoIns)
-          .getX509Certificate();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    return submitRequest(Type.GetCACertificate,
+        builder -> builder.setGetCACertificateRequest(protoIns))
+        .getGetCertResponseProto().getX509Certificate();
+
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 2fd5594..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import java.io.IOException;
-
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto.ResponseCode;
-import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link SCMSecurityProtocolPB} to the {@link
- * SCMSecurityProtocol} server implementation.
- */
-public class SCMSecurityProtocolServerSideTranslatorPB implements
-    SCMSecurityProtocolPB {
-
-  private final SCMSecurityProtocol impl;
-
-  public SCMSecurityProtocolServerSideTranslatorPB(SCMSecurityProtocol impl) {
-    this.impl = impl;
-  }
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   *
-   * @param controller
-   * @param request
-   * @return SCMGetDataNodeCertResponseProto.
-   */
-  @Override
-  public SCMGetCertResponseProto getDataNodeCertificate(
-      RpcController controller, SCMGetDataNodeCertRequestProto request)
-      throws ServiceException {
-    try {
-      String certificate = impl
-          .getDataNodeCertificate(request.getDatanodeDetails(),
-              request.getCSR());
-      SCMGetCertResponseProto.Builder builder =
-          SCMGetCertResponseProto
-              .newBuilder()
-              .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate)
-              .setX509CACertificate(impl.getCACertificate());
-
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
-   * Get SCM signed certificate for OzoneManager.
-   *
-   * @param controller
-   * @param request
-   * @return SCMGetCertResponseProto.
-   */
-  @Override
-  public SCMGetCertResponseProto getOMCertificate(
-      RpcController controller, SCMGetOMCertRequestProto request)
-      throws ServiceException {
-    try {
-      String certificate = impl
-          .getOMCertificate(request.getOmDetails(),
-              request.getCSR());
-      SCMGetCertResponseProto.Builder builder =
-          SCMGetCertResponseProto
-              .newBuilder()
-              .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate)
-              .setX509CACertificate(impl.getCACertificate());
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMGetCertResponseProto getCertificate(RpcController controller,
-      SCMGetCertificateRequestProto request) throws ServiceException {
-    try {
-      String certificate = impl.getCertificate(request.getCertSerialId());
-      SCMGetCertResponseProto.Builder builder =
-          SCMGetCertResponseProto
-              .newBuilder()
-              .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate);
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMGetCertResponseProto getCACertificate(RpcController controller,
-      SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto request)
-      throws ServiceException {
-    try {
-      String certificate = impl.getCACertificate();
-      SCMGetCertResponseProto.Builder builder =
-          SCMGetCertResponseProto
-              .newBuilder()
-              .setResponseCode(ResponseCode.success)
-              .setX509Certificate(certificate);
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index f00ecb2..1617806 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -105,6 +105,11 @@
   // TODO: Set to 1024 once RATIS issue around purge is fixed.
   public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
       1000000;
+
+  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
+      "dfs.container.ratis.leader.num.pending.requests";
+  public static final int
+      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096;
   // expiry interval stateMachineData cache entry inside containerStateMachine
   public static final String
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
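A short sketch of how the new Ratis leader pending-requests key would typically be read, following the usual ScmConfigKeys pattern; the surrounding class and the println are illustrative only.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

// Illustrative only: read the new limit, falling back to the
// default of 4096 declared above.
public class LeaderPendingRequestsConfig {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    int maxPending = conf.getInt(
        ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
        ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT);
    System.out.println("ratis leader pending request limit = " + maxPending);
  }
}
```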
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index ab3fcd1..01db597 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -16,64 +16,57 @@
  */
 package org.apache.hadoop.hdds.scm.protocolPB;
 
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest.Builder;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
 import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
 
 /**
  * This class is the client-side translator to translate the requests made on
@@ -102,13 +95,34 @@
   }
 
   /**
+   * Helper method to wrap the request and send the message.
+   */
+  private ScmContainerLocationResponse submitRequest(
+      StorageContainerLocationProtocolProtos.Type type,
+      Consumer<Builder> builderConsumer) throws IOException {
+    final ScmContainerLocationResponse response;
+    try {
+
+      Builder builder = ScmContainerLocationRequest.newBuilder()
+          .setCmdType(type)
+          .setTraceID(TracingUtil.exportCurrentSpan());
+      builderConsumer.accept(builder);
+      ScmContainerLocationRequest wrapper = builder.build();
+
+      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    } catch (ServiceException ex) {
+      throw ProtobufHelper.getRemoteException(ex);
+    }
+    return response;
+  }
+
+  /**
    * Asks SCM where a container should be allocated. SCM responds with the set
    * of datanodes that should be used creating this container. Ozone/SCM only
    * supports replication factor of either 1 or 3.
-   * @param type - Replication Type
+   *
+   * @param type   - Replication Type
    * @param factor - Replication Count
-   * @return
-   * @throws IOException
    */
   @Override
   public ContainerWithPipeline allocateContainer(
@@ -122,12 +136,11 @@
         .setOwner(owner)
         .build();
 
-    final ContainerResponseProto response;
-    try {
-      response = rpcProxy.allocateContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    ContainerResponseProto response =
+        submitRequest(Type.AllocateContainer,
+            builder -> builder.setContainerRequest(request))
+            .getContainerResponse();
+    //TODO should be migrated to use the top level status structure.
     if (response.getErrorCode() != ContainerResponseProto.Error.success) {
       throw new IOException(response.hasErrorMessage() ?
           response.getErrorMessage() : "Allocate container failed.");
@@ -144,13 +157,12 @@
         .setContainerID(containerID)
         .setTraceID(TracingUtil.exportCurrentSpan())
         .build();
-    try {
-      GetContainerResponseProto response =
-          rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
-      return ContainerInfo.fromProtobuf(response.getContainerInfo());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    ScmContainerLocationResponse response =
+        submitRequest(Type.GetContainer,
+            (builder) -> builder.setGetContainerRequest(request));
+    return ContainerInfo
+        .fromProtobuf(response.getGetContainerResponse().getContainerInfo());
+
   }
 
   /**
@@ -164,14 +176,15 @@
         GetContainerWithPipelineRequestProto.newBuilder()
             .setTraceID(TracingUtil.exportCurrentSpan())
             .setContainerID(containerID).build();
-    try {
-      GetContainerWithPipelineResponseProto response =
-          rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
-      return ContainerWithPipeline.fromProtobuf(
-          response.getContainerWithPipeline());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    ScmContainerLocationResponse response =
+        submitRequest(Type.GetContainerWithPipeline,
+            (builder) -> builder.setGetContainerWithPipelineRequest(request));
+
+    return ContainerWithPipeline.fromProtobuf(
+        response.getGetContainerWithPipelineResponse()
+            .getContainerWithPipeline());
+
   }
 
   /**
@@ -191,26 +204,22 @@
     builder.setTraceID(TracingUtil.exportCurrentSpan());
     SCMListContainerRequestProto request = builder.build();
 
-    try {
-      SCMListContainerResponseProto response =
-          rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
-      List<ContainerInfo> containerList = new ArrayList<>();
-      for (HddsProtos.ContainerInfoProto containerInfoProto : response
-          .getContainersList()) {
-        containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
-      }
-      return containerList;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
+    SCMListContainerResponseProto response =
+        submitRequest(Type.ListContainer,
+            builder1 -> builder1.setScmListContainerRequest(request))
+            .getScmListContainerResponse();
+    List<ContainerInfo> containerList = new ArrayList<>();
+    for (HddsProtos.ContainerInfoProto containerInfoProto : response
+        .getContainersList()) {
+      containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
     }
+    return containerList;
+
   }
 
   /**
    * Ask SCM to delete a container by name. SCM will remove
    * the container mapping in its database.
-   *
-   * @param containerID
-   * @throws IOException
    */
   @Override
   public void deleteContainer(long containerID)
@@ -222,18 +231,13 @@
         .setTraceID(TracingUtil.exportCurrentSpan())
         .setContainerID(containerID)
         .build();
-    try {
-      rpcProxy.deleteContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    submitRequest(Type.DeleteContainer,
+        builder -> builder.setScmDeleteContainerRequest(request));
+
   }
 
   /**
    * Queries a list of Node Statuses.
-   *
-   * @param nodeStatuses
-   * @return List of Datanodes.
    */
   @Override
   public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
@@ -246,21 +250,18 @@
         .setState(nodeStatuses)
         .setTraceID(TracingUtil.exportCurrentSpan())
         .setScope(queryScope).setPoolName(poolName).build();
-    try {
-      NodeQueryResponseProto response =
-          rpcProxy.queryNode(NULL_RPC_CONTROLLER, request);
-      return response.getDatanodesList();
-    } catch (ServiceException e) {
-      throw  ProtobufHelper.getRemoteException(e);
-    }
+    NodeQueryResponseProto response = submitRequest(Type.QueryNode,
+        builder -> builder.setNodeQueryRequest(request)).getNodeQueryResponse();
+    return response.getDatanodesList();
 
   }
 
   /**
    * Notify from client that creates object on datanodes.
-   * @param type object type
-   * @param id object id
-   * @param op operation type (e.g., create, close, delete)
+   *
+   * @param type  object type
+   * @param id    object id
+   * @param op    operation type (e.g., create, close, delete)
    * @param stage object creation stage : begin/complete
    */
   @Override
@@ -278,20 +279,17 @@
             .setOp(op)
             .setStage(stage)
             .build();
-    try {
-      rpcProxy.notifyObjectStageChange(NULL_RPC_CONTROLLER, request);
-    } catch(ServiceException e){
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    submitRequest(Type.NotifyObjectStageChange,
+        builder -> builder.setObjectStageChangeRequest(request));
+
   }
 
   /**
    * Creates a replication pipeline of a specified type.
    *
    * @param replicationType - replication type
-   * @param factor - factor 1 or 3
-   * @param nodePool - optional machine list to build a pipeline.
-   * @throws IOException
+   * @param factor          - factor 1 or 3
+   * @param nodePool        - optional machine list to build a pipeline.
    */
   @Override
   public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
@@ -303,87 +301,82 @@
         .setReplicationFactor(factor)
         .setReplicationType(replicationType)
         .build();
-    try {
-      PipelineResponseProto response =
-          rpcProxy.allocatePipeline(NULL_RPC_CONTROLLER, request);
-      if (response.getErrorCode() ==
-          PipelineResponseProto.Error.success) {
-        Preconditions.checkState(response.hasPipeline(), "With success, " +
-            "must come a pipeline");
-        return Pipeline.getFromProtobuf(response.getPipeline());
-      } else {
-        String errorMessage = String.format("create replication pipeline " +
-                "failed. code : %s Message: %s", response.getErrorCode(),
-            response.hasErrorMessage() ? response.getErrorMessage() : "");
-        throw new IOException(errorMessage);
-      }
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
+
+    PipelineResponseProto response =
+        submitRequest(Type.AllocatePipeline,
+            builder -> builder.setPipelineRequest(request))
+            .getPipelineResponse();
+    if (response.getErrorCode() ==
+        PipelineResponseProto.Error.success) {
+      Preconditions.checkState(response.hasPipeline(), "With success, " +
+          "must come a pipeline");
+      return Pipeline.getFromProtobuf(response.getPipeline());
+    } else {
+      String errorMessage = String.format("create replication pipeline " +
+              "failed. code : %s Message: %s", response.getErrorCode(),
+          response.hasErrorMessage() ? response.getErrorMessage() : "");
+      throw new IOException(errorMessage);
     }
+
   }
 
   @Override
   public List<Pipeline> listPipelines() throws IOException {
-    try {
-      ListPipelineRequestProto request = ListPipelineRequestProto
-          .newBuilder().setTraceID(TracingUtil.exportCurrentSpan())
-          .build();
-      ListPipelineResponseProto response = rpcProxy.listPipelines(
-          NULL_RPC_CONTROLLER, request);
-      List<Pipeline> list = new ArrayList<>();
-      for (HddsProtos.Pipeline pipeline : response.getPipelinesList()) {
-        Pipeline fromProtobuf = Pipeline.getFromProtobuf(pipeline);
-        list.add(fromProtobuf);
-      }
-      return list;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
+    ListPipelineRequestProto request = ListPipelineRequestProto
+        .newBuilder().setTraceID(TracingUtil.exportCurrentSpan())
+        .build();
+
+    ListPipelineResponseProto response = submitRequest(Type.ListPipelines,
+        builder -> builder.setListPipelineRequest(request))
+        .getListPipelineResponse();
+
+    List<Pipeline> list = new ArrayList<>();
+    for (HddsProtos.Pipeline pipeline : response.getPipelinesList()) {
+      Pipeline fromProtobuf = Pipeline.getFromProtobuf(pipeline);
+      list.add(fromProtobuf);
     }
+    return list;
+
   }
 
   @Override
   public void activatePipeline(HddsProtos.PipelineID pipelineID)
       throws IOException {
-    try {
-      ActivatePipelineRequestProto request =
-          ActivatePipelineRequestProto.newBuilder()
-              .setTraceID(TracingUtil.exportCurrentSpan())
-              .setPipelineID(pipelineID)
-              .build();
-      rpcProxy.activatePipeline(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    ActivatePipelineRequestProto request =
+        ActivatePipelineRequestProto.newBuilder()
+            .setTraceID(TracingUtil.exportCurrentSpan())
+            .setPipelineID(pipelineID)
+            .build();
+    submitRequest(Type.ActivatePipeline,
+        builder -> builder.setActivatePipelineRequest(request));
+
   }
 
   @Override
   public void deactivatePipeline(HddsProtos.PipelineID pipelineID)
       throws IOException {
-    try {
-      DeactivatePipelineRequestProto request =
-          DeactivatePipelineRequestProto.newBuilder()
-              .setTraceID(TracingUtil.exportCurrentSpan())
-              .setPipelineID(pipelineID)
-              .build();
-      rpcProxy.deactivatePipeline(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    DeactivatePipelineRequestProto request =
+        DeactivatePipelineRequestProto.newBuilder()
+            .setTraceID(TracingUtil.exportCurrentSpan())
+            .setPipelineID(pipelineID)
+            .build();
+    submitRequest(Type.DeactivatePipeline,
+        builder -> builder.setDeactivatePipelineRequest(request));
   }
 
   @Override
   public void closePipeline(HddsProtos.PipelineID pipelineID)
       throws IOException {
-    try {
-      ClosePipelineRequestProto request =
-          ClosePipelineRequestProto.newBuilder()
-              .setTraceID(TracingUtil.exportCurrentSpan())
-              .setPipelineID(pipelineID)
-          .build();
-      rpcProxy.closePipeline(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    ClosePipelineRequestProto request =
+        ClosePipelineRequestProto.newBuilder()
+            .setTraceID(TracingUtil.exportCurrentSpan())
+            .setPipelineID(pipelineID)
+            .build();
+    submitRequest(Type.ClosePipeline,
+        builder -> builder.setClosePipelineRequest(request));
+
   }
 
   @Override
@@ -392,16 +385,14 @@
         HddsProtos.GetScmInfoRequestProto.newBuilder()
             .setTraceID(TracingUtil.exportCurrentSpan())
             .build();
-    try {
-      HddsProtos.GetScmInfoResponseProto resp = rpcProxy.getScmInfo(
-          NULL_RPC_CONTROLLER, request);
-      ScmInfo.Builder builder = new ScmInfo.Builder()
-          .setClusterId(resp.getClusterId())
-          .setScmId(resp.getScmId());
-      return builder.build();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    GetScmInfoResponseProto resp = submitRequest(Type.GetScmInfo,
+        builder -> builder.setGetScmInfoRequest(request))
+        .getGetScmInfoResponse();
+    ScmInfo.Builder builder = new ScmInfo.Builder()
+        .setClusterId(resp.getClusterId())
+        .setScmId(resp.getScmId());
+    return builder.build();
 
   }
 
@@ -409,73 +400,67 @@
    * Check if SCM is in safe mode.
    *
    * @return Returns true if SCM is in safe mode else returns false.
-   * @throws IOException
    */
   @Override
   public boolean inSafeMode() throws IOException {
     InSafeModeRequestProto request =
         InSafeModeRequestProto.getDefaultInstance();
-    try {
-      InSafeModeResponseProto resp = rpcProxy.inSafeMode(
-          NULL_RPC_CONTROLLER, request);
-      return resp.getInSafeMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    return submitRequest(Type.InSafeMode,
+        builder -> builder.setInSafeModeRequest(request))
+        .getInSafeModeResponse().getInSafeMode();
+
   }
 
   /**
    * Force SCM out of Safe mode.
    *
    * @return returns true if operation is successful.
-   * @throws IOException
    */
   @Override
   public boolean forceExitSafeMode() throws IOException {
     ForceExitSafeModeRequestProto request =
         ForceExitSafeModeRequestProto.getDefaultInstance();
-    try {
-      ForceExitSafeModeResponseProto resp = rpcProxy
-          .forceExitSafeMode(NULL_RPC_CONTROLLER, request);
-      return resp.getExitedSafeMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+    ForceExitSafeModeResponseProto resp =
+        submitRequest(Type.ForceExitSafeMode,
+            builder -> builder.setForceExitSafeModeRequest(request))
+            .getForceExitSafeModeResponse();
+
+    return resp.getExitedSafeMode();
+
   }
 
   @Override
   public void startReplicationManager() throws IOException {
-    try {
-      StartReplicationManagerRequestProto request =
-          StartReplicationManagerRequestProto.getDefaultInstance();
-      rpcProxy.startReplicationManager(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    StartReplicationManagerRequestProto request =
+        StartReplicationManagerRequestProto.getDefaultInstance();
+    submitRequest(Type.StartReplicationManager,
+        builder -> builder.setStartReplicationManagerRequest(request));
+
   }
 
   @Override
   public void stopReplicationManager() throws IOException {
-    try {
-      StopReplicationManagerRequestProto request =
-          StopReplicationManagerRequestProto.getDefaultInstance();
-      rpcProxy.stopReplicationManager(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    StopReplicationManagerRequestProto request =
+        StopReplicationManagerRequestProto.getDefaultInstance();
+    submitRequest(Type.StopReplicationManager,
+        builder -> builder.setStopReplicationManagerRequest(request));
+
   }
 
   @Override
   public boolean getReplicationManagerStatus() throws IOException {
-    try {
-      ReplicationManagerStatusRequestProto request =
-          ReplicationManagerStatusRequestProto.getDefaultInstance();
-      ReplicationManagerStatusResponseProto response =
-          rpcProxy.getReplicationManagerStatus(NULL_RPC_CONTROLLER, request);
-      return response.getIsRunning();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
+
+    ReplicationManagerStatusRequestProto request =
+        ReplicationManagerStatusRequestProto.getDefaultInstance();
+    ReplicationManagerStatusResponseProto response =
+        submitRequest(Type.GetReplicationManagerStatus,
+            builder -> builder.setReplicationManagerStatusRequest(request))
+            .getReplicationManagerStatusResponse();
+    return response.getIsRunning();
+
   }
 
   @Override
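The hunks above replace the per-call try/catch + rpcProxy pattern with a single submitRequest helper that wraps each call in the new ScmContainerLocationRequest envelope (defined in StorageContainerLocationProtocol.proto further down). The helper itself is not shown in this section; the following is only a rough sketch of the shape implied by the call sites, assuming a java.util.function.Consumer parameter and the existing rpcProxy / NULL_RPC_CONTROLLER fields.

    // Sketch only: dispatch helper implied by the call sites above (assumed
    // shape, not the exact implementation in this patch). Requires
    // java.util.function.Consumer.
    private ScmContainerLocationResponse submitRequest(Type type,
        Consumer<ScmContainerLocationRequest.Builder> builderConsumer)
        throws IOException {
      try {
        ScmContainerLocationRequest.Builder builder =
            ScmContainerLocationRequest.newBuilder()
                .setCmdType(type)
                .setTraceID(TracingUtil.exportCurrentSpan());
        builderConsumer.accept(builder);   // attach the per-call payload
        ScmContainerLocationRequest wrapper = builder.build();
        return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
      } catch (ServiceException ex) {
        throw ProtobufHelper.getRemoteException(ex); // unwrap remote IOException
      }
    }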
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 4d5ecab..263864f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -22,11 +22,13 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.eclipse.jetty.util.StringUtil;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyOptions;
 import org.rocksdb.DBOptions;
+import org.rocksdb.InfoLogLevel;
 import org.rocksdb.RocksDB;
 import org.rocksdb.Statistics;
 import org.rocksdb.StatsLevel;
@@ -54,6 +56,8 @@
 public final class DBStoreBuilder {
   private static final Logger LOG =
       LoggerFactory.getLogger(DBStoreBuilder.class);
+  public static final Logger ROCKS_DB_LOGGER =
+      LoggerFactory.getLogger(RocksDB.class);
   private Set<TableConfig> tables;
   private DBProfile dbProfile;
   private DBOptions rocksDBOption;
@@ -63,8 +67,9 @@
   private Configuration configuration;
   private CodecRegistry registry;
   private String rocksDbStat;
+  private RocksDBConfiguration rocksDBConfiguration;
 
-  private DBStoreBuilder(Configuration configuration) {
+  private DBStoreBuilder(OzoneConfiguration configuration) {
     tables = new HashSet<>();
     tableNames = new LinkedList<>();
     this.configuration = configuration;
@@ -72,9 +77,11 @@
     this.rocksDbStat = configuration.getTrimmed(
         OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
         OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+    this.rocksDBConfiguration =
+        configuration.getObject(RocksDBConfiguration.class);
   }
 
-  public static DBStoreBuilder newBuilder(Configuration configuration) {
+  public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) {
     return new DBStoreBuilder(configuration);
   }
 
@@ -199,6 +206,19 @@
       option = dbProfile.getDBOptions();
     }
 
+    if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
+      org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
+        @Override
+        protected void log(InfoLogLevel infoLogLevel, String s) {
+          ROCKS_DB_LOGGER.info(s);
+        }
+      };
+      InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
+          .getRocksdbLogLevel() + "_LEVEL");
+      logger.setInfoLogLevel(level);
+      option.setLogger(logger);
+    }
+
     if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
       Statistics statistics = new Statistics();
       statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
new file mode 100644
index 0000000..1a8c846
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.ConfigType;
+
+/**
+ * Holds configuration items for OM RocksDB.
+ */
+@ConfigGroup(prefix = "hadoop.hdds.db")
+public class RocksDBConfiguration {
+
+  private boolean rocksdbLogEnabled;
+
+  @Config(key = "rocksdb.logging.enabled",
+      type = ConfigType.BOOLEAN,
+      defaultValue = "false",
+      tags = {ConfigTag.OM},
+      description = "Enable/Disable RocksDB logging for OM.")
+  public void setRocksdbLoggingEnabled(boolean enabled) {
+    this.rocksdbLogEnabled = enabled;
+  }
+
+  public boolean isRocksdbLoggingEnabled() {
+    return rocksdbLogEnabled;
+  }
+
+  private String rocksdbLogLevel;
+
+  @Config(key = "rocksdb.logging.level",
+      type = ConfigType.STRING,
+      defaultValue = "INFO",
+      tags = {ConfigTag.OM},
+      description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)")
+  public void setRocksdbLogLevel(String level) {
+    this.rocksdbLogLevel = level;
+  }
+
+  public String getRocksdbLogLevel() {
+    return rocksdbLogLevel;
+  }
+
+}
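Together with the DBStoreBuilder change above, these annotations expose two keys under the hadoop.hdds.db prefix. A minimal usage sketch, assuming the annotation-based config resolves to hadoop.hdds.db.rocksdb.logging.enabled and hadoop.hdds.db.rocksdb.logging.level:

    // Sketch: enabling RocksDB logging for OM via OzoneConfiguration.
    // Key names are assumed from @ConfigGroup(prefix) + @Config(key) above.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean("hadoop.hdds.db.rocksdb.logging.enabled", true);
    conf.set("hadoop.hdds.db.rocksdb.logging.level", "DEBUG");

    RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
    // DBStoreBuilder reads this object and, when enabled, installs an
    // org.rocksdb.Logger that forwards RocksDB's log lines to SLF4J.
    assert dbConf.isRocksdbLoggingEnabled();
    assert "DEBUG".equals(dbConf.getRocksdbLogLevel());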
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index c5f23bb..597eff1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -104,7 +104,7 @@
           // We should build cache after OM restart when clean up policy is
           // NEVER. Setting epoch value -1, so that when it is marked for
           // delete, this will be considered for cleanup.
-          cache.put(new CacheKey<>(kv.getKey()),
+          cache.loadInitial(new CacheKey<>(kv.getKey()),
               new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT));
         }
       }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java
index 1f16969..de5a079 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java
@@ -44,6 +44,15 @@
   CACHEVALUE get(CACHEKEY cacheKey);
 
   /**
+   * This method should be called for tables with the cache cleanup policy
+   * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} after a restart to
+   * repopulate the cache.
+   * @param cacheKey key of the entry to load
+   * @param cacheValue value of the entry to load
+   */
+  void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue);
+
+  /**
    * Add an entry to the cache, if the key already exists it overrides.
    * @param cacheKey
    * @param value
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
index f33136b..c3215c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java
@@ -71,6 +71,13 @@
   }
 
   @Override
+  public void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue) {
+    // No need to add the entry to epochEntries; epoch tracking is only
+    // needed for entries added through the normal put operation.
+    cache.put(cacheKey, cacheValue);
+  }
+
+  @Override
   public void put(CACHEKEY cacheKey, CACHEVALUE value) {
     cache.put(cacheKey, value);
     epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey));
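The distinction between loadInitial and put matters on restart: entries rebuilt from RocksDB must not enter the epoch-based cleanup queue, while entries added on the normal write path must. An illustrative sketch (constructor signature and Optional flavor assumed from the usage in TypedTable above):

    // Illustrative only: the two code paths side by side.
    TableCacheImpl<CacheKey<String>, CacheValue<String>> cache =
        new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.NEVER);

    // On restart: rebuild the cache without registering epoch entries.
    cache.loadInitial(new CacheKey<>("volume1"),
        new CacheValue<>(Optional.of("volumeInfo"), -1 /* EPOCH_DEFAULT */));

    // Normal operation: put() also records the epoch for later cleanup.
    cache.put(new CacheKey<>("volume2"),
        new CacheValue<>(Optional.of("volumeInfo"), 5));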
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 9050ebd..a3d1c4a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -329,6 +329,11 @@
       ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP;
   public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
+      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS;
+  public static final int
+      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT =
+      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT;
   public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
       ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
   public static final TimeDuration
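The new constants mirror ScmConfigKeys and map to the dfs.container.ratis.leader.num.pending.requests key documented in ozone-default.xml below (default 4096). A minimal sketch of overriding it in code:

    // Sketch: raising the Ratis leader's pending-request limit.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS, 8192);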
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index d6e079a..9817d87 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -239,6 +239,8 @@
   public static final String KEY = "key";
   public static final String QUOTA = "quota";
   public static final String QUOTA_IN_BYTES = "quotaInBytes";
+  public static final String OBJECT_ID = "objectID";
+  public static final String UPDATE_ID = "updateID";
   public static final String CLIENT_ID = "clientID";
   public static final String OWNER = "owner";
   public static final String ADMIN = "admin";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
index c302084..49efad0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
@@ -18,22 +18,22 @@
 package org.apache.hadoop.ozone.lock;
 
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Lock implementation which also maintains counter.
  */
 public final class ActiveLock {
 
-  private Lock lock;
+  private ReadWriteLock lock;
   private AtomicInteger count;
 
   /**
    * Use ActiveLock#newInstance to create instance.
    */
   private ActiveLock() {
-    this.lock = new ReentrantLock();
+    this.lock = new ReentrantReadWriteLock();
     this.count = new AtomicInteger(0);
   }
 
@@ -47,21 +47,58 @@
   }
 
   /**
-   * Acquires the lock.
+   * Acquires read lock.
    *
-   * <p>If the lock is not available then the current thread becomes
-   * disabled for thread scheduling purposes and lies dormant until the
-   * lock has been acquired.
+   * <p>Acquires the read lock if the write lock is not held by
+   * another thread and returns immediately.
+   *
+   * <p>If the write lock is held by another thread then
+   * the current thread becomes disabled for thread scheduling
+   * purposes and lies dormant until the read lock has been acquired.
    */
-  public void lock() {
-    lock.lock();
+  void readLock() {
+    lock.readLock().lock();
   }
 
   /**
-   * Releases the lock.
+   * Attempts to release the read lock.
+   *
+   * <p>If the number of readers is now zero then the lock
+   * is made available for write lock attempts.
    */
-  public void unlock() {
-    lock.unlock();
+  void readUnlock() {
+    lock.readLock().unlock();
+  }
+
+  /**
+   * Acquires write lock.
+   *
+   * <p>Acquires the write lock if neither the read nor write lock
+   * are held by another thread
+   * and returns immediately, setting the write lock hold count to
+   * one.
+   *
+   * <p>If the current thread already holds the write lock then the
+   * hold count is incremented by one and the method returns
+   * immediately.
+   *
+   * <p>If the lock is held by another thread then the current
+   * thread becomes disabled for thread scheduling purposes and
+   * lies dormant until the write lock has been acquired.
+   */
+  void writeLock() {
+    lock.writeLock().lock();
+  }
+
+  /**
+   * Attempts to release the write lock.
+   *
+   * <p>If the current thread is the holder of this lock then
+   * the hold count is decremented. If the hold count is now
+   * zero then the lock is released.
+   */
+  void writeUnlock() {
+    lock.writeLock().unlock();
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
index 5f76bd6..670d4d1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
@@ -25,42 +25,156 @@
 
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Consumer;
 
 /**
  * Manages the locks on a given resource. A new lock is created for each
  * and every unique resource. Uniqueness of resource depends on the
  * {@code equals} implementation of it.
  */
-public class LockManager<T> {
+public class LockManager<R> {
 
   private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
 
-  private final Map<T, ActiveLock> activeLocks = new ConcurrentHashMap<>();
+  private final Map<R, ActiveLock> activeLocks = new ConcurrentHashMap<>();
   private final GenericObjectPool<ActiveLock> lockPool =
       new GenericObjectPool<>(new PooledLockFactory());
 
   /**
-   * Creates new LockManager instance.
+   * Creates new LockManager instance with the given Configuration.
    *
    * @param conf Configuration object
    */
-  public LockManager(Configuration conf) {
-    int maxPoolSize = conf.getInt(HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY,
+  public LockManager(final Configuration conf) {
+    final int maxPoolSize = conf.getInt(
+        HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY,
         HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT);
     lockPool.setMaxTotal(maxPoolSize);
   }
 
-
   /**
    * Acquires the lock on given resource.
    *
    * <p>If the lock is not available then the current thread becomes
    * disabled for thread scheduling purposes and lies dormant until the
    * lock has been acquired.
+   *
+   * @param resource on which the lock has to be acquired
+   * @deprecated Use {@link LockManager#writeLock} instead
    */
-  public void lock(T resource) {
-    activeLocks.compute(resource, (k, v) -> {
-      ActiveLock lock;
+  public void lock(final R resource) {
+    writeLock(resource);
+  }
+
+  /**
+   * Releases the lock on given resource.
+   *
+   * @param resource for which the lock has to be released
+   * @deprecated Use {@link LockManager#writeUnlock} instead
+   */
+  public void unlock(final R resource) {
+    writeUnlock(resource);
+  }
+
+  /**
+   * Acquires the read lock on given resource.
+   *
+   * <p>Acquires the read lock on resource if the write lock is not held by
+   * another thread and returns immediately.
+   *
+   * <p>If the write lock on resource is held by another thread then
+   * the current thread becomes disabled for thread scheduling
+   * purposes and lies dormant until the read lock has been acquired.
+   *
+   * @param resource on which the read lock has to be acquired
+   */
+  public void readLock(final R resource) {
+    acquire(resource, ActiveLock::readLock);
+  }
+
+  /**
+   * Releases the read lock on given resource.
+   *
+   * @param resource for which the read lock has to be released
+   * @throws IllegalMonitorStateException if the current thread does not
+   *                                      hold this lock
+   */
+  public void readUnlock(final R resource) throws IllegalMonitorStateException {
+    release(resource, ActiveLock::readUnlock);
+  }
+
+  /**
+   * Acquires the write lock on given resource.
+   *
+   * <p>Acquires the write lock on resource if neither the read nor write lock
+   * are held by another thread and returns immediately.
+   *
+   * <p>If the current thread already holds the write lock then the
+   * hold count is incremented by one and the method returns
+   * immediately.
+   *
+   * <p>If the lock is held by another thread then the current
+   * thread becomes disabled for thread scheduling purposes and
+   * lies dormant until the write lock has been acquired.
+   *
+   * @param resource on which the lock has to be acquired
+   */
+  public void writeLock(final R resource) {
+    acquire(resource, ActiveLock::writeLock);
+  }
+
+  /**
+   * Releases the write lock on given resource.
+   *
+   * @param resource for which the lock has to be released
+   * @throws IllegalMonitorStateException if the current thread does not
+   *                                      hold this lock
+   */
+  public void writeUnlock(final R resource)
+      throws IllegalMonitorStateException {
+    release(resource, ActiveLock::writeUnlock);
+  }
+
+  /**
+   * Acquires the lock on given resource using the provided lock function.
+   *
+   * @param resource on which the lock has to be acquired
+   * @param lockFn function to acquire the lock
+   */
+  private void acquire(final R resource, final Consumer<ActiveLock> lockFn) {
+    lockFn.accept(getLockForLocking(resource));
+  }
+
+  /**
+   * Releases the lock on given resource using the provided release function.
+   *
+   * @param resource for which the lock has to be released
+   * @param releaseFn function to release the lock
+   */
+  private void release(final R resource, final Consumer<ActiveLock> releaseFn) {
+    final ActiveLock lock = getLockForReleasing(resource);
+    releaseFn.accept(lock);
+    decrementActiveLockCount(resource);
+  }
+
+  /**
+   * Returns {@link ActiveLock} instance for the given resource,
+   * on which the lock can be acquired.
+   *
+   * @param resource on which the lock has to be acquired
+   * @return {@link ActiveLock} instance
+   */
+  private ActiveLock getLockForLocking(final R resource) {
+    /*
+     * While getting a lock object for locking we should
+     * atomically increment the active count of the lock.
+     *
+     * This is to avoid cases where the selected lock could
+     * be removed from the activeLocks map and returned to
+     * the object pool.
+     */
+    return activeLocks.compute(resource, (k, v) -> {
+      final ActiveLock lock;
       try {
         if (v == null) {
           lock = lockPool.borrowObject();
@@ -73,22 +187,34 @@
         throw new RuntimeException(ex);
       }
       return lock;
-    }).lock();
+    });
   }
 
   /**
-   * Releases the lock on given resource.
+   * Returns {@link ActiveLock} instance for the given resource,
+   * for which the lock has to be released.
+   *
+   * @param resource for which the lock has to be released
+   * @return {@link ActiveLock} instance
    */
-  public void unlock(T resource) {
-    ActiveLock lock = activeLocks.get(resource);
-    if (lock == null) {
-      // Someone is releasing a lock which was never acquired. Log and return.
-      LOG.error("Trying to release the lock on {}, which was never acquired.",
-          resource);
-      throw new IllegalMonitorStateException("Releasing lock on resource "
-          + resource + " without acquiring lock");
+  private ActiveLock getLockForReleasing(final R resource) {
+    if (activeLocks.containsKey(resource)) {
+      return activeLocks.get(resource);
     }
-    lock.unlock();
+    // Someone is releasing a lock which was never acquired.
+    LOG.error("Trying to release the lock on {}, which was never acquired.",
+        resource);
+    throw new IllegalMonitorStateException("Releasing lock on resource "
+        + resource + " without acquiring lock");
+  }
+
+  /**
+   * Decrements the active lock count and returns the {@link ActiveLock}
+   * object to pool if the active count is 0.
+   *
+   * @param resource resource to which the ActiveLock is associated
+   */
+  private void decrementActiveLockCount(final R resource) {
     activeLocks.computeIfPresent(resource, (k, v) -> {
       v.decrementActiveCount();
       if (v.getActiveLockCount() != 0) {
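With the read/write split, callers pair readLock/readUnlock or writeLock/writeUnlock on the same resource key; releasing a lock that was never acquired still throws IllegalMonitorStateException. A short usage sketch (the resource name is hypothetical):

    LockManager<String> lockManager =
        new LockManager<>(new OzoneConfiguration());

    lockManager.readLock("/vol1/bucket1");
    try {
      // read-only access: concurrent readers on the same resource are allowed
    } finally {
      lockManager.readUnlock("/vol1/bucket1");
    }

    lockManager.writeLock("/vol1/bucket1");
    try {
      // exclusive access: readers and other writers block until release
    } finally {
      lockManager.writeUnlock("/vol1/bucket1");
    }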
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 99c9e8d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,389 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import io.opentracing.Scope;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ActivatePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.DeactivatePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ClosePipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerLocationProtocolPB} to the
- * {@link StorageContainerLocationProtocol} server implementation.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolServerSideTranslatorPB
-    implements StorageContainerLocationProtocolPB {
-
-  private final StorageContainerLocationProtocol impl;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB.
-   *
-   * @param impl {@link StorageContainerLocationProtocol} server implementation
-   */
-  public StorageContainerLocationProtocolServerSideTranslatorPB(
-      StorageContainerLocationProtocol impl) throws IOException {
-    this.impl = impl;
-  }
-
-  @Override
-  public ContainerResponseProto allocateContainer(RpcController unused,
-      ContainerRequestProto request) throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("allocateContainer", request.getTraceID())) {
-      ContainerWithPipeline containerWithPipeline = impl
-          .allocateContainer(request.getReplicationType(),
-              request.getReplicationFactor(), request.getOwner());
-      return ContainerResponseProto.newBuilder()
-          .setContainerWithPipeline(containerWithPipeline.getProtobuf())
-          .setErrorCode(ContainerResponseProto.Error.success)
-          .build();
-
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetContainerResponseProto getContainer(
-      RpcController controller, GetContainerRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("getContainer", request.getTraceID())) {
-      ContainerInfo container = impl.getContainer(request.getContainerID());
-      return GetContainerResponseProto.newBuilder()
-          .setContainerInfo(container.getProtobuf())
-          .build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
-      RpcController controller, GetContainerWithPipelineRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("getContainerWithPipeline",
-            request.getTraceID())) {
-      ContainerWithPipeline container = impl
-          .getContainerWithPipeline(request.getContainerID());
-      return GetContainerWithPipelineResponseProto.newBuilder()
-          .setContainerWithPipeline(container.getProtobuf())
-          .build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMListContainerResponseProto listContainer(RpcController controller,
-      SCMListContainerRequestProto request) throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("listContainer", request.getTraceID())) {
-      long startContainerID = 0;
-      int count = -1;
-
-      // Arguments check.
-      if (request.hasStartContainerID()) {
-        // End container name is given.
-        startContainerID = request.getStartContainerID();
-      }
-      count = request.getCount();
-      List<ContainerInfo> containerList =
-          impl.listContainer(startContainerID, count);
-      SCMListContainerResponseProto.Builder builder =
-          SCMListContainerResponseProto.newBuilder();
-      for (ContainerInfo container : containerList) {
-        builder.addContainers(container.getProtobuf());
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMDeleteContainerResponseProto deleteContainer(
-      RpcController controller, SCMDeleteContainerRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("deleteContainer", request.getTraceID())) {
-      impl.deleteContainer(request.getContainerID());
-      return SCMDeleteContainerResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public StorageContainerLocationProtocolProtos.NodeQueryResponseProto
-      queryNode(RpcController controller,
-      StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("queryNode", request.getTraceID())) {
-      HddsProtos.NodeState nodeState = request.getState();
-      List<HddsProtos.Node> datanodes = impl.queryNode(nodeState,
-          request.getScope(), request.getPoolName());
-      return StorageContainerLocationProtocolProtos
-          .NodeQueryResponseProto.newBuilder()
-          .addAllDatanodes(datanodes)
-          .build();
-    } catch (Exception e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public ObjectStageChangeResponseProto notifyObjectStageChange(
-      RpcController controller, ObjectStageChangeRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("notifyObjectStageChange",
-            request.getTraceID())) {
-      impl.notifyObjectStageChange(request.getType(), request.getId(),
-          request.getOp(), request.getStage());
-      return ObjectStageChangeResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public PipelineResponseProto allocatePipeline(
-      RpcController controller, PipelineRequestProto request)
-      throws ServiceException {
-    // TODO : Wiring this up requires one more patch.
-    return null;
-  }
-
-  @Override
-  public ListPipelineResponseProto listPipelines(
-      RpcController controller, ListPipelineRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("listPipelines", request.getTraceID())) {
-      ListPipelineResponseProto.Builder builder = ListPipelineResponseProto
-          .newBuilder();
-      List<Pipeline> pipelines = impl.listPipelines();
-      for (Pipeline pipeline : pipelines) {
-        HddsProtos.Pipeline protobufMessage = pipeline.getProtobufMessage();
-        builder.addPipelines(protobufMessage);
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public ActivatePipelineResponseProto activatePipeline(
-      RpcController controller, ActivatePipelineRequestProto request)
-      throws ServiceException {
-    try (Scope ignored = TracingUtil
-        .importAndCreateScope("activatePipeline", request.getTraceID())) {
-      impl.activatePipeline(request.getPipelineID());
-      return ActivatePipelineResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public DeactivatePipelineResponseProto deactivatePipeline(
-      RpcController controller, DeactivatePipelineRequestProto request)
-      throws ServiceException {
-    try (Scope ignored = TracingUtil
-        .importAndCreateScope("deactivatePipeline", request.getTraceID())) {
-      impl.deactivatePipeline(request.getPipelineID());
-      return DeactivatePipelineResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public ClosePipelineResponseProto closePipeline(
-      RpcController controller, ClosePipelineRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("closePipeline", request.getTraceID())) {
-      impl.closePipeline(request.getPipelineID());
-      return ClosePipelineResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public HddsProtos.GetScmInfoResponseProto getScmInfo(
-      RpcController controller, HddsProtos.GetScmInfoRequestProto req)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("getScmInfo", req.getTraceID())) {
-      ScmInfo scmInfo = impl.getScmInfo();
-      return HddsProtos.GetScmInfoResponseProto.newBuilder()
-          .setClusterId(scmInfo.getClusterId())
-          .setScmId(scmInfo.getScmId())
-          .build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-
-  }
-
-  @Override
-  public InSafeModeResponseProto inSafeMode(
-      RpcController controller,
-      InSafeModeRequestProto request) throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("inSafeMode", request.getTraceID())) {
-      return InSafeModeResponseProto.newBuilder()
-          .setInSafeMode(impl.inSafeMode()).build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-  @Override
-  public ForceExitSafeModeResponseProto forceExitSafeMode(
-      RpcController controller, ForceExitSafeModeRequestProto request)
-      throws ServiceException {
-    try (Scope scope = TracingUtil
-        .importAndCreateScope("forceExitSafeMode", request.getTraceID())) {
-      return ForceExitSafeModeResponseProto.newBuilder()
-          .setExitedSafeMode(impl.forceExitSafeMode()).build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-  @Override
-  public StartReplicationManagerResponseProto startReplicationManager(
-      RpcController controller, StartReplicationManagerRequestProto request)
-      throws ServiceException {
-    try (Scope ignored = TracingUtil.importAndCreateScope(
-        "startReplicationManager", request.getTraceID())) {
-      impl.startReplicationManager();
-      return StartReplicationManagerResponseProto.newBuilder().build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-  @Override
-  public StopReplicationManagerResponseProto stopReplicationManager(
-      RpcController controller, StopReplicationManagerRequestProto request)
-      throws ServiceException {
-    try (Scope ignored = TracingUtil.importAndCreateScope(
-        "stopReplicationManager", request.getTraceID())) {
-      impl.stopReplicationManager();
-      return StopReplicationManagerResponseProto.newBuilder().build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-  @Override
-  public ReplicationManagerStatusResponseProto getReplicationManagerStatus(
-      RpcController controller, ReplicationManagerStatusRequestProto request)
-      throws ServiceException {
-    try (Scope ignored = TracingUtil.importAndCreateScope(
-        "getReplicationManagerStatus", request.getTraceID())) {
-      return ReplicationManagerStatusResponseProto.newBuilder()
-          .setIsRunning(impl.getReplicationManagerStatus()).build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto
index 5b6dd27..72e0e9f6 100644
--- a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto
@@ -30,17 +30,61 @@
 
 option java_generate_equals_and_hash = true;
 
-package hadoop.hdds;
+package hadoop.hdds.security;
 
 import "hdds.proto";
 
 /**
+All commands are sent as a request and all responses come back via the
+Response class. If you add new functions, please follow this pattern, since
+our tracing and visibility tools depend on it.
+*/
+message SCMSecurityRequest {
+    required Type cmdType = 1; // Type of the command
+
+    optional string traceID = 2;
+
+    optional SCMGetDataNodeCertRequestProto getDataNodeCertRequest = 3;
+    optional SCMGetOMCertRequestProto getOMCertRequest = 4;
+    optional SCMGetCertificateRequestProto getCertificateRequest = 5;
+    optional SCMGetCACertificateRequestProto getCACertificateRequest = 6;
+
+}
+
+message SCMSecurityResponse {
+    required Type cmdType = 1; // Type of the command
+
+    // A string that identifies this command; we generate the trace ID in the
+    // Ozone frontend, which allows us to trace the command all over Ozone.
+    optional string traceID = 2;
+
+    optional bool success = 3 [default = true];
+
+    optional string message = 4;
+
+    required Status status = 5;
+
+    optional SCMGetCertResponseProto getCertResponseProto = 6;
+
+}
+
+enum Type {
+    GetDataNodeCertificate = 1;
+    GetOMCertificate = 2;
+    GetCertificate = 3;
+    GetCACertificate = 4;
+}
+
+enum Status {
+    OK = 1;
+}
+/**
 * This message is send by data node to prove its identity and get an SCM
 * signed certificate.
 */
 message SCMGetDataNodeCertRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
-  required string CSR = 2;
+    required DatanodeDetailsProto datanodeDetails = 1;
+    required string CSR = 2;
 }
 
 /**
@@ -48,15 +92,15 @@
 * signed certificate.
 */
 message SCMGetOMCertRequestProto {
-  required OzoneManagerDetailsProto omDetails = 1;
-  required string CSR = 2;
+    required OzoneManagerDetailsProto omDetails = 1;
+    required string CSR = 2;
 }
 
 /**
 * Proto request to get a certificate with given serial id.
 */
 message SCMGetCertificateRequestProto {
-  required string certSerialId = 1;
+    required string certSerialId = 1;
 }
 
 /**
@@ -69,39 +113,17 @@
  * Returns a certificate signed by SCM.
  */
 message SCMGetCertResponseProto {
-  enum ResponseCode {
-    success = 1;
-    authenticationFailed = 2;
-    invalidCSR = 3;
-  }
-  required ResponseCode responseCode = 1;
-  required string x509Certificate = 2; // Base64 encoded X509 certificate.
-  optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate.
+    enum ResponseCode {
+        success = 1;
+        authenticationFailed = 2;
+        invalidCSR = 3;
+    }
+    required ResponseCode responseCode = 1;
+    required string x509Certificate = 2; // Base64 encoded X509 certificate.
+    optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate.
 }
 
 
 service SCMSecurityProtocolService {
-  /**
-  * Get SCM signed certificate for DataNode.
-  */
-  rpc getDataNodeCertificate (SCMGetDataNodeCertRequestProto) returns
-  (SCMGetCertResponseProto);
-
-  /**
-  * Get SCM signed certificate for DataNode.
-  */
-  rpc getOMCertificate (SCMGetOMCertRequestProto) returns
-  (SCMGetCertResponseProto);
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   */
-  rpc getCertificate (SCMGetCertificateRequestProto) returns
-  (SCMGetCertResponseProto);
-
-  /**
-   * Get SCM signed certificate for DataNode.
-   */
-  rpc getCACertificate (SCMGetCACertificateRequestProto) returns
-  (SCMGetCertResponseProto);
+    rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse);
 }
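The same wrapper pattern as the container-location protocol: every security call now goes through a single submitRequest RPC, with cmdType selecting the payload. A client-side sketch, assuming the generated classes for this proto and a rpcProxy/NULL_RPC_CONTROLLER pair like the translator above:

    // Sketch only: building and sending the wrapper message (generated class
    // names such as SCMSecurityProtocolProtos.* are assumptions here).
    SCMSecurityRequest request = SCMSecurityRequest.newBuilder()
        .setCmdType(Type.GetCACertificate)
        .setTraceID(TracingUtil.exportCurrentSpan())
        .setGetCACertificateRequest(
            SCMGetCACertificateRequestProto.getDefaultInstance())
        .build();
    SCMSecurityResponse response =
        rpcProxy.submitRequest(NULL_RPC_CONTROLLER, request);
    String pemEncodedCert =
        response.getGetCertResponseProto().getX509Certificate();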
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index ded0d02..fc7a598 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -26,7 +26,7 @@
 option java_outer_classname = "ScmBlockLocationProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdds;
+package hadoop.hdds.block;
 
 import "hdds.proto";
 
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index 0c35876..8ea72b6 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -26,11 +26,101 @@
 option java_outer_classname = "StorageContainerLocationProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdds;
+package hadoop.hdds.container;
 
 import "hdds.proto";
 
 /**
+  All functions are dispatched as Request/Response under Ozone.
+  If you add new functions, please add them using the same pattern.
+*/
+message ScmContainerLocationRequest {
+  required Type cmdType = 1; // Type of the command
+
+  // A string that identifies this command; we generate the trace ID in the
+  // Ozone frontend, which allows us to trace the command all over Ozone.
+  optional string traceID = 2;
+
+  optional ContainerRequestProto containerRequest = 6;
+  optional GetContainerRequestProto getContainerRequest = 7;
+  optional GetContainerWithPipelineRequestProto getContainerWithPipelineRequest = 8;
+  optional SCMListContainerRequestProto scmListContainerRequest = 9;
+  optional SCMDeleteContainerRequestProto scmDeleteContainerRequest = 10;
+  optional NodeQueryRequestProto nodeQueryRequest = 11;
+  optional ObjectStageChangeRequestProto objectStageChangeRequest = 12;
+  optional PipelineRequestProto pipelineRequest = 13;
+  optional ListPipelineRequestProto listPipelineRequest = 14;
+  optional ActivatePipelineRequestProto activatePipelineRequest = 15;
+  optional DeactivatePipelineRequestProto deactivatePipelineRequest = 16;
+  optional ClosePipelineRequestProto closePipelineRequest = 17;
+  optional GetScmInfoRequestProto getScmInfoRequest = 18;
+  optional InSafeModeRequestProto inSafeModeRequest = 19;
+  optional ForceExitSafeModeRequestProto forceExitSafeModeRequest = 20;
+  optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21;
+  optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22;
+  optional ReplicationManagerStatusRequestProto replicationManagerStatusRequest = 23;
+
+}
+
+message ScmContainerLocationResponse {
+  required Type cmdType = 1; // Type of the command
+
+  optional string traceID = 2;
+
+  optional bool success = 3 [default = true];
+
+  optional string message = 4;
+
+  required Status status = 5;
+
+  optional ContainerResponseProto containerResponse = 6;
+  optional GetContainerResponseProto getContainerResponse = 7;
+  optional GetContainerWithPipelineResponseProto getContainerWithPipelineResponse = 8;
+  optional SCMListContainerResponseProto scmListContainerResponse = 9;
+  optional SCMDeleteContainerResponseProto scmDeleteContainerResponse = 10;
+  optional NodeQueryResponseProto nodeQueryResponse = 11;
+  optional ObjectStageChangeResponseProto objectStageChangeResponse = 12;
+  optional PipelineResponseProto pipelineResponse = 13;
+  optional ListPipelineResponseProto listPipelineResponse = 14;
+  optional ActivatePipelineResponseProto activatePipelineResponse = 15;
+  optional DeactivatePipelineResponseProto deactivatePipelineResponse = 16;
+  optional ClosePipelineResponseProto closePipelineResponse = 17;
+  optional GetScmInfoResponseProto getScmInfoResponse = 18;
+  optional InSafeModeResponseProto inSafeModeResponse = 19;
+  optional ForceExitSafeModeResponseProto forceExitSafeModeResponse = 20;
+  optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21;
+  optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22;
+  optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23;
+  enum Status {
+    OK = 1;
+    CONTAINER_ALREADY_EXISTS = 2;
+    CONTAINER_IS_MISSING = 3;
+  }
+}
+
+enum Type {
+
+  AllocateContainer = 1;
+  GetContainer = 2;
+  GetContainerWithPipeline = 3;
+  ListContainer = 4;
+  DeleteContainer = 5;
+  QueryNode = 6;
+  NotifyObjectStageChange = 7;
+  AllocatePipeline = 8;
+  ListPipelines = 9;
+  ActivatePipeline = 10;
+  DeactivatePipeline = 11;
+  ClosePipeline = 12;
+  GetScmInfo = 13;
+  InSafeMode = 14;
+  ForceExitSafeMode = 15;
+  StartReplicationManager = 16;
+  StopReplicationManager = 17;
+  GetReplicationManagerStatus = 18;
+}
+
+/**
 * Request send to SCM asking where the container should be created.
 */
 message ContainerRequestProto {
@@ -235,97 +325,6 @@
  * and response messages for details of the RPC calls.
  */
 service StorageContainerLocationProtocolService {
+  rpc submitRequest (ScmContainerLocationRequest) returns (ScmContainerLocationResponse);
 
-  /**
-   * Creates a container entry in SCM.
-   */
-  rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);
-
-  /**
-   * Returns the pipeline for a given container.
-   */
-  rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
-
-  /**
-   * Returns the pipeline for a given container.
-   */
-  rpc getContainerWithPipeline(GetContainerWithPipelineRequestProto) returns (GetContainerWithPipelineResponseProto);
-
-  rpc listContainer(SCMListContainerRequestProto) returns (SCMListContainerResponseProto);
-
-  /**
-   * Deletes a container in SCM.
-   */
-  rpc deleteContainer(SCMDeleteContainerRequestProto) returns (SCMDeleteContainerResponseProto);
-
-  /**
-  * Returns a set of Nodes that meet a criteria.
-  */
-  rpc queryNode(NodeQueryRequestProto) returns (NodeQueryResponseProto);
-
-  /**
-  * Notify from client when begin or finish container or pipeline operations on datanodes.
-  */
-  rpc notifyObjectStageChange(ObjectStageChangeRequestProto) returns (ObjectStageChangeResponseProto);
-
-  /*
-  *  Apis that Manage Pipelines.
-  *
-  * Pipelines are abstractions offered by SCM and Datanode that allows users
-  * to create a replication pipeline.
-  *
-  *  These following APIs allow command line programs like SCM CLI to list
-  * and manage pipelines.
-  */
-
-  /**
-  *  Creates a replication pipeline.
-  */
-  rpc allocatePipeline(PipelineRequestProto)
-      returns (PipelineResponseProto);
-
-  /**
-   * Returns the list of Pipelines managed by SCM.
-   */
-  rpc listPipelines(ListPipelineRequestProto)
-      returns (ListPipelineResponseProto);
-
-  rpc activatePipeline(ActivatePipelineRequestProto)
-      returns (ActivatePipelineResponseProto);
-
-  rpc deactivatePipeline(DeactivatePipelineRequestProto)
-      returns (DeactivatePipelineResponseProto);
-
-  /**
-   * Closes a pipeline.
-   */
-  rpc closePipeline(ClosePipelineRequestProto)
-      returns (ClosePipelineResponseProto);
-
-  /**
-  *  Returns information about SCM.
-  */
-  rpc getScmInfo(GetScmInfoRequestProto)
-      returns (GetScmInfoResponseProto);
-
-  /**
-  *  Checks if SCM is in SafeMode.
-  */
-  rpc inSafeMode(InSafeModeRequestProto)
-  returns (InSafeModeResponseProto);
-
-  /**
-  *  Returns information about SCM.
-  */
-  rpc forceExitSafeMode(ForceExitSafeModeRequestProto)
-  returns (ForceExitSafeModeResponseProto);
-
-  rpc startReplicationManager(StartReplicationManagerRequestProto)
-  returns (StartReplicationManagerResponseProto);
-
-  rpc stopReplicationManager(StopReplicationManagerRequestProto)
-  returns (StopReplicationManagerResponseProto);
-
-  rpc getReplicationManagerStatus(ReplicationManagerStatusRequestProto)
-  returns (ReplicationManagerStatusResponseProto);
 }
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9e4c5ea..31bc652 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -204,6 +204,14 @@
     </description>
   </property>
   <property>
+    <name>dfs.container.ratis.leader.num.pending.requests</name>
+    <value>4096</value>
+    <tag>OZONE, RATIS, PERFORMANCE</tag>
+    <description>Maximum number of pending requests after which the leader
+      starts rejecting requests from clients.
+    </description>
+  </property>
+  <property>
     <name>dfs.container.ratis.replication.level</name>
     <value>MAJORITY</value>
     <tag>OZONE, RATIS</tag>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index 1ea6110..d406060 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -20,7 +20,7 @@
 package org.apache.hadoop.hdds.utils.db;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,14 +50,14 @@
 
   @Test
   public void builderWithoutAnyParams() throws IOException {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     thrown.expect(IOException.class);
     DBStoreBuilder.newBuilder(conf).build();
   }
 
   @Test
   public void builderWithOneParamV1() throws IOException {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     thrown.expect(IOException.class);
     DBStoreBuilder.newBuilder(conf)
         .setName("Test.db")
@@ -66,7 +66,7 @@
 
   @Test
   public void builderWithOneParamV2() throws IOException {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
     if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
@@ -79,7 +79,7 @@
 
   @Test
   public void builderWithOpenClose() throws Exception {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
     if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
@@ -94,7 +94,7 @@
 
   @Test
   public void builderWithDoubleTableName() throws Exception {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
     if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
@@ -112,7 +112,7 @@
 
   @Test
   public void builderWithDataWrites() throws Exception {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
     if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
@@ -141,7 +141,7 @@
 
   @Test
   public void builderWithDiskProfileWrites() throws Exception {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
     if(!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
index fa3030d..e88b1bb1 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
@@ -29,34 +29,143 @@
 public class TestLockManager {
 
   @Test(timeout = 1000)
-  public void testWithDifferentResource() {
-    LockManager<String> manager = new LockManager<>(new OzoneConfiguration());
-    manager.lock("/resourceOne");
+  public void testWriteLockWithDifferentResource() {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    manager.writeLock("/resourceOne");
     // This should work, as they are different resource.
-    manager.lock("/resourceTwo");
-    manager.unlock("/resourceOne");
-    manager.unlock("/resourceTwo");
+    manager.writeLock("/resourceTwo");
+    manager.writeUnlock("/resourceOne");
+    manager.writeUnlock("/resourceTwo");
     Assert.assertTrue(true);
   }
 
   @Test
-  public void testWithSameResource() throws Exception {
-    LockManager<String> manager = new LockManager<>(new OzoneConfiguration());
-    manager.lock("/resourceOne");
-    AtomicBoolean gotLock = new AtomicBoolean(false);
+  public void testWriteLockWithSameResource() throws Exception {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    final AtomicBoolean gotLock = new AtomicBoolean(false);
+    manager.writeLock("/resourceOne");
     new Thread(() -> {
-      manager.lock("/resourceOne");
+      manager.writeLock("/resourceOne");
       gotLock.set(true);
-      manager.unlock("/resourceOne");
+      manager.writeUnlock("/resourceOne");
     }).start();
-    // Let's give some time for the new thread to run
+    // Let's give some time for the other thread to run
     Thread.sleep(100);
-    // Since the new thread is trying to get lock on same object, it will wait.
+    // Since the other thread is trying to get write lock on same object,
+    // it will wait.
     Assert.assertFalse(gotLock.get());
-    manager.unlock("/resourceOne");
-    // Since we have released the lock, the new thread should have the lock
-    // now
-    // Let's give some time for the new thread to run
+    manager.writeUnlock("/resourceOne");
+    // Since we have released the write lock, the other thread should have
+    // the lock now
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    Assert.assertTrue(gotLock.get());
+  }
+
+  @Test(timeout = 1000)
+  public void testReadLockWithDifferentResource() {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    manager.readLock("/resourceOne");
+    manager.readLock("/resourceTwo");
+    manager.readUnlock("/resourceOne");
+    manager.readUnlock("/resourceTwo");
+    Assert.assertTrue(true);
+  }
+
+  @Test
+  public void testReadLockWithSameResource() throws Exception {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    final AtomicBoolean gotLock = new AtomicBoolean(false);
+    manager.readLock("/resourceOne");
+    new Thread(() -> {
+      manager.readLock("/resourceOne");
+      gotLock.set(true);
+      manager.readUnlock("/resourceOne");
+    }).start();
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    // Since the other thread is trying to get read lock, it should work.
+    Assert.assertTrue(gotLock.get());
+    manager.readUnlock("/resourceOne");
+  }
+
+  @Test
+  public void testWriteReadLockWithSameResource() throws Exception {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    final AtomicBoolean gotLock = new AtomicBoolean(false);
+    manager.writeLock("/resourceOne");
+    new Thread(() -> {
+      manager.readLock("/resourceOne");
+      gotLock.set(true);
+      manager.readUnlock("/resourceOne");
+    }).start();
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    // Since the other thread is trying to get read lock on same object,
+    // it will wait.
+    Assert.assertFalse(gotLock.get());
+    manager.writeUnlock("/resourceOne");
+    // Since we have released the write lock, the other thread should have
+    // the lock now
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    Assert.assertTrue(gotLock.get());
+  }
+
+  @Test
+  public void testReadWriteLockWithSameResource() throws Exception {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    final AtomicBoolean gotLock = new AtomicBoolean(false);
+    manager.readLock("/resourceOne");
+    new Thread(() -> {
+      manager.writeLock("/resourceOne");
+      gotLock.set(true);
+      manager.writeUnlock("/resourceOne");
+    }).start();
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    // Since the other thread is trying to get write lock on same object,
+    // it will wait.
+    Assert.assertFalse(gotLock.get());
+    manager.readUnlock("/resourceOne");
+    // Since we have released the read lock, the other thread should have
+    // the lock now
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    Assert.assertTrue(gotLock.get());
+  }
+
+  @Test
+  public void testMultiReadWriteLockWithSameResource() throws Exception {
+    final LockManager<String> manager =
+        new LockManager<>(new OzoneConfiguration());
+    final AtomicBoolean gotLock = new AtomicBoolean(false);
+    manager.readLock("/resourceOne");
+    manager.readLock("/resourceOne");
+    new Thread(() -> {
+      manager.writeLock("/resourceOne");
+      gotLock.set(true);
+      manager.writeUnlock("/resourceOne");
+    }).start();
+    // Let's give some time for the other thread to run
+    Thread.sleep(100);
+    // Since the other thread is trying to get write lock on same object,
+    // it will wait.
+    Assert.assertFalse(gotLock.get());
+    manager.readUnlock("/resourceOne");
+    // We have only released one read lock; we still hold another read lock.
+    Thread.sleep(100);
+    Assert.assertFalse(gotLock.get());
+    manager.readUnlock("/resourceOne");
+    // Since we have released the read lock, the other thread should have
+    // the lock now
+    // Let's give some time for the other thread to run
     Thread.sleep(100);
     Assert.assertTrue(gotLock.get());
   }
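
For reference, a minimal caller-side sketch of the new read/write API exercised by the tests above (class name hypothetical; assumes the LockManager and OzoneConfiguration classes used by the tests are on the classpath):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.lock.LockManager;

    public final class LockManagerUsage {
      public static void main(String[] args) {
        LockManager<String> manager = new LockManager<>(new OzoneConfiguration());

        // Multiple readers may hold the lock on the same resource concurrently.
        manager.readLock("/volume/bucket");
        try {
          // read-only work against the resource
        } finally {
          manager.readUnlock("/volume/bucket");
        }

        // A writer gets exclusive access; it blocks until all readers release.
        manager.writeLock("/volume/bucket");
        try {
          // mutating work against the resource
        } finally {
          manager.writeUnlock("/volume/bucket");
        }
      }
    }
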
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
index 64c20ac..471b679 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java
@@ -33,6 +33,7 @@
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.NoSuchFileException;
 import java.util.Set;
 
 /**
@@ -60,7 +61,7 @@
           .getResource(StandardLocation.CLASS_OUTPUT, "",
               OUTPUT_FILE_NAME).openInputStream()) {
         appender.load(input);
-      } catch (FileNotFoundException ex) {
+      } catch (FileNotFoundException | NoSuchFileException ex) {
         appender.init();
       }
 
@@ -105,7 +106,7 @@
 
     } catch (IOException e) {
       processingEnv.getMessager().printMessage(Kind.ERROR,
-          "Can't generate the config file from annotation: " + e.getMessage());
+          "Can't generate the config file from annotation: " + e);
     }
     return false;
   }
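
The widened catch matters because a missing file can surface either as the legacy java.io exception or as the NIO NoSuchFileException, depending on which API backs the resource. A standalone sketch of the same multi-catch pattern (hypothetical helper; uses Files.newInputStream rather than the annotation-processing Filer):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;
    import java.util.Properties;

    public final class MultiCatchSketch {
      // Hypothetical helper: load existing properties, or start empty when the
      // file is reported missing by either the java.io or the java.nio API.
      static Properties loadOrInit(Path path) throws IOException {
        Properties props = new Properties();
        try (InputStream in = Files.newInputStream(path)) {
          props.load(in);
        } catch (FileNotFoundException | NoSuchFileException missing) {
          // Files.newInputStream throws NoSuchFileException; legacy streams
          // throw FileNotFoundException. Treat both as "start from scratch".
        }
        return props;
      }
    }
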
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 2f89fa2..0eef961 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -55,9 +55,8 @@
       <version>1.16</version>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
   </dependencies>
@@ -93,8 +92,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
         </configuration>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 0535763..7b638a3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -150,7 +150,7 @@
   private final Cache<Long, ByteString> stateMachineDataCache;
   private final boolean isBlockTokenEnabled;
   private final TokenVerifier tokenVerifier;
-  private final AtomicBoolean isStateMachineHealthy;
+  private final AtomicBoolean stateMachineHealthy;
 
   private final Semaphore applyTransactionSemaphore;
   /**
@@ -190,7 +190,7 @@
         ScmConfigKeys.
             DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
     applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
-    isStateMachineHealthy = new AtomicBoolean(true);
+    stateMachineHealthy = new AtomicBoolean(true);
     this.executors = new ExecutorService[numContainerOpExecutors];
     for (int i = 0; i < numContainerOpExecutors; i++) {
       final int index = i;
@@ -271,11 +271,15 @@
     IOUtils.write(builder.build().toByteArray(), out);
   }
 
+  public boolean isStateMachineHealthy() {
+    return stateMachineHealthy.get();
+  }
+
   @Override
   public long takeSnapshot() throws IOException {
     TermIndex ti = getLastAppliedTermIndex();
     long startTime = Time.monotonicNow();
-    if (!isStateMachineHealthy.get()) {
+    if (!isStateMachineHealthy()) {
       String msg =
           "Failed to take snapshot " + " for " + gid + " as the stateMachine"
               + " is unhealthy. The last applied index is at " + ti;
@@ -731,7 +735,11 @@
           metrics.incPipelineLatency(cmdType,
               Time.monotonicNowNanos() - startTime);
         }
-        if (r.getResult() != ContainerProtos.Result.SUCCESS) {
+        // Do not mark the stateMachine unhealthy for close-container
+        // exceptions (CONTAINER_NOT_OPEN / CLOSED_CONTAINER_IO).
+        if (r.getResult() != ContainerProtos.Result.SUCCESS
+            && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN
+            && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO) {
           StorageContainerException sce =
               new StorageContainerException(r.getMessage(), r.getResult());
           LOG.error(
@@ -744,7 +752,7 @@
           // caught in stateMachineUpdater in Ratis and ratis server will
           // shutdown.
           applyTransactionFuture.completeExceptionally(sce);
-          isStateMachineHealthy.compareAndSet(true, false);
+          stateMachineHealthy.compareAndSet(true, false);
           ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole());
         } else {
           LOG.debug(
@@ -759,7 +767,7 @@
           // add the entry to the applyTransactionCompletionMap only if the
           // stateMachine is healthy i.e, there has been no applyTransaction
           // failures before.
-          if (isStateMachineHealthy.get()) {
+          if (isStateMachineHealthy()) {
             final Long previous = applyTransactionCompletionMap
                 .put(index, trx.getLogEntry().getTerm());
             Preconditions.checkState(previous == null);
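
The rename keeps the mutable flag private and routes readers through the new isStateMachineHealthy() accessor. A minimal sketch of that pattern, with a hypothetical class standing in for the state machine:

    import java.util.concurrent.atomic.AtomicBoolean;

    public final class HealthFlagSketch {
      private final AtomicBoolean stateMachineHealthy = new AtomicBoolean(true);

      public boolean isStateMachineHealthy() {
        return stateMachineHealthy.get();
      }

      public void onApplyTransactionFailure() {
        // Only the first failure transitions healthy -> unhealthy.
        stateMachineHealthy.compareAndSet(true, false);
      }

      public void maybeTakeSnapshot() {
        if (!isStateMachineHealthy()) {
          // Skip the snapshot; the applied state may be inconsistent.
          return;
        }
        // ... take the snapshot ...
      }
    }
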
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 746bfb86..179547b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -225,6 +226,11 @@
       setAutoTriggerEnabled(properties, true);
     RaftServerConfigKeys.Snapshot.
       setAutoTriggerThreshold(properties, snapshotThreshold);
+    int maxPendingRequests = conf.getInt(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT
+    );
+    RaftServerConfigKeys.Write.setElementLimit(properties,
+        maxPendingRequests);
     int logQueueNumElements =
         conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS,
             OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT);
@@ -260,7 +266,7 @@
         conf.getObject(RatisServerConfiguration.class);
     int numSnapshotsRetained =
         ratisServerConfiguration.getNumSnapshotsRetained();
-    RaftServerConfigKeys.Snapshot.setSnapshotRetentionPolicy(properties,
+    RaftServerConfigKeys.Snapshot.setRetentionFileNum(properties,
         numSnapshotsRetained);
     return properties;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 768d266..8494a15 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -24,6 +24,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .CopyContainerRequestProto;
@@ -92,6 +93,11 @@
 
   public void shutdown() {
     channel.shutdown();
+    try {
+      channel.awaitTermination(5, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      LOG.error("failed to shutdown replication channel", e);
+    }
   }
 
   /**
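
Bounding shutdown() with awaitTermination() is the usual grpc-java pattern for releasing channel resources without hanging the caller. A standalone sketch under that assumption (host, port and timeout are placeholders):

    import java.util.concurrent.TimeUnit;

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    public final class ChannelShutdownSketch {
      public static void main(String[] args) {
        // Hypothetical target; in GrpcReplicationClient the channel points at
        // the datanode replication endpoint.
        ManagedChannel channel = ManagedChannelBuilder
            .forAddress("localhost", 9859)
            .usePlaintext()
            .build();
        try {
          // ... issue replication calls ...
        } finally {
          channel.shutdown();
          try {
            // Bound the wait so shutdown never hangs the caller.
            channel.awaitTermination(5, TimeUnit.SECONDS);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      }
    }
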
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 4e1e27e..9b44666 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -24,6 +24,9 @@
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest.Builder;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -38,6 +41,7 @@
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
@@ -45,6 +49,7 @@
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.function.Consumer;
 
 /**
  * This class is the client-side translator to translate the requests made on
@@ -97,6 +102,25 @@
   }
 
   /**
+   * Helper method to wrap the request and send the message.
+   */
+  private SCMDatanodeResponse submitRequest(Type type,
+      Consumer<SCMDatanodeRequest.Builder> builderConsumer) throws IOException {
+    final SCMDatanodeResponse response;
+    try {
+      Builder builder = SCMDatanodeRequest.newBuilder()
+          .setCmdType(type);
+      builderConsumer.accept(builder);
+      SCMDatanodeRequest wrapper = builder.build();
+
+      response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    } catch (ServiceException ex) {
+      throw ProtobufHelper.getRemoteException(ex);
+    }
+    return response;
+  }
+
+  /**
    * Returns SCM version.
    *
    * @param unused - set to null and unused.
@@ -104,16 +128,11 @@
    */
   @Override
   public SCMVersionResponseProto getVersion(SCMVersionRequestProto
-      unused) throws IOException {
-    SCMVersionRequestProto request =
-        SCMVersionRequestProto.newBuilder().build();
-    final SCMVersionResponseProto response;
-    try {
-      response = rpcProxy.getVersion(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException ex) {
-      throw ProtobufHelper.getRemoteException(ex);
-    }
-    return response;
+      request) throws IOException {
+    return submitRequest(Type.GetVersion,
+        (builder) -> builder
+            .setGetVersionRequest(SCMVersionRequestProto.newBuilder().build()))
+        .getGetVersionResponse();
   }
 
   /**
@@ -126,13 +145,9 @@
   @Override
   public SCMHeartbeatResponseProto sendHeartbeat(
       SCMHeartbeatRequestProto heartbeat) throws IOException {
-    final SCMHeartbeatResponseProto resp;
-    try {
-      resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, heartbeat);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    return resp;
+    return submitRequest(Type.SendHeartbeat,
+        (builder) -> builder.setSendHeartbeatRequest(heartbeat))
+        .getSendHeartbeatResponse();
   }
 
   /**
@@ -155,13 +170,8 @@
     req.setContainerReport(containerReportsRequestProto);
     req.setPipelineReports(pipelineReportsProto);
     req.setNodeReport(nodeReport);
-    final SCMRegisteredResponseProto response;
-    try {
-      response = rpcProxy.register(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    return response;
+    return submitRequest(Type.Register,
+        (builder) -> builder.setRegisterRequest(req))
+        .getRegisterResponse();
   }
-
 }
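
The client translator now funnels every RPC through a single submitRequest helper that sets the common envelope fields once and lets each call contribute only its payload. A trimmed-down sketch of the same wrapping idea, using the generated message classes from this patch but leaving out the RPC proxy (class and method names are illustrative):

    import java.util.function.Consumer;

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;

    public final class RequestWrappingSketch {

      // Same shape as the translator's helper, minus the RPC call: the command
      // type is set once, the caller only fills in its payload.
      static SCMDatanodeRequest wrap(Type type,
          Consumer<SCMDatanodeRequest.Builder> payloadSetter) {
        SCMDatanodeRequest.Builder builder =
            SCMDatanodeRequest.newBuilder().setCmdType(type);
        payloadSetter.accept(builder);
        return builder.build();
      }

      public static void main(String[] args) {
        SCMDatanodeRequest request = wrap(Type.GetVersion,
            b -> b.setGetVersionRequest(
                SCMVersionRequestProto.newBuilder().build()));
        System.out.println(request.getCmdType());
      }
    }
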
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 8622332..ed704eb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -16,29 +16,24 @@
  */
 package org.apache.hadoop.ozone.protocolPB;
 
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;
+import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 
-import java.io.IOException;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is the server-side translator that forwards requests received on
@@ -48,47 +43,71 @@
 public class StorageContainerDatanodeProtocolServerSideTranslatorPB
     implements StorageContainerDatanodeProtocolPB {
 
+  private static final Logger LOG = LoggerFactory
+      .getLogger(StorageContainerDatanodeProtocolServerSideTranslatorPB.class);
+
   private final StorageContainerDatanodeProtocol impl;
+  private final OzoneProtocolMessageDispatcher<SCMDatanodeRequest,
+      SCMDatanodeResponse> dispatcher;
 
   public StorageContainerDatanodeProtocolServerSideTranslatorPB(
-      StorageContainerDatanodeProtocol impl) {
+      StorageContainerDatanodeProtocol impl,
+      ProtocolMessageMetrics protocolMessageMetrics) {
     this.impl = impl;
+    dispatcher =
+        new OzoneProtocolMessageDispatcher<>("SCMDatanodeProtocol",
+            protocolMessageMetrics,
+            LOG);
+  }
+
+  public SCMRegisteredResponseProto register(
+      SCMRegisterRequestProto request) throws IOException {
+    ContainerReportsProto containerRequestProto = request
+        .getContainerReport();
+    NodeReportProto dnNodeReport = request.getNodeReport();
+    PipelineReportsProto pipelineReport = request.getPipelineReports();
+    return impl.register(request.getDatanodeDetails(), dnNodeReport,
+        containerRequestProto, pipelineReport);
+
   }
 
   @Override
-  public SCMVersionResponseProto getVersion(RpcController controller,
-      SCMVersionRequestProto request)
+  public SCMDatanodeResponse submitRequest(RpcController controller,
+      SCMDatanodeRequest request) throws ServiceException {
+    return dispatcher.processRequest(request, this::processMessage,
+        request.getCmdType(), request.getTraceID());
+  }
+
+  public SCMDatanodeResponse processMessage(SCMDatanodeRequest request)
       throws ServiceException {
     try {
-      return impl.getVersion(request);
+      Type cmdType = request.getCmdType();
+      switch (cmdType) {
+      case GetVersion:
+        return SCMDatanodeResponse.newBuilder()
+            .setCmdType(cmdType)
+            .setStatus(Status.OK)
+            .setGetVersionResponse(
+                impl.getVersion(request.getGetVersionRequest()))
+            .build();
+      case SendHeartbeat:
+        return SCMDatanodeResponse.newBuilder()
+            .setCmdType(cmdType)
+            .setStatus(Status.OK)
+            .setSendHeartbeatResponse(
+                impl.sendHeartbeat(request.getSendHeartbeatRequest()))
+            .build();
+      case Register:
+        return SCMDatanodeResponse.newBuilder()
+            .setCmdType(cmdType)
+            .setStatus(Status.OK)
+            .setRegisterResponse(register(request.getRegisterRequest()))
+            .build();
+      default:
+        throw new ServiceException("Unknown command type: " + cmdType);
+      }
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
-
-  @Override
-  public SCMRegisteredResponseProto register(RpcController controller,
-      SCMRegisterRequestProto request) throws ServiceException {
-    try {
-      ContainerReportsProto containerRequestProto = request
-          .getContainerReport();
-      NodeReportProto dnNodeReport = request.getNodeReport();
-      PipelineReportsProto pipelineReport = request.getPipelineReports();
-      return impl.register(request.getDatanodeDetails(), dnNodeReport,
-          containerRequestProto, pipelineReport);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMHeartbeatResponseProto sendHeartbeat(RpcController controller,
-      SCMHeartbeatRequestProto request) throws ServiceException {
-    try {
-      return impl.sendHeartbeat(request);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 1d09dfa..a975cd5 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -34,6 +34,45 @@
 
 import "hdds.proto";
 
+
+message SCMDatanodeRequest {
+  required Type cmdType = 1; // Type of the command
+
+  optional string traceID = 2;
+
+  optional SCMVersionRequestProto getVersionRequest = 3;
+  optional SCMRegisterRequestProto registerRequest = 4;
+  optional SCMHeartbeatRequestProto sendHeartbeatRequest = 5;
+}
+
+message SCMDatanodeResponse {
+  required Type cmdType = 1; // Type of the command
+
+  optional string traceID = 2;
+
+  optional bool success = 3 [default = true];
+
+  optional string message = 4;
+
+  required Status status = 5;
+
+  optional SCMVersionResponseProto getVersionResponse = 6;
+  optional SCMRegisteredResponseProto registerResponse = 7;
+  optional SCMHeartbeatResponseProto sendHeartbeatResponse = 8;
+
+}
+
+enum Type {
+  GetVersion = 1;
+  Register = 2;
+  SendHeartbeat = 3;
+}
+
+enum Status {
+  OK = 1;
+  ERROR = 2;
+}
+
 /**
  * Request for version info of the software stack on the server.
  */
@@ -385,21 +424,6 @@
  */
 service StorageContainerDatanodeProtocolService {
 
-  /**
-  * Gets the version information from the SCM.
-  */
-  rpc getVersion (SCMVersionRequestProto) returns (SCMVersionResponseProto);
-
-  /**
-  * Registers a data node with SCM.
-  */
-  rpc register (SCMRegisterRequestProto) returns (SCMRegisteredResponseProto);
-
-  /**
-   * Send heartbeat from datanode to SCM. HB's under SCM looks more
-   * like life line protocol than HB's under HDFS. In other words, it is
-   * extremely light weight and contains no data payload.
-   */
-  rpc sendHeartbeat (SCMHeartbeatRequestProto) returns (SCMHeartbeatResponseProto);
-
+  // Generic datanode-to-SCM request: wraps version, register and
+  // heartbeat messages into a single RPC.
+  rpc submitRequest (SCMDatanodeRequest) returns (SCMDatanodeResponse);
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 514c822..5a7c30c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -29,12 +29,14 @@
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import com.google.protobuf.BlockingService;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import org.mockito.Mockito;
 
 /**
  * Test Endpoint class.
@@ -91,7 +93,7 @@
         StorageContainerDatanodeProtocolService.
             newReflectiveBlockingService(
                 new StorageContainerDatanodeProtocolServerSideTranslatorPB(
-                    server));
+                    server, Mockito.mock(ProtocolMessageMetrics.class)));
 
     RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss,
         StorageContainerDatanodeProtocolPB.class, scmDatanodeService,
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
new file mode 100644
index 0000000..d67a759
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.server;
+
+import org.apache.hadoop.hdds.function.FunctionWithServiceException;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
+
+import com.google.protobuf.ProtocolMessageEnum;
+import com.google.protobuf.ServiceException;
+import io.opentracing.Scope;
+import org.slf4j.Logger;
+
+/**
+ * Dispatches messages after tracing and message logging for insight.
+ * <p>
+ * This is a generic utility to dispatch messages in server-side translators.
+ * <p>
+ * It logs the message type/content at DEBUG/TRACE level for insight and
+ * creates a new span based on the tracing information.
+ */
+public class OzoneProtocolMessageDispatcher<REQUEST, RESPONSE> {
+
+  private String serviceName;
+
+  private final ProtocolMessageMetrics protocolMessageMetrics;
+
+  private Logger logger;
+
+  public OzoneProtocolMessageDispatcher(String serviceName,
+      ProtocolMessageMetrics protocolMessageMetrics, Logger logger) {
+    this.serviceName = serviceName;
+    this.protocolMessageMetrics = protocolMessageMetrics;
+    this.logger = logger;
+  }
+
+  public RESPONSE processRequest(
+      REQUEST request,
+      FunctionWithServiceException<REQUEST, RESPONSE> methodCall,
+      ProtocolMessageEnum type,
+      String traceId) throws ServiceException {
+    Scope scope = TracingUtil
+        .importAndCreateScope(type.toString(), traceId);
+    try {
+      if (logger.isTraceEnabled()) {
+        logger.trace(
+            "{} {} request is received: <json>{}</json>",
+            serviceName,
+            type.toString(),
+            request.toString().replaceAll("\n", "\\\\n"));
+      } else if (logger.isDebugEnabled()) {
+        logger.debug("{} {} request is received",
+            serviceName, type.toString());
+      }
+      protocolMessageMetrics.increment(type);
+
+      RESPONSE response = methodCall.apply(request);
+
+      if (logger.isTraceEnabled()) {
+        logger.trace(
+            "{} {} request is processed. Response: "
+                + "<json>{}</json>",
+            serviceName,
+            type.toString(),
+            response.toString().replaceAll("\n", "\\\\n"));
+      }
+      return response;
+
+    } finally {
+      scope.close();
+    }
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
index 016445c..7cea582 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
@@ -119,7 +119,7 @@
       Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+");
 
   private Lock profilerLock = new ReentrantLock();
-  private Integer pid;
+  private final Integer pid;
   private String asyncProfilerHome;
   private transient Process process;
 
@@ -208,11 +208,11 @@
       return;
     }
     // if pid is explicitly specified, use it else default to current process
-    pid = getInteger(req, "pid", pid);
+    Integer processId = getInteger(req, "pid", pid);
 
     // if pid is not specified in query param and if current process pid
     // cannot be determined
-    if (pid == null) {
+    if (processId == null) {
       resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
       setResponseHeader(resp);
       resp.getWriter().write(
@@ -243,7 +243,7 @@
             //Should be in sync with FILE_NAME_PATTERN
             File outputFile =
                 OUTPUT_DIR.resolve(
-                    ProfileServlet.generateFileName(pid, output, event))
+                    ProfileServlet.generateFileName(processId, output, event))
                     .toFile();
             List<String> cmd = new ArrayList<>();
             cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
@@ -288,7 +288,7 @@
             if (reverse) {
               cmd.add("--reverse");
             }
-            cmd.add(pid.toString());
+            cmd.add(processId.toString());
             process = runCmdAsync(cmd);
 
             // set response and set refresh header to output location
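
The change above avoids writing the request-scoped pid back into the shared servlet field, which could race between concurrent requests. A hypothetical servlet illustrating the same idea (javax.servlet API assumed on the classpath; constructor and field are illustrative):

    import java.io.IOException;

    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    // The field initialized at construction time stays final; each request
    // works on a local copy instead of mutating shared state from
    // concurrent handler threads.
    public class PidServletSketch extends HttpServlet {

      // Stands in for ProfileServlet's pid field (current process id); it may
      // be null when the process id cannot be determined.
      private final Integer defaultPid;

      public PidServletSketch(Integer defaultPid) {
        this.defaultPid = defaultPid;
      }

      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        String raw = req.getParameter("pid");
        // Request-scoped value: never written back to the shared field.
        // (No input validation in this sketch.)
        Integer processId = raw != null ? Integer.valueOf(raw) : defaultPid;
        if (processId == null) {
          resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
              "process id could not be determined");
          return;
        }
        resp.getWriter().write("profiling pid " + processId);
      }
    }
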
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
index 39c8c8b..f37d323 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
@@ -69,8 +69,10 @@
             .append(key)
             .append(" ")
             .append(metrics.type().toString().toLowerCase())
-            .append("\n")
-            .append(key)
+            .append("\n");
+
+        StringBuilder prometheusMetricKey = new StringBuilder();
+        prometheusMetricKey.append(key)
             .append("{");
         String sep = "";
 
@@ -80,7 +82,7 @@
 
           //ignore specific tag which includes sub-hierarchy
           if (!tagName.equals("numopenconnectionsperuser")) {
-            builder.append(sep)
+            prometheusMetricKey.append(sep)
                 .append(tagName)
                 .append("=\"")
                 .append(tag.value())
@@ -88,10 +90,14 @@
             sep = ",";
           }
         }
-        builder.append("} ");
+        prometheusMetricKey.append("}");
+
+        String prometheusMetricKeyAsString = prometheusMetricKey.toString();
+        builder.append(prometheusMetricKeyAsString);
+        builder.append(" ");
         builder.append(metrics.value());
         builder.append("\n");
-        metricLines.put(key, builder.toString());
+        metricLines.put(prometheusMetricKeyAsString, builder.toString());
 
       }
     }
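
The fix keys metricLines by the full "name{tags}" string instead of the bare metric name, so two records that differ only in tag values no longer overwrite each other. A small standalone illustration of that keying (values hard-coded for the example):

    import java.util.Map;
    import java.util.TreeMap;

    public final class PrometheusKeySketch {
      public static void main(String[] args) {
        Map<String, String> metricLines = new TreeMap<>();

        // Key by "name{tags}" rather than by name alone, so the same counter
        // exported for two ports produces two lines instead of one.
        String key1 = "rpc_metrics_counter{port=\"1234\"}";
        String key2 = "rpc_metrics_counter{port=\"2345\"}";
        metricLines.put(key1, key1 + " 123\n");
        metricLines.put(key2, key2 + " 234\n");

        metricLines.values().forEach(System.out::print);
      }
    }
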
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
index e233f65..f2683b5 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
@@ -21,17 +21,19 @@
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
 /**
  * Test prometheus Sink.
  */
@@ -60,7 +62,6 @@
 
     //THEN
     String writtenMetrics = stream.toString(UTF_8.name());
-    System.out.println(writtenMetrics);
     Assert.assertTrue(
         "The expected metric line is missing from prometheus metrics output",
         writtenMetrics.contains(
@@ -72,6 +73,49 @@
   }
 
   @Test
+  public void testPublishWithSameName() throws IOException {
+    //GIVEN
+    MetricsSystem metrics = DefaultMetricsSystem.instance();
+
+    metrics.init("test");
+    PrometheusMetricsSink sink = new PrometheusMetricsSink();
+    metrics.register("Prometheus", "Prometheus", sink);
+    metrics.register("FooBar", "fooBar", (MetricsSource) (collector, all) -> {
+      collector.addRecord("RpcMetrics").add(new MetricsTag(PORT_INFO, "1234"))
+          .addGauge(COUNTER_INFO, 123).endRecord();
+
+      collector.addRecord("RpcMetrics").add(new MetricsTag(
+          PORT_INFO, "2345")).addGauge(COUNTER_INFO, 234).endRecord();
+    });
+
+    metrics.start();
+    metrics.publishMetricsNow();
+
+    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+    OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8);
+
+    //WHEN
+    sink.writeMetrics(writer);
+    writer.flush();
+
+    //THEN
+    String writtenMetrics = stream.toString(UTF_8.name());
+    Assert.assertTrue(
+        "The expected metric line is missing from prometheus metrics output",
+        writtenMetrics.contains(
+            "rpc_metrics_counter{port=\"2345\""));
+
+    Assert.assertTrue(
+        "The expected metric line is missing from prometheus metrics "
+            + "output",
+        writtenMetrics.contains(
+            "rpc_metrics_counter{port=\"1234\""));
+
+    metrics.stop();
+    metrics.shutdown();
+  }
+
+  @Test
   public void testNamingCamelCase() {
     PrometheusMetricsSink sink = new PrometheusMetricsSink();
 
@@ -127,4 +171,29 @@
     @Metric
     private MutableCounterLong numBucketCreateFails;
   }
-}
\ No newline at end of file
+
+  public static final MetricsInfo PORT_INFO = new MetricsInfo() {
+    @Override
+    public String name() {
+      return "PORT";
+    }
+
+    @Override
+    public String description() {
+      return "port";
+    }
+  };
+
+  public static final MetricsInfo COUNTER_INFO = new MetricsInfo() {
+    @Override
+    public String name() {
+      return "COUNTER";
+    }
+
+    @Override
+    public String description() {
+      return "counter";
+    }
+  };
+
+}
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 5c98e38..a174337 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.4.0</ratis.version>
+    <ratis.version>0.5.0-201fc85-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 
@@ -195,13 +195,6 @@
         <version>${junit.jupiter.version}</version>
         <scope>test</scope>
       </dependency>
-
-      <dependency>
-        <groupId>com.google.code.findbugs</groupId>
-        <artifactId>findbugs</artifactId>
-        <version>3.0.1</version>
-        <scope>provided</scope>
-      </dependency>
     </dependencies>
   </dependencyManagement>
   <dependencies>
@@ -308,14 +301,6 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <version>3.0.4</version>
-        <configuration>
-          <excludeFilterFile combine.self="override"></excludeFilterFile>
-        </configuration>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-dependency-plugin</artifactId>
         <executions>
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 99d5922..68a5cd8 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -101,8 +101,8 @@
       <artifactId>bcprov-jdk15on</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
   </dependencies>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index f660442..1665a77 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -233,4 +233,12 @@
     return replicaState.get() == ContainerReplicaProto.State.UNHEALTHY;
   }
 
+  /**
+   * Return ContainerManager.
+   * @return {@link ContainerManager}
+   */
+  protected ContainerManager getContainerManager() {
+    return containerManager;
+  }
+
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 02b1353..f9488e2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -179,4 +179,12 @@
    */
   ContainerInfo getMatchingContainer(long size, String owner,
       Pipeline pipeline, List<ContainerID> excludedContainerIDS);
+
+  /**
+   * Notifies the container manager that a report handler has finished
+   * processing a report, so that it can update its metrics.
+   * @param isFullReport true for a full container report, false for an
+   *                     incremental container report
+   * @param success whether the report was processed successfully
+   */
+  void notifyContainerReportProcessing(boolean isFullReport, boolean success);
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 934b244..2227df6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -104,7 +104,9 @@
        */
       nodeManager.setContainers(datanodeDetails, containersInDn);
 
+      containerManager.notifyContainerReportProcessing(true, true);
     } catch (NodeNotFoundException ex) {
+      containerManager.notifyContainerReportProcessing(true, false);
       LOG.error("Received container report from unknown datanode {} {}",
           datanodeDetails, ex);
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index 3dd3d9d..a8f5730 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -57,6 +57,7 @@
     LOG.debug("Processing incremental container report from data node {}",
             report.getDatanodeDetails().getUuid());
 
+    boolean success = true;
     for (ContainerReplicaProto replicaProto :
         report.getReport().getReportList()) {
       try {
@@ -66,16 +67,25 @@
         nodeManager.addContainer(dd, id);
         processContainerReplica(dd, replicaProto);
       } catch (ContainerNotFoundException e) {
+        success = false;
         LOG.warn("Container {} not found!", replicaProto.getContainerID());
       } catch (NodeNotFoundException ex) {
+        success = false;
         LOG.error("Received ICR from unknown datanode {} {}",
             report.getDatanodeDetails(), ex);
       } catch (IOException e) {
+        success = false;
         LOG.error("Exception while processing ICR for container {}",
             replicaProto.getContainerID());
       }
     }
 
+    if (success) {
+      getContainerManager().notifyContainerReportProcessing(false, true);
+    } else {
+      getContainerManager().notifyContainerReportProcessing(false, false);
+    }
+
   }
 
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 3c44c4e..470d4eb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -72,6 +73,8 @@
   private final ContainerStateManager containerStateManager;
   private final int numContainerPerOwnerInPipeline;
 
+  private final SCMContainerManagerMetrics scmContainerManagerMetrics;
+
   /**
    * Constructs a mapping class that creates mapping between container names
    * and pipelines.
@@ -109,6 +112,8 @@
             ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
 
     loadExistingContainers();
+
+    scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
   }
 
   private void loadExistingContainers() throws IOException {
@@ -204,6 +209,7 @@
       int count) {
     lock.lock();
     try {
+      scmContainerManagerMetrics.incNumListContainersOps();
       final long startId = startContainerID == null ?
           0 : startContainerID.getId();
       final List<ContainerID> containersIds =
@@ -241,11 +247,17 @@
   public ContainerInfo allocateContainer(final ReplicationType type,
       final ReplicationFactor replicationFactor, final String owner)
       throws IOException {
-    lock.lock();
     try {
-      final ContainerInfo containerInfo =
-          containerStateManager.allocateContainer(pipelineManager, type,
+      lock.lock();
+      ContainerInfo containerInfo = null;
+      try {
+        containerInfo =
+            containerStateManager.allocateContainer(pipelineManager, type,
               replicationFactor, owner);
+      } catch (IOException ex) {
+        scmContainerManagerMetrics.incNumFailureCreateContainers();
+        throw ex;
+      }
       // Add container to DB.
       try {
         addContainerToDB(containerInfo);
@@ -286,7 +298,9 @@
         LOG.warn("Unable to remove the container {} from container store," +
                 " it's missing!", containerID);
       }
+      scmContainerManagerMetrics.incNumSuccessfulDeleteContainers();
     } catch (ContainerNotFoundException cnfe) {
+      scmContainerManagerMetrics.incNumFailureDeleteContainers();
       throw new SCMException(
           "Failed to delete container " + containerID + ", reason : " +
               "container doesn't exist.",
@@ -447,9 +461,16 @@
           containerInfo.getContainerID());
       containerStore.put(containerIDBytes,
           containerInfo.getProtobuf().toByteArray());
+      // Increment the metric here: both allocateBlock (via
+      // getMatchingContainer()) and allocateContainer eventually call this
+      // method to persist a newly created container, so this is the single
+      // place where every successful container creation is counted.
+      scmContainerManagerMetrics.incNumSuccessfulCreateContainers();
     } catch (IOException ex) {
       // If adding to containerStore fails, we should remove the container
       // from in-memory map.
+      scmContainerManagerMetrics.incNumFailureCreateContainers();
       LOG.error("Add Container to DB failed for ContainerID #{}",
           containerInfo.getContainerID());
       try {
@@ -546,5 +567,26 @@
     if (containerStore != null) {
       containerStore.close();
     }
+
+    if (scmContainerManagerMetrics != null) {
+      this.scmContainerManagerMetrics.unRegister();
+    }
+  }
+
+  public void notifyContainerReportProcessing(boolean isFullReport,
+      boolean success) {
+    if (isFullReport) {
+      if (success) {
+        scmContainerManagerMetrics.incNumContainerReportsProcessedSuccessful();
+      } else {
+        scmContainerManagerMetrics.incNumContainerReportsProcessedFailed();
+      }
+    } else {
+      if (success) {
+        scmContainerManagerMetrics.incNumICRReportsProcessedSuccessful();
+      } else {
+        scmContainerManagerMetrics.incNumICRReportsProcessedFailed();
+      }
+    }
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
new file mode 100644
index 0000000..e9a2579
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.metrics;
+
+
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+/**
+ * Class contains metrics related to ContainerManager.
+ */
+@Metrics(about = "SCM ContainerManager metrics", context = "ozone")
+public final class SCMContainerManagerMetrics {
+
+  private static final String SOURCE_NAME =
+      SCMContainerManagerMetrics.class.getSimpleName();
+
+  // These metrics are reset to zero after a restart. They count the
+  // successful and failed create/delete container operations in SCM.
+
+  private @Metric MutableCounterLong numSuccessfulCreateContainers;
+  private @Metric MutableCounterLong numFailureCreateContainers;
+  private @Metric MutableCounterLong numSuccessfulDeleteContainers;
+  private @Metric MutableCounterLong numFailureDeleteContainers;
+  private @Metric MutableCounterLong numListContainerOps;
+
+
+  private @Metric MutableCounterLong numContainerReportsProcessedSuccessful;
+  private @Metric MutableCounterLong numContainerReportsProcessedFailed;
+  private @Metric MutableCounterLong numICRReportsProcessedSuccessful;
+  private @Metric MutableCounterLong numICRReportsProcessedFailed;
+
+  private SCMContainerManagerMetrics() {
+  }
+
+  /**
+   * Create and return metrics instance.
+   * @return SCMContainerManagerMetrics
+   */
+  public static SCMContainerManagerMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE_NAME, "SCM ContainerManager Metrics",
+        new SCMContainerManagerMetrics());
+  }
+
+  /**
+   * Unregister metrics.
+   */
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE_NAME);
+  }
+
+  public void incNumSuccessfulCreateContainers() {
+    this.numSuccessfulCreateContainers.incr();
+  }
+
+  public void incNumFailureCreateContainers() {
+    this.numFailureCreateContainers.incr();
+  }
+
+  public void incNumSuccessfulDeleteContainers() {
+    this.numSuccessfulDeleteContainers.incr();
+  }
+
+  public void incNumFailureDeleteContainers() {
+    this.numFailureDeleteContainers.incr();
+  }
+
+  public void incNumListContainersOps() {
+    this.numListContainerOps.incr();
+  }
+
+  public void incNumContainerReportsProcessedSuccessful() {
+    this.numContainerReportsProcessedSuccessful.incr();
+  }
+
+  public void incNumContainerReportsProcessedFailed() {
+    this.numContainerReportsProcessedFailed.incr();
+  }
+
+  public void incNumICRReportsProcessedSuccessful() {
+    this.numICRReportsProcessedSuccessful.incr();
+  }
+
+  public void incNumICRReportsProcessedFailed() {
+    this.numICRReportsProcessedFailed.incr();
+  }
+
+  public long getNumContainerReportsProcessedSuccessful() {
+    return numContainerReportsProcessedSuccessful.value();
+  }
+
+  public long getNumContainerReportsProcessedFailed() {
+    return numContainerReportsProcessedFailed.value();
+  }
+
+  public long getNumICRReportsProcessedSuccessful() {
+    return numICRReportsProcessedSuccessful.value();
+  }
+
+  public long getNumICRReportsProcessedFailed() {
+    return numICRReportsProcessedFailed.value();
+  }
+
+  public long getNumSuccessfulCreateContainers() {
+    return numSuccessfulCreateContainers.value();
+  }
+
+  public long getNumFailureCreateContainers() {
+    return numFailureCreateContainers.value();
+  }
+
+  public long getNumSuccessfulDeleteContainers() {
+    return numSuccessfulDeleteContainers.value();
+  }
+
+  public long getNumFailureDeleteContainers() {
+    return numFailureDeleteContainers.value();
+  }
+
+  public long getNumListContainersOps() {
+    return numListContainerOps.value();
+  }
+
+}
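
A short usage sketch of the new metrics source, assuming the metrics system has been initialized as it is during SCM startup (class name and the initialize prefix are illustrative):

    import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public final class ContainerManagerMetricsSketch {
      public static void main(String[] args) {
        DefaultMetricsSystem.initialize("StorageContainerManager");

        SCMContainerManagerMetrics metrics = SCMContainerManagerMetrics.create();
        metrics.incNumSuccessfulCreateContainers();
        metrics.incNumListContainersOps();

        System.out.println(metrics.getNumSuccessfulCreateContainers()); // 1
        System.out.println(metrics.getNumListContainersOps());          // 1

        // Unregister on shutdown, mirroring SCMContainerManager#close.
        metrics.unRegister();
        DefaultMetricsSystem.shutdown();
      }
    }
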
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java
new file mode 100644
index 0000000..3198de1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.metrics;
+
+/*
+ * This package contains StorageContainerManager metric classes.
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index afff7a3..205f2e1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -213,11 +213,11 @@
   DatanodeDetails getNodeByUuid(String uuid);
 
   /**
-   * Given datanode address(Ipaddress or hostname), returns the DatanodeDetails
-   * for the node.
+   * Given a datanode address (IP address or hostname), returns a list of
+   * DatanodeDetails for the datanodes running at that address.
    *
    * @param address datanode address
-   * @return the given datanode, or null if not found
+   * @return the matching datanodes, or an empty list if none are found
    */
-  DatanodeDetails getNodeByAddress(String address);
+  List<DatanodeDetails> getNodesByAddress(String address);
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index c277ea9..e48eda1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,11 +25,13 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.LinkedList;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledFuture;
 import java.util.stream.Collectors;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -99,7 +101,7 @@
   private final NetworkTopology clusterMap;
   private final DNSToSwitchMapping dnsToSwitchMapping;
   private final boolean useHostname;
-  private final ConcurrentHashMap<String, String> dnsToUuidMap =
+  private final ConcurrentHashMap<String, Set<String>> dnsToUuidMap =
       new ConcurrentHashMap<>();
 
   /**
@@ -294,7 +296,7 @@
       }
       nodeStateManager.addNode(datanodeDetails);
       clusterMap.add(datanodeDetails);
-      dnsToUuidMap.put(dnsName, datanodeDetails.getUuidString());
+      addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString());
       // Updating Node Report, as registration is successful
       processNodeReport(datanodeDetails, nodeReport);
       LOG.info("Registered Data node : {}", datanodeDetails);
@@ -310,6 +312,26 @@
   }
 
   /**
+   * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs
+   * running on that host. As each address can have many DNs running on it,
+   * this is a one-to-many mapping.
+   * @param dnsName String representing the hostname or IP of the node
+   * @param uuid String representing the UUID of the registered node.
+   */
+  @SuppressFBWarnings(value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
+      justification="The method is synchronized and this is the only place "+
+          "dnsToUuidMap is modified")
+  private synchronized void addEntryTodnsToUuidMap(
+      String dnsName, String uuid) {
+    Set<String> dnList = dnsToUuidMap.get(dnsName);
+    if (dnList == null) {
+      dnList = ConcurrentHashMap.newKeySet();
+      dnsToUuidMap.put(dnsName, dnList);
+    }
+    dnList.add(uuid);
+  }
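
An equivalent, slightly more compact formulation would use ConcurrentHashMap.computeIfAbsent, which avoids the get-then-put sequence that the FindBugs suppression above refers to. A sketch for comparison only; the patch keeps the explicit form under the synchronized method:

    // Alternative sketch, behaviourally equivalent under the same synchronization.
    private synchronized void addEntryTodnsToUuidMap(String dnsName, String uuid) {
      dnsToUuidMap.computeIfAbsent(dnsName, k -> ConcurrentHashMap.newKeySet())
          .add(uuid);
    }
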
+
+  /**
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
    * @param datanodeDetails - DatanodeDetailsProto.
@@ -619,29 +641,34 @@
   }
 
   /**
-   * Given datanode address(Ipaddress or hostname), returns the DatanodeDetails
-   * for the node.
+   * Given a datanode address (IP address or hostname), returns a list of
+   * DatanodeDetails for the datanodes registered at that address.
    *
    * @param address datanode address
-   * @return the given datanode, or null if not found
+   * @return the matching datanodes, or an empty list if none are found
    */
   @Override
-  public DatanodeDetails getNodeByAddress(String address) {
+  public List<DatanodeDetails> getNodesByAddress(String address) {
+    List<DatanodeDetails> results = new LinkedList<>();
     if (Strings.isNullOrEmpty(address)) {
       LOG.warn("address is null");
-      return null;
+      return results;
     }
-    String uuid = dnsToUuidMap.get(address);
-    if (uuid != null) {
+    Set<String> uuids = dnsToUuidMap.get(address);
+    if (uuids == null) {
+      LOG.warn("Cannot find node for address {}", address);
+      return results;
+    }
+
+    for (String uuid : uuids) {
       DatanodeDetails temp = DatanodeDetails.newBuilder().setUuid(uuid).build();
       try {
-        return nodeStateManager.getNode(temp);
+        results.add(nodeStateManager.getNode(temp));
       } catch (NodeNotFoundException e) {
         LOG.warn("Cannot find node for uuid {}", uuid);
       }
     }
-    LOG.warn("Cannot find node for address {}", address);
-    return null;
+    return results;
   }
 
   private String nodeResolve(String hostname) {
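
Callers of the new getNodesByAddress API now have to handle zero, one, or several datanodes behind a single address (for example several DNs co-located on one host). A minimal caller-side sketch, mirroring how SCMBlockProtocolServer below picks the first match; the variable names are illustrative:

    List<DatanodeDetails> candidates = nodeManager.getNodesByAddress(clientMachine);
    DatanodeDetails client = candidates.isEmpty() ? null : candidates.get(0);
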
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..2d14fa6
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.protocol;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto.ResponseCode;
+import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Status;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
+import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link SCMSecurityProtocolPB} to the {@link
+ * SCMSecurityProtocol} server implementation.
+ */
+public class SCMSecurityProtocolServerSideTranslatorPB
+    implements SCMSecurityProtocolPB {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMSecurityProtocolServerSideTranslatorPB.class);
+
+  private final SCMSecurityProtocol impl;
+
+  private OzoneProtocolMessageDispatcher<SCMSecurityRequest,
+      SCMSecurityResponse>
+      dispatcher;
+
+  public SCMSecurityProtocolServerSideTranslatorPB(SCMSecurityProtocol impl,
+      ProtocolMessageMetrics messageMetrics) {
+    this.impl = impl;
+    this.dispatcher =
+        new OzoneProtocolMessageDispatcher<>("ScmSecurityProtocol",
+            messageMetrics, LOG);
+  }
+
+  @Override
+  public SCMSecurityResponse submitRequest(RpcController controller,
+      SCMSecurityRequest request) throws ServiceException {
+    return dispatcher.processRequest(request, this::processRequest,
+        request.getCmdType(), request.getTraceID());
+  }
+
+  public SCMSecurityResponse processRequest(SCMSecurityRequest request)
+      throws ServiceException {
+    try {
+      switch (request.getCmdType()) {
+      case GetCertificate:
+        return SCMSecurityResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetCertResponseProto(
+                getCertificate(request.getGetCertificateRequest()))
+            .build();
+      case GetCACertificate:
+        return SCMSecurityResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetCertResponseProto(
+                getCACertificate(request.getGetCACertificateRequest()))
+            .build();
+      case GetOMCertificate:
+        return SCMSecurityResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetCertResponseProto(
+                getOMCertificate(request.getGetOMCertRequest()))
+            .build();
+      case GetDataNodeCertificate:
+        return SCMSecurityResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetCertResponseProto(
+                getDataNodeCertificate(request.getGetDataNodeCertRequest()))
+            .build();
+      default:
+        throw new IllegalArgumentException(
+            "Unknown request type: " + request.getCmdType());
+      }
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * Get SCM signed certificate for DataNode.
+   *
+   * @param request certificate signing request from the DataNode.
+   * @return SCMGetCertResponseProto containing the signed certificate.
+   */
+  public SCMGetCertResponseProto getDataNodeCertificate(
+      SCMGetDataNodeCertRequestProto request)
+      throws IOException {
+
+    String certificate = impl
+        .getDataNodeCertificate(request.getDatanodeDetails(),
+            request.getCSR());
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate)
+            .setX509CACertificate(impl.getCACertificate());
+
+    return builder.build();
+
+  }
+
+  /**
+   * Get SCM signed certificate for OzoneManager.
+   *
+   * @param request certificate signing request from the OzoneManager.
+   * @return SCMGetCertResponseProto.
+   */
+  public SCMGetCertResponseProto getOMCertificate(
+      SCMGetOMCertRequestProto request) throws IOException {
+    String certificate = impl
+        .getOMCertificate(request.getOmDetails(),
+            request.getCSR());
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate)
+            .setX509CACertificate(impl.getCACertificate());
+    return builder.build();
+
+  }
+
+  public SCMGetCertResponseProto getCertificate(
+      SCMGetCertificateRequestProto request) throws IOException {
+
+    String certificate = impl.getCertificate(request.getCertSerialId());
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate);
+    return builder.build();
+
+  }
+
+  public SCMGetCertResponseProto getCACertificate(
+      SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto request)
+      throws IOException {
+
+    String certificate = impl.getCACertificate();
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate);
+    return builder.build();
+
+  }
+
+}
\ No newline at end of file
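
All of the translators touched by this patch follow the same shape: the RPC entry point hands the request to OzoneProtocolMessageDispatcher, which takes care of metrics, tracing and debug logging, and the per-command work happens in a plain processRequest handler. A minimal driving sketch for this security translator, assuming an SCMSecurityProtocol implementation and a ProtocolMessageMetrics instance are available from the surrounding server code:

    // Illustrative only: exercising the translator directly.
    // submitRequest throws ServiceException, so a real caller handles or declares it.
    SCMSecurityProtocolServerSideTranslatorPB translator =
        new SCMSecurityProtocolServerSideTranslatorPB(securityProtocolImpl, metrics);

    SCMSecurityRequest request = SCMSecurityRequest.newBuilder()
        .setCmdType(SCMSecurityProtocolProtos.Type.GetCACertificate)
        .setTraceID("trace-1")
        .setGetCACertificateRequest(
            SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto
                .getDefaultInstance())
        .build();

    SCMSecurityResponse response = translator.submitRequest(null, request);
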
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
similarity index 84%
rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
index bad24cf..b6ce067 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.protocolPB;
+package org.apache.hadoop.hdds.scm.protocol;
 
 import java.io.IOException;
 import java.util.List;
@@ -40,17 +40,15 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
-import io.opentracing.Scope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,8 +66,9 @@
   private static final Logger LOG = LoggerFactory
       .getLogger(ScmBlockLocationProtocolServerSideTranslatorPB.class);
 
-  private final ProtocolMessageMetrics
-      protocolMessageMetrics;
+  private final OzoneProtocolMessageDispatcher<SCMBlockLocationRequest,
+      SCMBlockLocationResponse>
+      dispatcher;
 
   /**
    * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB.
@@ -80,7 +79,9 @@
       ScmBlockLocationProtocol impl,
       ProtocolMessageMetrics metrics) throws IOException {
     this.impl = impl;
-    this.protocolMessageMetrics = metrics;
+    dispatcher = new OzoneProtocolMessageDispatcher<>(
+        "BlockLocationProtocol", metrics, LOG);
+
   }
 
   private SCMBlockLocationResponse.Builder createSCMBlockResponse(
@@ -94,43 +95,18 @@
   @Override
   public SCMBlockLocationResponse send(RpcController controller,
       SCMBlockLocationRequest request) throws ServiceException {
-    String traceId = request.getTraceID();
-
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("BlockLocationProtocol {} request is received: <json>{}</json>",
-          request.getCmdType().toString(),
-          request.toString().replaceAll("\n", "\\\\n"));
-
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("BlockLocationProtocol {} request is received",
-          request.getCmdType().toString());
-    }
-
-    protocolMessageMetrics.increment(request.getCmdType());
-
-    try (Scope scope = TracingUtil
-        .importAndCreateScope(
-            "ScmBlockLocationProtocol." + request.getCmdType(),
-            request.getTraceID())) {
-      SCMBlockLocationResponse response =
-          processMessage(request, traceId);
-
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(
-            "BlockLocationProtocol {} request is processed. Response: "
-                + "<json>{}</json>",
-            request.getCmdType().toString(),
-            response.toString().replaceAll("\n", "\\\\n"));
-      }
-      return response;
-    }
+    return dispatcher.processRequest(
+        request,
+        this::processMessage,
+        request.getCmdType(),
+        request.getTraceID());
   }
 
   private SCMBlockLocationResponse processMessage(
-      SCMBlockLocationRequest request, String traceId) throws ServiceException {
+      SCMBlockLocationRequest request) throws ServiceException {
     SCMBlockLocationResponse.Builder response = createSCMBlockResponse(
         request.getCmdType(),
-        traceId);
+        request.getTraceID());
     response.setSuccess(true);
     response.setStatus(Status.OK);
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..0d2f470
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -0,0 +1,393 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocol;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerResponseProto;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link StorageContainerLocationProtocolPB} to the
+ * {@link StorageContainerLocationProtocol} server implementation.
+ */
+@InterfaceAudience.Private
+public final class StorageContainerLocationProtocolServerSideTranslatorPB
+    implements StorageContainerLocationProtocolPB {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(
+          StorageContainerLocationProtocolServerSideTranslatorPB.class);
+
+  private final StorageContainerLocationProtocol impl;
+
+  private OzoneProtocolMessageDispatcher<ScmContainerLocationRequest,
+      ScmContainerLocationResponse>
+      dispatcher;
+
+  /**
+   * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB.
+   *
+   * @param impl            {@link StorageContainerLocationProtocol} server
+   *                        implementation
+   * @param protocolMetrics metrics collector for the protocol messages
+   */
+  public StorageContainerLocationProtocolServerSideTranslatorPB(
+      StorageContainerLocationProtocol impl,
+      ProtocolMessageMetrics protocolMetrics) throws IOException {
+    this.impl = impl;
+    this.dispatcher =
+        new OzoneProtocolMessageDispatcher<>("ScmContainerLocation",
+            protocolMetrics, LOG);
+  }
+
+  @Override
+  public ScmContainerLocationResponse submitRequest(RpcController controller,
+      ScmContainerLocationRequest request) throws ServiceException {
+    return dispatcher
+        .processRequest(request, this::processRequest, request.getCmdType(),
+            request.getTraceID());
+  }
+
+  public ScmContainerLocationResponse processRequest(
+      ScmContainerLocationRequest request) throws ServiceException {
+    try {
+      switch (request.getCmdType()) {
+      case AllocateContainer:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setContainerResponse(
+                allocateContainer(request.getContainerRequest()))
+            .build();
+      case GetContainer:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetContainerResponse(
+                getContainer(request.getGetContainerRequest()))
+            .build();
+      case GetContainerWithPipeline:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetContainerWithPipelineResponse(getContainerWithPipeline(
+                request.getGetContainerWithPipelineRequest()))
+            .build();
+      case ListContainer:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setScmListContainerResponse(listContainer(
+                request.getScmListContainerRequest()))
+            .build();
+      case QueryNode:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setNodeQueryResponse(queryNode(request.getNodeQueryRequest()))
+            .build();
+      case NotifyObjectStageChange:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setObjectStageChangeResponse(notifyObjectStageChange(
+                request.getObjectStageChangeRequest()))
+            .build();
+      case ListPipelines:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setListPipelineResponse(listPipelines(
+                request.getListPipelineRequest()))
+            .build();
+      case ActivatePipeline:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setActivatePipelineResponse(activatePipeline(
+                request.getActivatePipelineRequest()))
+            .build();
+      case GetScmInfo:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setGetScmInfoResponse(getScmInfo(
+                request.getGetScmInfoRequest()))
+            .build();
+      case InSafeMode:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setInSafeModeResponse(inSafeMode(
+                request.getInSafeModeRequest()))
+            .build();
+      case ForceExitSafeMode:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setForceExitSafeModeResponse(forceExitSafeMode(
+                request.getForceExitSafeModeRequest()))
+            .build();
+      case StartReplicationManager:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setStartReplicationManagerResponse(startReplicationManager(
+                request.getStartReplicationManagerRequest()))
+            .build();
+      case StopReplicationManager:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setStopReplicationManagerResponse(stopReplicationManager(
+                request.getStopReplicationManagerRequest()))
+            .build();
+      case GetReplicationManagerStatus:
+        return ScmContainerLocationResponse.newBuilder()
+            .setCmdType(request.getCmdType())
+            .setStatus(Status.OK)
+            .setReplicationManagerStatusResponse(getReplicationManagerStatus(
+                request.getSeplicationManagerStatusRequest()))
+            .build();
+      default:
+        throw new IllegalArgumentException(
+            "Unknown command type: " + request.getCmdType());
+      }
+
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  public ContainerResponseProto allocateContainer(ContainerRequestProto request)
+      throws IOException {
+    ContainerWithPipeline containerWithPipeline = impl
+        .allocateContainer(request.getReplicationType(),
+            request.getReplicationFactor(), request.getOwner());
+    return ContainerResponseProto.newBuilder()
+        .setContainerWithPipeline(containerWithPipeline.getProtobuf())
+        .setErrorCode(ContainerResponseProto.Error.success)
+        .build();
+
+  }
+
+  public GetContainerResponseProto getContainer(
+      GetContainerRequestProto request) throws IOException {
+    ContainerInfo container = impl.getContainer(request.getContainerID());
+    return GetContainerResponseProto.newBuilder()
+        .setContainerInfo(container.getProtobuf())
+        .build();
+  }
+
+  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
+      GetContainerWithPipelineRequestProto request)
+      throws IOException {
+    ContainerWithPipeline container = impl
+        .getContainerWithPipeline(request.getContainerID());
+    return GetContainerWithPipelineResponseProto.newBuilder()
+        .setContainerWithPipeline(container.getProtobuf())
+        .build();
+  }
+
+  public SCMListContainerResponseProto listContainer(
+      SCMListContainerRequestProto request) throws IOException {
+
+    long startContainerID = 0;
+    int count = -1;
+
+    // Arguments check.
+    if (request.hasStartContainerID()) {
+      // Start container ID is given.
+      startContainerID = request.getStartContainerID();
+    }
+    count = request.getCount();
+    List<ContainerInfo> containerList =
+        impl.listContainer(startContainerID, count);
+    SCMListContainerResponseProto.Builder builder =
+        SCMListContainerResponseProto.newBuilder();
+    for (ContainerInfo container : containerList) {
+      builder.addContainers(container.getProtobuf());
+    }
+    return builder.build();
+  }
+
+  public SCMDeleteContainerResponseProto deleteContainer(
+      SCMDeleteContainerRequestProto request)
+      throws IOException {
+    impl.deleteContainer(request.getContainerID());
+    return SCMDeleteContainerResponseProto.newBuilder().build();
+
+  }
+
+  public NodeQueryResponseProto queryNode(
+      StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
+      throws IOException {
+
+    HddsProtos.NodeState nodeState = request.getState();
+    List<HddsProtos.Node> datanodes = impl.queryNode(nodeState,
+        request.getScope(), request.getPoolName());
+    return NodeQueryResponseProto.newBuilder()
+        .addAllDatanodes(datanodes)
+        .build();
+
+  }
+
+  public ObjectStageChangeResponseProto notifyObjectStageChange(
+      ObjectStageChangeRequestProto request)
+      throws IOException {
+    impl.notifyObjectStageChange(request.getType(), request.getId(),
+        request.getOp(), request.getStage());
+    return ObjectStageChangeResponseProto.newBuilder().build();
+  }
+
+  public ListPipelineResponseProto listPipelines(
+      ListPipelineRequestProto request)
+      throws IOException {
+    ListPipelineResponseProto.Builder builder = ListPipelineResponseProto
+        .newBuilder();
+    List<Pipeline> pipelines = impl.listPipelines();
+    for (Pipeline pipeline : pipelines) {
+      HddsProtos.Pipeline protobufMessage = pipeline.getProtobufMessage();
+      builder.addPipelines(protobufMessage);
+    }
+    return builder.build();
+  }
+
+  public ActivatePipelineResponseProto activatePipeline(
+      ActivatePipelineRequestProto request)
+      throws IOException {
+    impl.activatePipeline(request.getPipelineID());
+    return ActivatePipelineResponseProto.newBuilder().build();
+  }
+
+  public DeactivatePipelineResponseProto deactivatePipeline(
+      DeactivatePipelineRequestProto request)
+      throws IOException {
+    impl.deactivatePipeline(request.getPipelineID());
+    return DeactivatePipelineResponseProto.newBuilder().build();
+  }
+
+  public ClosePipelineResponseProto closePipeline(
+      RpcController controller, ClosePipelineRequestProto request)
+      throws IOException {
+
+    impl.closePipeline(request.getPipelineID());
+    return ClosePipelineResponseProto.newBuilder().build();
+
+  }
+
+  public HddsProtos.GetScmInfoResponseProto getScmInfo(
+      HddsProtos.GetScmInfoRequestProto req)
+      throws IOException {
+    ScmInfo scmInfo = impl.getScmInfo();
+    return HddsProtos.GetScmInfoResponseProto.newBuilder()
+        .setClusterId(scmInfo.getClusterId())
+        .setScmId(scmInfo.getScmId())
+        .build();
+
+  }
+
+  public InSafeModeResponseProto inSafeMode(
+      InSafeModeRequestProto request) throws IOException {
+
+    return InSafeModeResponseProto.newBuilder()
+        .setInSafeMode(impl.inSafeMode()).build();
+
+  }
+
+  public ForceExitSafeModeResponseProto forceExitSafeMode(
+      ForceExitSafeModeRequestProto request)
+      throws IOException {
+    return ForceExitSafeModeResponseProto.newBuilder()
+        .setExitedSafeMode(impl.forceExitSafeMode()).build();
+
+  }
+
+  public StartReplicationManagerResponseProto startReplicationManager(
+      StartReplicationManagerRequestProto request)
+      throws IOException {
+    impl.startReplicationManager();
+    return StartReplicationManagerResponseProto.newBuilder().build();
+  }
+
+  public StopReplicationManagerResponseProto stopReplicationManager(
+      StopReplicationManagerRequestProto request)
+      throws IOException {
+    impl.stopReplicationManager();
+    return StopReplicationManagerResponseProto.newBuilder().build();
+
+  }
+
+  public ReplicationManagerStatusResponseProto getReplicationManagerStatus(
+      ReplicationManagerStatusRequestProto request)
+      throws IOException {
+    return ReplicationManagerStatusResponseProto.newBuilder()
+        .setIsRunning(impl.getReplicationManagerStatus()).build();
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
new file mode 100644
index 0000000..411f22e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocol;
+/**
+ * RPC/protobuf specific translator classes for SCM protocol.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
index 496d481..8eadeb3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
@@ -19,6 +19,7 @@
 
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -63,19 +64,18 @@
             " value should be >= 0.0 and <= 1.0");
 
     containerMap = new ConcurrentHashMap<>();
-    if(containers != null) {
-      containers.forEach(c -> {
-        // TODO: There can be containers in OPEN state which were never
-        // created by the client. We are not considering these containers for
-        // now. These containers can be handled by tracking pipelines.
-        if (c != null && c.getState() != null &&
-            !c.getState().equals(HddsProtos.LifeCycleState.OPEN)) {
-          containerMap.put(c.getContainerID(), c);
-        }
-      });
-      maxContainer = containerMap.size();
-    }
+    containers.forEach(container -> {
+      // There can be containers in OPEN/CLOSING state which were never
+      // created by the client. We are not considering these containers for
+      // now. These containers can be handled by tracking pipelines.
 
+      Optional.ofNullable(container.getState())
+          .filter(state -> state != HddsProtos.LifeCycleState.OPEN)
+          .filter(state -> state != HddsProtos.LifeCycleState.CLOSING)
+          .ifPresent(s -> containerMap.put(container.getContainerID(),
+              container));
+    });
+    maxContainer = containerMap.size();
     long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff);
     getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff);
   }
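
For a concrete sense of the cutoff above: the rule counts only containers that are neither OPEN nor CLOSING, and (judging by the metric name) requires ceil(maxContainer * safeModeCutoff) of them to have at least one reported replica. A worked example with illustrative numbers:

    long maxContainer = 100;          // eligible containers tracked in containerMap
    double safeModeCutoff = 0.99;     // configured cutoff fraction
    long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff);  // 99
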
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 500a8cd..9c69758 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -57,7 +57,7 @@
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.ozone.protocolPB.ScmBlockLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
 
 import com.google.common.collect.Maps;
 import com.google.protobuf.BlockingService;
@@ -295,7 +295,12 @@
     boolean auditSuccess = true;
     try{
       NodeManager nodeManager = scm.getScmNodeManager();
-      Node client = nodeManager.getNodeByAddress(clientMachine);
+      Node client = null;
+      List<DatanodeDetails> possibleClients =
+          nodeManager.getNodesByAddress(clientMachine);
+      if (possibleClients.size() > 0) {
+        client = possibleClients.get(0);
+      }
       List<Node> nodeList = new ArrayList();
       nodes.stream().forEach(uuid -> {
         DatanodeDetails node = nodeManager.getNodeByUuid(uuid);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 9e9d2fe..d982507 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -60,8 +60,8 @@
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.audit.Auditor;
 import org.apache.hadoop.ozone.audit.SCMAction;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -104,6 +104,7 @@
   private final StorageContainerManager scm;
   private final OzoneConfiguration conf;
   private SafeModePrecheck safeModePrecheck;
+  private final ProtocolMessageMetrics protocolMetrics;
 
   public SCMClientProtocolServer(OzoneConfiguration conf,
       StorageContainerManager scm) throws IOException {
@@ -116,10 +117,16 @@
     RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
         ProtobufRpcEngine.class);
 
+    protocolMetrics = ProtocolMessageMetrics
+        .create("ScmContainerLocationProtocol",
+            "SCM ContainerLocation protocol metrics",
+            StorageContainerLocationProtocolProtos.Type.values());
+
     // SCM Container Service RPC
     BlockingService storageProtoPbService =
         newReflectiveBlockingService(
-            new StorageContainerLocationProtocolServerSideTranslatorPB(this));
+            new StorageContainerLocationProtocolServerSideTranslatorPB(this,
+                protocolMetrics));
 
     final InetSocketAddress scmAddress = HddsServerUtil
         .getScmClientBindAddress(conf);
@@ -148,6 +155,7 @@
   }
 
   public void start() {
+    protocolMetrics.register();
     LOG.info(
         StorageContainerManager.buildRpcServerStartMessage(
             "RPC server for Client ", getClientRpcAddress()));
@@ -155,6 +163,7 @@
   }
 
   public void stop() {
+    protocolMetrics.unregister();
     try {
       LOG.info("Stopping the RPC server for Client Protocol");
       getClientRpcServer().stop();
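
The same metrics wiring recurs in each protocol server in this patch: build a ProtocolMessageMetrics for the protocol's command enum, hand it to the server-side translator, then register it in start() and unregister it in stop(). Condensed into one place as a sketch (the impl reference and RPC service wiring are assumed from the surrounding server class):

    // Condensed sketch of the recurring ProtocolMessageMetrics lifecycle.
    ProtocolMessageMetrics protocolMetrics = ProtocolMessageMetrics.create(
        "ScmContainerLocationProtocol",
        "SCM ContainerLocation protocol metrics",
        StorageContainerLocationProtocolProtos.Type.values());

    StorageContainerLocationProtocolServerSideTranslatorPB translator =
        new StorageContainerLocationProtocolServerSideTranslatorPB(impl, protocolMetrics);

    protocolMetrics.register();    // in start()
    // ... serve requests ...
    protocolMetrics.unregister();  // in stop()
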
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 6dd9dab..530c0a6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -21,61 +21,32 @@
  */
 package org.apache.hadoop.hdds.scm.server;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.protobuf.BlockingService;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
-    .Type.closeContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
-    .Type.deleteBlocksCommand;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
-    .Type.deleteContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type
-    .replicateContainerCommand;
-import static org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto
-    .Type.reregisterCommand;
-
-
-
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .ReportFromDatanode;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-        .PipelineReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -95,27 +66,28 @@
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolServerSideTranslatorPB;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+import com.google.protobuf.BlockingService;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.closeContainerCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
-
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
 import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Protocol Handler for Datanode Protocol.
@@ -138,6 +110,7 @@
   private final InetSocketAddress datanodeRpcAddress;
   private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher;
   private final EventPublisher eventPublisher;
+  private final ProtocolMessageMetrics protocolMessageMetrics;
 
   public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
       StorageContainerManager scm, EventPublisher eventPublisher)
@@ -157,12 +130,17 @@
 
     RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
         ProtobufRpcEngine.class);
+
+    protocolMessageMetrics = ProtocolMessageMetrics
+        .create("SCMDatanodeProtocol", "SCM Datanode protocol",
+            StorageContainerDatanodeProtocolProtos.Type.values());
+
     BlockingService dnProtoPbService =
         StorageContainerDatanodeProtocolProtos
             .StorageContainerDatanodeProtocolService
             .newReflectiveBlockingService(
                 new StorageContainerDatanodeProtocolServerSideTranslatorPB(
-                    this));
+                    this, protocolMessageMetrics));
 
     InetSocketAddress datanodeRpcAddr =
         HddsServerUtil.getScmDataNodeBindAddress(conf);
@@ -191,6 +169,7 @@
     LOG.info(
         StorageContainerManager.buildRpcServerStartMessage(
             "RPC server for DataNodes", datanodeRpcAddress));
+    protocolMessageMetrics.register();
     datanodeRpcServer.start();
   }
 
@@ -370,6 +349,7 @@
       LOG.error(" datanodeRpcServer stop failed.", ex);
     }
     IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager());
+    protocolMessageMetrics.unregister();
   }
 
   @Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index 05a1e04..c4b4efd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -5,9 +5,9 @@
  * licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- *
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.scm.server;
 
 import com.google.protobuf.BlockingService;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.cert.CertificateException;
@@ -32,7 +33,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
@@ -41,7 +42,9 @@
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
 import org.apache.hadoop.security.KerberosInfo;
+
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,6 +65,7 @@
   private final CertificateServer certificateServer;
   private final RPC.Server rpcServer;
   private final InetSocketAddress rpcAddress;
+  private final ProtocolMessageMetrics metrics;
 
   SCMSecurityProtocolServer(OzoneConfiguration conf,
       CertificateServer certificateServer) throws IOException {
@@ -76,10 +80,13 @@
     // SCM security service RPC service.
     RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
         ProtobufRpcEngine.class);
+    metrics = new ProtocolMessageMetrics("ScmSecurityProtocol",
+        "SCM Security protocol metrics",
+        SCMSecurityProtocolProtos.Type.values());
     BlockingService secureProtoPbService =
         SCMSecurityProtocolProtos.SCMSecurityProtocolService
             .newReflectiveBlockingService(
-                new SCMSecurityProtocolServerSideTranslatorPB(this));
+                new SCMSecurityProtocolServerSideTranslatorPB(this, metrics));
     this.rpcServer =
         StorageContainerManager.startRpcServer(
             conf,
@@ -96,8 +103,8 @@
   /**
    * Get SCM signed certificate for DataNode.
    *
-   * @param dnDetails       - DataNode Details.
-   * @param certSignReq     - Certificate signing request.
+   * @param dnDetails   - DataNode Details.
+   * @param certSignReq - Certificate signing request.
    * @return String         - SCM signed pem encoded certificate.
    */
   @Override
@@ -122,8 +129,8 @@
   /**
    * Get SCM signed certificate for OM.
    *
-   * @param omDetails       - OzoneManager Details.
-   * @param certSignReq     - Certificate signing request.
+   * @param omDetails   - OzoneManager Details.
+   * @param certSignReq - Certificate signing request.
    * @return String         - SCM signed pem encoded certificate.
    */
   @Override
@@ -147,7 +154,7 @@
   /**
    * Get SCM signed certificate with given serial id.
    *
-   * @param certSerialId    - Certificate serial id.
+   * @param certSerialId - Certificate serial id.
    * @return string         - pem encoded SCM signed certificate.
    */
   @Override
@@ -196,12 +203,14 @@
   public void start() {
     LOGGER.info(StorageContainerManager.buildRpcServerStartMessage("Starting"
         + " RPC server for SCMSecurityProtocolServer.", getRpcAddress()));
+    metrics.register();
     getRpcServer().start();
   }
 
   public void stop() {
     try {
       LOGGER.info("Stopping the SCMSecurityProtocolServer.");
+      metrics.unregister();
       getRpcServer().stop();
     } catch (Exception ex) {
       LOGGER.error("SCMSecurityProtocolServer stop failed.", ex);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index e27a451..20a8b74 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -89,7 +89,7 @@
   private final Node2PipelineMap node2PipelineMap;
   private final Node2ContainerMap node2ContainerMap;
   private NetworkTopology clusterMap;
-  private ConcurrentHashMap<String, String> dnsToUuidMap;
+  private ConcurrentHashMap<String, Set<String>> dnsToUuidMap;
 
   public MockNodeManager(boolean initializeFakeNodes, int nodeCount) {
     this.healthyNodes = new LinkedList<>();
@@ -413,7 +413,7 @@
     try {
       node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
           Collections.emptySet());
-      dnsToUuidMap.put(datanodeDetails.getIpAddress(),
+      addEntryTodnsToUuidMap(datanodeDetails.getIpAddress(),
           datanodeDetails.getUuidString());
       if (clusterMap != null) {
         datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
@@ -426,6 +426,23 @@
   }
 
   /**
+   * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs
+   * running on that host. As each address can have many DNs running on it,
+   * this is a one-to-many mapping.
+   * @param dnsName String representing the hostname or IP of the node
+   * @param uuid String representing the UUID of the registered node.
+   */
+  private synchronized void addEntryTodnsToUuidMap(
+      String dnsName, String uuid) {
+    Set<String> dnList = dnsToUuidMap.get(dnsName);
+    if (dnList == null) {
+      dnList = ConcurrentHashMap.newKeySet();
+      dnsToUuidMap.put(dnsName, dnList);
+    }
+    dnList.add(uuid);
+  }
+
+  /**
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
    * @param datanodeDetails - Datanode ID.
@@ -511,8 +528,19 @@
   }
 
   @Override
-  public DatanodeDetails getNodeByAddress(String address) {
-    return getNodeByUuid(dnsToUuidMap.get(address));
+  public List<DatanodeDetails> getNodesByAddress(String address) {
+    List<DatanodeDetails> results = new LinkedList<>();
+    Set<String> uuids = dnsToUuidMap.get(address);
+    if (uuids == null) {
+      return results;
+    }
+    for (String uuid : uuids) {
+      DatanodeDetails dn = getNodeByUuid(uuid);
+      if (dn != null) {
+        results.add(dn);
+      }
+    }
+    return results;
   }
 
   public void setNetworkTopology(NetworkTopology topology) {
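
Note: because several datanodes can run on one host, the address index is now a
one-to-many map from hostname or IP to datanode UUIDs. The synchronized helper
above is one way to maintain it; an equivalent sketch using
ConcurrentHashMap.computeIfAbsent (the DnsToUuidIndex class and its method names
are illustrative, not part of the patch) looks like this:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // Illustrative index mirroring the dnsToUuidMap in MockNodeManager above.
    class DnsToUuidIndex {
      private final ConcurrentHashMap<String, Set<String>> dnsToUuidMap =
          new ConcurrentHashMap<>();

      /** Record that the datanode with this UUID runs at the given address. */
      void add(String dnsName, String uuid) {
        dnsToUuidMap
            .computeIfAbsent(dnsName, k -> ConcurrentHashMap.newKeySet())
            .add(uuid);
      }

      /** Return all UUIDs registered for an address; empty if none or null. */
      List<String> lookup(String address) {
        Set<String> uuids = address == null ? null : dnsToUuidMap.get(address);
        return uuids == null ? Collections.emptyList() : new ArrayList<>(uuids);
      }
    }
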
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index a37142f..ea60532 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -1072,6 +1072,25 @@
   }
 
   /**
+   * Test getNodesByAddress when datanodes are registered and looked up
+   * by IP address.
+   */
+  @Test
+  public void testGetNodesByAddressWithIpAddress()
+      throws IOException, InterruptedException, AuthenticationException {
+    testGetNodesByAddress(false);
+  }
+
+  /**
+   * Test getNodesByAddress when using hostnames.
+   */
+  @Test
+  public void testGetNodesByAddressWithHostname()
+      throws IOException, InterruptedException, AuthenticationException {
+    testGetNodesByAddress(true);
+  }
+
+  /**
    * Test add node into a 4-layer network topology during node register.
    */
   @Test
@@ -1161,11 +1180,55 @@
       // test get node
       if (useHostname) {
         Arrays.stream(hostNames).forEach(hostname ->
-            Assert.assertNotNull(nodeManager.getNodeByAddress(hostname)));
+            Assert.assertNotEquals(0, nodeManager.getNodesByAddress(hostname)
+                .size()));
       } else {
         Arrays.stream(ipAddress).forEach(ip ->
-            Assert.assertNotNull(nodeManager.getNodeByAddress(ip)));
+            Assert.assertNotEquals(0, nodeManager.getNodesByAddress(ip)
+                .size()));
       }
     }
   }
+
+  /**
+   * Test getNodesByAddress when multiple datanodes share the same address.
+   */
+  private void testGetNodesByAddress(boolean useHostname)
+      throws IOException, InterruptedException, AuthenticationException {
+    OzoneConfiguration conf = getConf();
+    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
+        MILLISECONDS);
+
+      // create a set of hosts - note two datanodes are registered on "host1"
+    String[] hostNames = {"host1", "host1", "host2", "host3", "host4"};
+    String[] ipAddress =
+        {"1.2.3.4", "1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
+
+    if (useHostname) {
+      conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true");
+    }
+    final int nodeCount = hostNames.length;
+    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
+      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
+      for (int i = 0; i < nodeCount; i++) {
+        DatanodeDetails node = TestUtils.createDatanodeDetails(
+            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+        nodeManager.register(node, null, null);
+      }
+      // test get node
+      Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size());
+      if (useHostname) {
+        Assert.assertEquals(2,
+            nodeManager.getNodesByAddress("host1").size());
+        Assert.assertEquals(1, nodeManager.getNodesByAddress("host2").size());
+        Assert.assertEquals(0, nodeManager.getNodesByAddress("unknown").size());
+      } else {
+        Assert.assertEquals(2,
+            nodeManager.getNodesByAddress("1.2.3.4").size());
+        Assert.assertEquals(1, nodeManager.getNodesByAddress("2.3.4.5").size());
+        Assert.assertEquals(0, nodeManager.getNodesByAddress("1.9.8.7").size());
+      }
+    }
+  }
+
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index ba92035..247b38a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -23,6 +23,7 @@
 
 import java.io.File;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 
@@ -60,7 +61,7 @@
   private static EventQueue queue;
   private SCMSafeModeManager scmSafeModeManager;
   private static Configuration config;
-  private List<ContainerInfo> containers;
+  private List<ContainerInfo> containers = Collections.emptyList();
 
   @Rule
   public Timeout timeout = new Timeout(1000 * 300);
@@ -85,7 +86,8 @@
 
   @Test
   public void testSafeModeStateWithNullContainers() {
-    new SCMSafeModeManager(config, null, null, queue);
+    new SCMSafeModeManager(config, Collections.emptyList(),
+        null, queue);
   }
 
   private void testSafeMode(int numContainers) throws Exception {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index e08fdc1..d2044f5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -25,8 +25,7 @@
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics;
-import org.apache.hadoop.ozone.protocolPB
-    .ScmBlockLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import org.junit.After;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index b584f3f..a48b2a0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -46,6 +46,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
+import java.util.LinkedList;
 
 /**
  * A Node Manager to test replication.
@@ -351,7 +352,7 @@
   }
 
   @Override
-  public DatanodeDetails getNodeByAddress(String address) {
-    return null;
+  public List<DatanodeDetails> getNodesByAddress(String address) {
+    return new LinkedList<>();
   }
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
index 1b95418..ff30eca 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
@@ -27,15 +27,8 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.cli.container.CloseSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.CreateSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.DeleteSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand;
-import org.apache.hadoop.hdds.scm.cli.container.ListSubcommand;
-import org.apache.hadoop.hdds.scm.cli.pipeline.ActivatePipelineSubcommand;
-import org.apache.hadoop.hdds.scm.cli.pipeline.ClosePipelineSubcommand;
-import org.apache.hadoop.hdds.scm.cli.pipeline.DeactivatePipelineSubcommand;
-import org.apache.hadoop.hdds.scm.cli.pipeline.ListPipelinesSubcommand;
+import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands;
+import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands;
 import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -80,15 +73,8 @@
     versionProvider = HddsVersionProvider.class,
     subcommands = {
         SafeModeCommands.class,
-        ListSubcommand.class,
-        InfoSubcommand.class,
-        DeleteSubcommand.class,
-        CreateSubcommand.class,
-        CloseSubcommand.class,
-        ListPipelinesSubcommand.class,
-        ActivatePipelineSubcommand.class,
-        DeactivatePipelineSubcommand.class,
-        ClosePipelineSubcommand.class,
+        ContainerCommands.class,
+        PipelineCommands.class,
         TopologySubcommand.class,
         ReplicationManagerCommands.class
     },
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
index 173d0ce..4bf2013 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
@@ -20,7 +20,6 @@
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 
 import picocli.CommandLine.Command;
@@ -38,15 +37,15 @@
 public class CloseSubcommand implements Callable<Void> {
 
   @ParentCommand
-  private SCMCLI parent;
+  private ContainerCommands parent;
 
   @Parameters(description = "Id of the container to close")
   private long containerId;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
-      parent.checkContainerExists(scmClient, containerId);
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
+      parent.getParent().checkContainerExists(scmClient, containerId);
       scmClient.closeContainer(containerId);
       return null;
     }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
new file mode 100644
index 0000000..bf17bfd
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.cli.MissingSubcommandException;
+import org.apache.hadoop.hdds.scm.cli.SCMCLI;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.ParentCommand;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Subcommand to group container related operations.
+ */
+@Command(
+    name = "container",
+    description = "Container specific operations",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class,
+    subcommands = {
+        ListSubcommand.class,
+        InfoSubcommand.class,
+        DeleteSubcommand.class,
+        CreateSubcommand.class,
+        CloseSubcommand.class
+    })
+public class ContainerCommands implements Callable<Void> {
+
+  @ParentCommand
+  private SCMCLI parent;
+
+  public SCMCLI getParent() {
+    return parent;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    throw new MissingSubcommandException(
+        this.parent.getCmd().getSubcommands().get("container"));
+  }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
index 1dda9c4..eb79e50 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
@@ -20,7 +20,6 @@
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
@@ -45,7 +44,7 @@
       LoggerFactory.getLogger(CreateSubcommand.class);
 
   @ParentCommand
-  private SCMCLI parent;
+  private ContainerCommands parent;
 
   @Option(description = "Owner of the new container", defaultValue = "OZONE",
       required = false, names = {
@@ -55,7 +54,7 @@
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       ContainerWithPipeline container = scmClient.createContainer(owner);
       LOG.info("Container {} is created.",
           container.getContainerInfo().getContainerID());
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
index c163a3a..4989e03 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
@@ -21,7 +21,6 @@
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 
 import picocli.CommandLine.Command;
@@ -47,12 +46,12 @@
   private boolean force;
 
   @ParentCommand
-  private SCMCLI parent;
+  private ContainerCommands parent;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
-      parent.checkContainerExists(scmClient, containerId);
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
+      parent.getParent().checkContainerExists(scmClient, containerId);
       scmClient.deleteContainer(containerId, force);
       return null;
     }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index f202254..31fdb1d 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerDataProto;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
@@ -50,14 +49,14 @@
       LoggerFactory.getLogger(InfoSubcommand.class);
 
   @ParentCommand
-  private SCMCLI parent;
+  private ContainerCommands parent;
 
   @Parameters(description = "Decimal id of the container.")
   private long containerID;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       ContainerWithPipeline container = scmClient.
           getContainerWithPipeline(containerID);
       Preconditions.checkNotNull(container, "Container cannot be null");
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index 431befe..288d9fa 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -22,7 +22,6 @@
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
@@ -48,7 +47,7 @@
       LoggerFactory.getLogger(ListSubcommand.class);
 
   @ParentCommand
-  private SCMCLI parent;
+  private ContainerCommands parent;
 
   @Option(names = {"-s", "--start"},
       description = "Container id to start the iteration", required = true)
@@ -68,7 +67,7 @@
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
 
       List<ContainerInfo> containerList =
           scmClient.listContainer(startId, count);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
index d8f7138..ec4b1b7 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
@@ -20,34 +20,33 @@
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
 
 import java.util.concurrent.Callable;
 
 /**
- * Handler of activatePipeline command.
+ * Handler of activate pipeline command.
  */
 @CommandLine.Command(
-    name = "activatePipeline",
+    name = "activate",
     description = "Activates the given Pipeline",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
 public class ActivatePipelineSubcommand implements Callable<Void> {
 
   @CommandLine.ParentCommand
-  private SCMCLI parent;
+  private PipelineCommands parent;
 
   @CommandLine.Parameters(description = "ID of the pipeline to activate")
   private String pipelineId;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       scmClient.activatePipeline(
           HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
       return null;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
index d99823b..89a280e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
@@ -20,34 +20,33 @@
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
 
 import java.util.concurrent.Callable;
 
 /**
- * Handler of closePipeline command.
+ * Handler of close pipeline command.
  */
 @CommandLine.Command(
-    name = "closePipeline",
+    name = "close",
     description = "Close pipeline",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
 public class ClosePipelineSubcommand implements Callable<Void> {
 
   @CommandLine.ParentCommand
-  private SCMCLI parent;
+  private PipelineCommands parent;
 
   @CommandLine.Parameters(description = "ID of the pipeline to close")
   private String pipelineId;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       scmClient.closePipeline(
           HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
       return null;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
index 67342d0..4f4f741 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
@@ -20,34 +20,33 @@
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
 
 import java.util.concurrent.Callable;
 
 /**
- * Handler of deactivatePipeline command.
+ * Handler of deactivate pipeline command.
  */
 @CommandLine.Command(
-    name = "deactivatePipeline",
+    name = "deactivate",
     description = "Deactivates the given Pipeline",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
 public class DeactivatePipelineSubcommand implements Callable<Void> {
 
   @CommandLine.ParentCommand
-  private SCMCLI parent;
+  private PipelineCommands parent;
 
   @CommandLine.Parameters(description = "ID of the pipeline to deactivate")
   private String pipelineId;
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       scmClient.deactivatePipeline(
           HddsProtos.PipelineID.newBuilder().setId(pipelineId).build());
       return null;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
index 51c4043..8b3b1b3 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
@@ -19,24 +19,23 @@
 package org.apache.hadoop.hdds.scm.cli.pipeline;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
 
 import java.util.concurrent.Callable;
 
 /**
- * Handler of listPipelines command.
+ * Handler of list pipelines command.
  */
 @CommandLine.Command(
-    name = "listPipelines",
+    name = "list",
     description = "List all active pipelines",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
 public class ListPipelinesSubcommand implements Callable<Void> {
 
   @CommandLine.ParentCommand
-  private SCMCLI parent;
+  private PipelineCommands parent;
 
   @CommandLine.Option(names = {"-ffc", "--filterByFactor"},
       description = "Filter listed pipelines by Factor(ONE/one)",
@@ -53,7 +52,7 @@
 
   @Override
   public Void call() throws Exception {
-    try (ScmClient scmClient = parent.createScmClient()) {
+    try (ScmClient scmClient = parent.getParent().createScmClient()) {
       if (isNullOrEmpty(factor) && isNullOrEmpty(state)) {
         scmClient.listPipelines().forEach(System.out::println);
       } else {
@@ -72,4 +71,4 @@
   protected static boolean isNullOrEmpty(String str) {
     return ((str == null) || str.trim().isEmpty());
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java
new file mode 100644
index 0000000..948a51a
--- /dev/null
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.cli.pipeline;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.cli.MissingSubcommandException;
+import org.apache.hadoop.hdds.scm.cli.SCMCLI;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.ParentCommand;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Subcommand to group pipeline related operations.
+ */
+@Command(
+    name = "pipeline",
+    description = "Pipeline specific operations",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class,
+    subcommands = {
+        ListPipelinesSubcommand.class,
+        ActivatePipelineSubcommand.class,
+        DeactivatePipelineSubcommand.class,
+        ClosePipelineSubcommand.class
+    })
+public class PipelineCommands implements Callable<Void> {
+
+  @ParentCommand
+  private SCMCLI parent;
+
+  public SCMCLI getParent() {
+    return parent;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    throw new MissingSubcommandException(
+        this.parent.getCmd().getSubcommands().get("pipeline"));
+  }
+}
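
Note: with ContainerCommands and PipelineCommands in place, the flat SCMCLI
subcommands become two-level commands (for example the old "listPipelines"
becomes "pipeline list", and the container subcommands sit under "container"),
and each leaf command reaches the shared SCMCLI instance via parent.getParent().
A structural sketch of that picocli layout, using hypothetical Root/Group/Leaf
names in place of the real classes:

    import java.util.concurrent.Callable;
    import picocli.CommandLine.Command;
    import picocli.CommandLine.ParentCommand;

    // Hypothetical three-class chain mirroring SCMCLI -> PipelineCommands ->
    // ListPipelinesSubcommand. picocli injects each @ParentCommand field.
    @Command(name = "root", subcommands = GroupCommand.class)
    class RootCommand {
      String sharedResource() {
        return "scm-client";   // stands in for SCMCLI.createScmClient()
      }
    }

    @Command(name = "group", subcommands = LeafCommand.class)
    class GroupCommand {
      @ParentCommand
      private RootCommand parent;

      RootCommand getParent() {
        return parent;
      }
    }

    @Command(name = "leaf")
    class LeafCommand implements Callable<Void> {
      @ParentCommand
      private GroupCommand parent;

      @Override
      public Void call() {
        // Reach the root through the group, just as the subcommands in this
        // patch do via parent.getParent().createScmClient().
        System.out.println(parent.getParent().sharedResource());
        return null;
      }
    }
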
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 2262295..f9e7d6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.fs.CanSetReadahead;
 import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
@@ -1570,6 +1571,19 @@
     return pread(position, buf);
   }
 
+  @Override
+  public void readFully(long position, final ByteBuffer buf)
+      throws IOException {
+    int nread = 0;
+    while (buf.hasRemaining()) {
+      int nbytes = read(position + nread, buf);
+      if (nbytes < 0) {
+        throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
+      }
+      nread += nbytes;
+    }
+  }
+
   /** Utility class to encapsulate data node info and its address. */
   static final class DNAddrPair {
     final DatanodeInfo info;
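
Note: unlike the positional read(long, ByteBuffer) above, the new readFully
keeps issuing positional reads until the buffer has no remaining space and
throws EOFException if the file ends first, so callers never see a short count.
A usage sketch from the client side (the path is made up, and it assumes the
ByteBufferPositionedReadable interface declares readFully after this change):

    import java.io.EOFException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ByteBufferPositionedReadable;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ReadFullyExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.bin"))) {
          // DFSInputStream implements the positional ByteBuffer reads, so the
          // wrapped stream can be used through the interface directly.
          ByteBufferPositionedReadable reader =
              (ByteBufferPositionedReadable) in.getWrappedStream();
          ByteBuffer buf = ByteBuffer.allocate(4096);
          try {
            reader.readFully(0, buf);   // fills all 4096 bytes or throws
          } catch (EOFException e) {
            // The file was shorter than the buffer; a plain read(0, buf)
            // would have returned a short count instead of throwing.
          }
        }
      }
    }
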
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 73abb99..5a05ffe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -67,6 +68,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
+import org.apache.hadoop.hdfs.client.DfsPathCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -120,6 +122,8 @@
 import java.util.Map;
 import java.util.Optional;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
  * This object is the way end-user code interacts with a Hadoop
@@ -3404,4 +3408,22 @@
   public HdfsDataOutputStreamBuilder appendFile(Path path) {
     return new HdfsDataOutputStreamBuilder(this, path).append();
   }
+
+  /**
+   * HDFS client capabilities.
+   * Uses {@link DfsPathCapabilities} to keep {@code WebHdfsFileSystem} in sync.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    // qualify the path to make sure that it refers to the current FS.
+    final Path p = makeQualified(path);
+    Optional<Boolean> cap = DfsPathCapabilities.hasPathCapability(p,
+        capability);
+    if (cap.isPresent()) {
+      return cap.get();
+    }
+    return super.hasPathCapability(p, capability);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java
new file mode 100644
index 0000000..6cad69a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/DfsPathCapabilities.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.client;
+
+import java.util.Optional;
+
+import org.apache.hadoop.fs.CommonPathCapabilities;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
+public final class DfsPathCapabilities {
+
+  private DfsPathCapabilities() {
+  }
+
+  /**
+   * Common implementation of {@code hasPathCapability} for DFS and webhdfs.
+   * @param path path to check
+   * @param capability capability
+   * @return the capability answer, or empty as a cue for the FS to pass
+   * the query up to its superclass.
+   */
+  public static Optional<Boolean> hasPathCapability(final Path path,
+      final String capability) {
+    switch (validatePathCapabilityArgs(path, capability)) {
+
+    case CommonPathCapabilities.FS_ACLS:
+    case CommonPathCapabilities.FS_APPEND:
+    case CommonPathCapabilities.FS_CHECKSUMS:
+    case CommonPathCapabilities.FS_CONCAT:
+    case CommonPathCapabilities.FS_LIST_CORRUPT_FILE_BLOCKS:
+    case CommonPathCapabilities.FS_PATHHANDLES:
+    case CommonPathCapabilities.FS_PERMISSIONS:
+    case CommonPathCapabilities.FS_SNAPSHOTS:
+    case CommonPathCapabilities.FS_STORAGEPOLICY:
+    case CommonPathCapabilities.FS_XATTRS:
+      return Optional.of(true);
+    case CommonPathCapabilities.FS_SYMLINKS:
+      return Optional.of(FileSystem.areSymlinksEnabled());
+    default:
+      return Optional.empty();
+    }
+  }
+}
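
Note: DfsPathCapabilities gives DistributedFileSystem and WebHdfsFileSystem a
single shared answer table for hasPathCapability. A short caller-side probe
sketch (the path is illustrative; the probes use the CommonPathCapabilities
constants listed above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonPathCapabilities;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CapabilityProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/user/example");      // illustrative path
        FileSystem fs = dir.getFileSystem(conf);
        // With this change, DistributedFileSystem and WebHdfsFileSystem both
        // answer these probes through DfsPathCapabilities.
        if (fs.hasPathCapability(dir, CommonPathCapabilities.FS_SNAPSHOTS)) {
          System.out.println("snapshots supported under " + dir);
        }
        if (!fs.hasPathCapability(dir, CommonPathCapabilities.FS_SYMLINKS)) {
          System.out.println("symlinks are not enabled");
        }
      }
    }
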
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
index 4128ece..0a41254 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
@@ -17,21 +17,32 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.*;
-
 import javax.annotation.Nonnull;
 
-/**************************************************
- * A Block is a Hadoop FS primitive, identified by a
- * long.
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * A Block is a Hadoop FS primitive, identified by its block ID (a long). A
+ * block also has an accompanying generation stamp. A generation stamp is a
+ * monotonically increasing 8-byte number for each block that is maintained
+ * persistently by the NameNode. However, for the purposes of this class, two
+ * Blocks are considered equal iff they have the same block ID.
  *
- **************************************************/
+ * @see Block#equals(Object)
+ * @see Block#hashCode()
+ * @see Block#compareTo(Block)
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class Block implements Writable, Comparable<Block> {
@@ -119,8 +130,7 @@
     this.numBytes = len;
     this.generationStamp = genStamp;
   }
-  /**
-   */
+
   public long getBlockId() {
     return blockId;
   }
@@ -130,17 +140,21 @@
   }
 
   /**
+   * Get the block name. The name has the format:
+   * <p>
+   * blk_1, blk_2, blk_3, etc.
+   * </p>
+   *
+   * @return the block name
    */
   public String getBlockName() {
-    return new StringBuilder().append(BLOCK_FILE_PREFIX)
-        .append(blockId).toString();
+    return BLOCK_FILE_PREFIX + blockId;
   }
 
-  /**
-   */
   public long getNumBytes() {
     return numBytes;
   }
+
   public void setNumBytes(long len) {
     this.numBytes = len;
   }
@@ -161,28 +175,33 @@
    * @return the string representation of the block
    */
   public static String toString(final Block b) {
-    StringBuilder sb = new StringBuilder();
-    sb.append(BLOCK_FILE_PREFIX).
-       append(b.blockId).append("_").
-       append(b.generationStamp);
-    return sb.toString();
+    return new StringBuilder(BLOCK_FILE_PREFIX)
+        .append(b.blockId)
+        .append('_')
+        .append(b.generationStamp)
+        .toString();
   }
 
   /**
+   * Get the full block name. The name has the format:
+   * <p>
+   * blk_block-id_generation, blk_1_1, blk_1_2, blk_2_1, etc.
+   * </p>
+   *
+   * @return the full block name
    */
   @Override
   public String toString() {
-    return toString(this);
+    return Block.toString(this);
   }
 
   public void appendStringTo(StringBuilder sb) {
     sb.append(BLOCK_FILE_PREFIX)
       .append(blockId)
-      .append("_")
+      .append('_')
       .append(getGenerationStamp());
   }
 
-
   /////////////////////////////////////
   // Writable
   /////////////////////////////////////
@@ -223,32 +242,74 @@
     this.generationStamp = in.readLong();
   }
 
-  @Override // Comparable
+  /**
+   * Compares this Block with the specified Block for order. Returns a negative
+   * integer, zero, or a positive integer as this Block is less than, equal to,
+   * or greater than the specified Block. Blocks are ordered based on their
+   * block ID.
+   *
+   * @param b the Block to be compared
+   * @return a negative integer, zero, or a positive integer as this Block is
+   *         less than, equal to, or greater than the specified Block.
+   */
+  @Override
   public int compareTo(@Nonnull Block b) {
-    return blockId < b.blockId ? -1 :
-        blockId > b.blockId ? 1 : 0;
-  }
-
-  @Override // Object
-  public boolean equals(Object o) {
-    return this == o || o instanceof Block && compareTo((Block) o) == 0;
+    return Long.compare(blockId, b.blockId);
   }
 
   /**
-   * @return true if the two blocks have the same block ID and the same
-   * generation stamp, or if both blocks are null.
+   * Indicates whether some Block is "equal to" this one. Two blocks are
+   * considered equal if they have the same block ID.
+   *
+   * @param obj the reference object with which to compare.
+   * @return true if this Block is the same as the argument; false otherwise.
    */
-  public static boolean matchingIdAndGenStamp(Block a, Block b) {
-    if (a == b) return true; // same block, or both null
-    // only one null
-    return !(a == null || b == null) &&
-        a.blockId == b.blockId &&
-        a.generationStamp == b.generationStamp;
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (!(obj instanceof Block)) {
+      return false;
+    }
+    Block other = (Block) obj;
+    return (blockId == other.blockId);
   }
 
-  @Override // Object
+  /**
+   * Returns a hash code value for the Block. The hash code adheres to the
+   * general contract of hashCode. If two Blocks are equal according to the
+   * equals(Object) method, then calling the hashCode method on each of the two
+   * blocks produce the same integer result.
+   *
+   * @return a hash code value for this block
+   * @see Block#equals(Object)
+   */
+  @Override
   public int hashCode() {
-    //GenerationStamp is IRRELEVANT and should not be used here
-    return (int)(blockId^(blockId>>>32));
+    return Long.hashCode(blockId);
+  }
+
+  /**
+   * A helper function to determine if two blocks are equal, based on the block
+   * ID and the generation stamp. This is a different equality check than the
+   * default behavior of the Block class. Two blocks are considered equal by
+   * this function iff the two blocks have the same block ID and the same
+   * generation stamp, or if both blocks are null.
+   *
+   * @param a an object
+   * @param b an object to be compared with {@code a} for equality
+   * @return {@code true} if the blocks are deeply equal to each other and
+   *         {@code false} otherwise
+   * @see Block
+   */
+  public static boolean matchingIdAndGenStamp(Block a, Block b) {
+    if (a == b) {
+      return true;
+    } else if (a == null || b == null) {
+      return false;
+    } else {
+      return a.blockId == b.blockId && a.generationStamp == b.generationStamp;
+    }
   }
 }
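
Note: after this rewrite, equals() and hashCode() still depend only on the block
ID, while matchingIdAndGenStamp() additionally compares generation stamps. A
small demonstration of that contract (Block is audience-private, so this is
purely illustrative):

    import org.apache.hadoop.hdfs.protocol.Block;

    public final class BlockEqualityDemo {
      public static void main(String[] args) {
        // Same block ID, different generation stamps.
        Block a = new Block(1L, 1024L, 1001L);
        Block b = new Block(1L, 1024L, 1002L);

        // equals()/hashCode() consider only the block ID ...
        System.out.println(a.equals(b));                        // true
        System.out.println(a.hashCode() == b.hashCode());       // true
        // ... while the helper also requires matching generation stamps.
        System.out.println(Block.matchingIdAndGenStamp(a, b));  // false
      }
    }
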
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 5780ce3..67c34fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -286,8 +286,10 @@
     } catch (IOException e) {
       ioe = e;
     }
-    LOG.warn("Failed to connect to {} while fetching HAServiceState",
-        proxyInfo.getAddress(), ioe);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Failed to connect to {} while fetching HAServiceState",
+          proxyInfo.getAddress(), ioe);
+    }
     return null;
   }
 
@@ -435,11 +437,21 @@
           }
         }
 
-        // If we get here, it means all observers have failed.
-        LOG.warn("{} observers have failed for read request {}; also found {} "
-            + "standby, {} active, and {} unreachable. Falling back to active.",
-            failedObserverCount, method.getName(), standbyCount, activeCount,
-            unreachableCount);
+        // Only log a message if there are actual observer failures.
+        // Getting here with failedObserverCount = 0 could simply mean
+        // that no Observer node is running at all.
+        if (failedObserverCount > 0) {
+          // If we get here, it means all observers have failed.
+          LOG.warn("{} observers have failed for read request {}; "
+                  + "also found {} standby, {} active, and {} unreachable. "
+                  + "Falling back to active.", failedObserverCount,
+              method.getName(), standbyCount, activeCount, unreachableCount);
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Read falling back to active without observer read "
+                + "fail, is there no observer node running?");
+          }
+        }
       }
 
       // Either all observers have failed, observer reads are disabled,
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index baebdc1..d0b10cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -46,7 +46,9 @@
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
@@ -61,6 +63,7 @@
 import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
@@ -74,6 +77,7 @@
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.PathCapabilities;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsCreateModes;
@@ -91,6 +95,7 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.HdfsKMSUtil;
+import org.apache.hadoop.hdfs.client.DfsPathCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -132,6 +137,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /** A FileSystem for HDFS over the web. */
 public class WebHdfsFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable,
@@ -1125,6 +1132,11 @@
     ).run();
   }
 
+  @Override
+  public boolean supportsSymlinks() {
+    return true;
+  }
+
   /**
    * Create a symlink pointing to the destination path.
    */
@@ -2080,6 +2092,24 @@
   }
 
   /**
+   * HDFS client capabilities.
+   * Uses {@link DfsPathCapabilities} to keep in sync with HDFS.
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    // qualify the path to make sure that it refers to the current FS.
+    final Path p = makeQualified(path);
+    Optional<Boolean> cap = DfsPathCapabilities.hasPathCapability(p,
+        capability);
+    if (cap.isPresent()) {
+      return cap.get();
+    }
+    return super.hasPathCapability(p, capability);
+  }
+
+  /**
    * This class is used for opening, reading, and seeking files while using the
    * WebHdfsFileSystem. This class will invoke the retry policy when performing
    * any of these actions.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index fc76a07..d097eaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -32,35 +32,37 @@
 
   @Test
   public void testGetAddressFromString() throws Exception {
-    assertEquals(DFSUtilClient.getNNAddress("foo").getPort(),
-                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo/").getPort(),
-                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo:555").getPort(),
-                 555);
-    assertEquals(DFSUtilClient.getNNAddress("foo:555").getPort(),
-                 555);
+    assertEquals(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT,
+        DFSUtilClient.getNNAddress("foo").getPort());
+    assertEquals(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT,
+        DFSUtilClient.getNNAddress("hdfs://foo/").getPort());
+    assertEquals(555,
+        DFSUtilClient.getNNAddress("hdfs://foo:555").getPort());
+    assertEquals(555,
+        DFSUtilClient.getNNAddress("foo:555").getPort());
   }
 
   @Test
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
-        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT,
+        DFSUtilClient.getNNAddress(conf).getPort());
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(), 555);
+    assertEquals(555, DFSUtilClient.getNNAddress(conf).getPort());
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
-        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT,
+        DFSUtilClient.getNNAddress(conf).getPort());
+    FileSystem.setDefaultUri(conf, "foo:555");
+    assertEquals(555, DFSUtilClient.getNNAddress(conf).getPort());
   }
 
   @Test
   public void testGetUri() {
-    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo", 555)),
-                 URI.create("hdfs://foo:555"));
-    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo",
-            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
-        URI.create("hdfs://foo"));
+    assertEquals(URI.create("hdfs://foo:555"),
+        DFSUtilClient.getNNUri(new InetSocketAddress("foo", 555)));
+    assertEquals(URI.create("hdfs://foo"),
+        DFSUtilClient.getNNUri(new InetSocketAddress("foo",
+            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index ac909dd..0e5aae89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -26,6 +26,7 @@
 import com.google.common.base.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -85,8 +86,11 @@
 import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.HashMap;
+import java.util.Locale;
 import java.util.Map;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /**
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
  * <p>
@@ -1561,4 +1565,30 @@
     return JsonUtilClient.toSnapshottableDirectoryList(json);
   }
 
+  /**
+   * This filesystem's capabilities must be kept in sync with those of
+   * {@code DistributedFileSystem.hasPathCapability()}, except
+   * where the feature is not exposed (e.g. symlinks).
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    // qualify the path to make sure that it refers to the current FS.
+    final Path p = makeQualified(path);
+    switch (validatePathCapabilityArgs(p, capability)) {
+    case CommonPathCapabilities.FS_ACLS:
+    case CommonPathCapabilities.FS_APPEND:
+    case CommonPathCapabilities.FS_CONCAT:
+    case CommonPathCapabilities.FS_PERMISSIONS:
+    case CommonPathCapabilities.FS_SNAPSHOTS:
+    case CommonPathCapabilities.FS_STORAGEPOLICY:
+    case CommonPathCapabilities.FS_XATTRS:
+      return true;
+    case CommonPathCapabilities.FS_SYMLINKS:
+      return false;
+    default:
+      return super.hasPathCapability(p, capability);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
index 7bdaa84..56d0862 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
@@ -48,6 +48,8 @@
 
   static final String CONF_PREFIX = "httpfs.authentication.";
 
+  static final String HADOOP_HTTP_CONF_PREFIX = "hadoop.http.authentication.";
+
   private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET
       + ".file";
 
@@ -55,8 +57,9 @@
    * Returns the hadoop-auth configuration from HttpFSServer's configuration.
    * <p>
    * It returns all HttpFSServer's configuration properties prefixed with
-   * <code>httpfs.authentication</code>. The <code>httpfs.authentication</code>
-   * prefix is removed from the returned property names.
+   * <code>hadoop.http.authentication</code>, overridden by any properties
+   * prefixed with <code>httpfs.authentication</code>. Both prefixes are
+   * removed from the returned property names.
    *
    * @param configPrefix parameter not used.
    * @param filterConfig parameter not used.
@@ -72,6 +75,15 @@
     props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
     for (Map.Entry<String, String> entry : conf) {
       String name = entry.getKey();
+      if (name.startsWith(HADOOP_HTTP_CONF_PREFIX)) {
+        name = name.substring(HADOOP_HTTP_CONF_PREFIX.length());
+        props.setProperty(name, entry.getValue());
+      }
+    }
+
+    // Override Hadoop HTTP authentication configs with HttpFS-specific ones
+    for (Map.Entry<String, String> entry : conf) {
+      String name = entry.getKey();
       if (name.startsWith(CONF_PREFIX)) {
         String value = conf.get(name);
         name = name.substring(CONF_PREFIX.length());
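
Note: the filter configuration is now assembled in two passes: the
hadoop.http.authentication.* properties provide defaults and the
httpfs.authentication.* properties override them. A self-contained sketch of
that precedence (the property values are made up for illustration):

    import java.util.Map;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public final class AuthConfigPrecedenceDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("hadoop.http.authentication.type", "simple");
        conf.set("hadoop.http.authentication.token.validity", "36000");
        conf.set("httpfs.authentication.type", "kerberos");

        Properties props = new Properties();
        // Pass 1: hadoop.http.authentication.* supplies the defaults.
        for (Map.Entry<String, String> e : conf) {
          if (e.getKey().startsWith("hadoop.http.authentication.")) {
            props.setProperty(
                e.getKey().substring("hadoop.http.authentication.".length()),
                e.getValue());
          }
        }
        // Pass 2: httpfs.authentication.* overrides the defaults.
        for (Map.Entry<String, String> e : conf) {
          if (e.getKey().startsWith("httpfs.authentication.")) {
            props.setProperty(
                e.getKey().substring("httpfs.authentication.".length()),
                e.getValue());
          }
        }
        // type resolves to "kerberos" (the HttpFS-specific value wins);
        // token.validity is inherited from the hadoop.http prefix.
        System.out.println(props);
      }
    }
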
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
index 39f1caf..1135512 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebServer.java
@@ -24,11 +24,15 @@
 import java.net.MalformedURLException;
 import java.net.URI;
 import java.net.URL;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.ConfigurationWithLogging;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.security.authentication.server.ProxyUserAuthenticationFilterInitializer;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.slf4j.Logger;
@@ -98,6 +102,24 @@
     int port = conf.getInt(HTTP_PORT_KEY, HTTP_PORT_DEFAULT);
     URI endpoint = new URI(scheme, null, host, port, null, null, null);
 
+    // Remove the generic authentication filter initializers so that the
+    // default HttpFSAuthenticationFilter remains the authentication filter
+    String configuredInitializers = conf.get(HttpServer2.
+        FILTER_INITIALIZER_PROPERTY);
+    if (configuredInitializers != null) {
+      Set<String> target = new LinkedHashSet<String>();
+      String[] parts = configuredInitializers.split(",");
+      for (String filterInitializer : parts) {
+        if (!filterInitializer.equals(AuthenticationFilterInitializer.class.
+            getName()) && !filterInitializer.equals(
+            ProxyUserAuthenticationFilterInitializer.class.getName())) {
+          target.add(filterInitializer);
+        }
+      }
+      String actualInitializers =
+          org.apache.commons.lang3.StringUtils.join(target, ",");
+      conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, actualInitializers);
+    }
+
     httpServer = new HttpServer2.Builder()
         .setName(NAME)
         .setConf(conf)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 61d3b45..b2bba08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -136,6 +136,7 @@
   private Collection<String> nameNodeWhitelist;
 
   Configuration serviceHadoopConf;
+  private Configuration fileSystemConf;
 
   private AtomicInteger unmanagedFileSystems = new AtomicInteger();
 
@@ -188,6 +189,7 @@
     }
     try {
       serviceHadoopConf = loadHadoopConf(hadoopConfDir);
+      fileSystemConf = getNewFileSystemConfiguration();
     } catch (IOException ex) {
       throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
     }
@@ -212,6 +214,16 @@
     return hadoopConf;
   }
 
+  private Configuration getNewFileSystemConfiguration() {
+    Configuration conf = new Configuration(true);
+    ConfigurationUtils.copy(serviceHadoopConf, conf);
+    conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
+
+    // Force-clear server-side umask to make HttpFS match WebHDFS behavior
+    conf.set(FsPermission.UMASK_LABEL, "000");
+    return conf;
+  }
+
   @Override
   public void postInit() throws ServiceException {
     super.postInit();
@@ -397,14 +409,7 @@
 
   @Override
   public Configuration getFileSystemConfiguration() {
-    Configuration conf = new Configuration(true);
-    ConfigurationUtils.copy(serviceHadoopConf, conf);
-    conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
-
-    // Force-clear server-side umask to make HttpFS match WebHDFS behavior
-    conf.set(FsPermission.UMASK_LABEL, "000");
-
-    return conf;
+    return fileSystemConf;
   }
 
 }
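
For context, a hedged sketch of the effect of forcing the umask to 000 (the mode values and the stand-alone use of FsPermission here are illustrative, not HttpFS code): with a typical 022 umask the server would strip bits from the mode a client requested, while 000 leaves the requested mode untouched, which is the WebHDFS behaviour the comment refers to.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class UmaskEffect {
      public static void main(String[] args) {
        FsPermission requested = new FsPermission((short) 0666);   // rw-rw-rw- requested by a client

        Configuration defaultUmask = new Configuration(false);
        defaultUmask.set(FsPermission.UMASK_LABEL, "022");          // typical server-side default
        // prints rw-r--r--: group/other write bits are stripped
        System.out.println(requested.applyUMask(FsPermission.getUMask(defaultUmask)));

        Configuration clearedUmask = new Configuration(false);
        clearedUmask.set(FsPermission.UMASK_LABEL, "000");          // what the service forces above
        // prints rw-rw-rw-: the requested mode reaches HDFS unchanged
        System.out.println(requested.applyUMask(FsPermission.getUMask(clearedUmask)));
      }
    }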
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
index e884a12..5b8e469 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
@@ -148,7 +148,7 @@
   </property>
 
   <property>
-    <name>httpfs.authentication.signature.secret.file</name>
+    <name>hadoop.http.authentication.signature.secret.file</name>
     <value>${httpfs.config.dir}/httpfs-signature.secret</value>
     <description>
       File containing the secret to sign HttpFS hadoop-auth cookies.
@@ -160,11 +160,14 @@
 
       If the secret file specified here does not exist, random secret is
       generated at startup time.
+
+      httpfs.authentication.signature.secret.file is deprecated. Instead use
+      hadoop.http.authentication.signature.secret.file.
     </description>
   </property>
 
   <property>
-    <name>httpfs.authentication.type</name>
+    <name>hadoop.http.authentication.type</name>
     <value>simple</value>
     <description>
       Defines the authentication mechanism used by httpfs for its HTTP clients.
@@ -175,26 +178,35 @@
       'user.name' query string parameter.
 
       If using 'kerberos' HTTP clients must use HTTP SPNEGO or delegation tokens.
+
+      httpfs.authentication.type is deprecated. Instead use
+      hadoop.http.authentication.type.
     </description>
   </property>
 
   <property>
-    <name>httpfs.authentication.kerberos.principal</name>
+    <name>hadoop.http.authentication.kerberos.principal</name>
     <value>HTTP/${httpfs.hostname}@${kerberos.realm}</value>
     <description>
       The HTTP Kerberos principal used by HttpFS in the HTTP endpoint.
 
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPNEGO specification.
+
+      httpfs.authentication.kerberos.principal is deprecated. Instead use
+      hadoop.http.authentication.kerberos.principal.
     </description>
   </property>
 
   <property>
-    <name>httpfs.authentication.kerberos.keytab</name>
+    <name>hadoop.http.authentication.kerberos.keytab</name>
     <value>${user.home}/httpfs.keytab</value>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by httpfs in the HTTP endpoint.
+
+      httpfs.authentication.kerberos.keytab is deprecated. Instead use
+      hadoop.http.authentication.kerberos.keytab.
     </description>
   </property>
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
index ebf0dd7..23fa2e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
@@ -360,6 +360,25 @@
             shutdown_and_exit(cl, -1);
         }
 
+        // hdfsPreadFully (direct) test
+        if (hdfsPreadFully(fs, preadFile, 0, (void*)buffer,
+                (tSize)(strlen(fileContents) + 1))) {
+            fprintf(stderr, "Failed to preadFully (direct).");
+            shutdown_and_exit(cl, -1);
+        }
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to preadFully (direct). Expected %s but "
+                            "got %s\n", fileContents, buffer);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "PreadFully (direct) following %d bytes:\n%s\n",
+                num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "PreadFully changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
         // Disable the direct pread path so that we really go through the slow
         // read path
         hdfsFileDisableDirectPread(preadFile);
@@ -388,19 +407,39 @@
             shutdown_and_exit(cl, -1);
         }
 
+        // Test pread midway through the file rather than at the beginning
         num_pread_bytes = hdfsPread(fs, preadFile, 7, (void*)buffer, sizeof(buffer));
         if (strncmp(fileContentsChunk, buffer, strlen(fileContentsChunk)) != 0) {
-            fprintf(stderr, "Failed to pread (direct). Expected %s but got %s (%d bytes)\n",
+            fprintf(stderr, "Failed to pread. Expected %s but got %s (%d bytes)\n",
                     fileContentsChunk, buffer, num_read_bytes);
             shutdown_and_exit(cl, -1);
         }
-        fprintf(stderr, "Pread (direct) following %d bytes:\n%s\n", num_pread_bytes, buffer);
+        fprintf(stderr, "Pread following %d bytes:\n%s\n", num_pread_bytes, buffer);
         memset(buffer, 0, strlen(fileContents + 1));
         if (hdfsTell(fs, preadFile) != 0) {
             fprintf(stderr, "Pread changed position of file\n");
             shutdown_and_exit(cl, -1);
         }
 
+        // hdfsPreadFully test
+        if (hdfsPreadFully(fs, preadFile, 0, (void*)buffer,
+                            (tSize)(strlen(fileContents) + 1))) {
+            fprintf(stderr, "Failed to preadFully.");
+            shutdown_and_exit(cl, -1);
+        }
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to preadFully. Expected %s but got %s\n",
+                    fileContents, buffer);
+            shutdown_and_exit(cl, -1);
+        }
+        fprintf(stderr, "PreadFully following %d bytes:\n%s\n",
+                num_pread_bytes, buffer);
+        memset(buffer, 0, strlen(fileContents) + 1);
+        if (hdfsTell(fs, preadFile) != 0) {
+            fprintf(stderr, "PreadFully changed position of file\n");
+            shutdown_and_exit(cl, -1);
+        }
+
         hdfsCloseFile(fs, preadFile);
 
         // Test correct behaviour for unsupported filesystems
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index e6b2010..22020867 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -57,6 +57,9 @@
 tSize preadDirect(hdfsFS fs, hdfsFile file, tOffset position, void* buffer,
                   tSize length);
 
+int preadFullyDirect(hdfsFS fs, hdfsFile file, tOffset position, void* buffer,
+                  tSize length);
+
 static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo);
 
 /**
@@ -1645,6 +1648,7 @@
             "hdfsPread: NewByteArray");
         return -1;
     }
+
     jthr = invokeMethod(env, &jVal, INSTANCE, f->file,
             JC_FS_DATA_INPUT_STREAM, "read", "(J[BII)I", position,
             jbRarray, 0, length);
@@ -1727,6 +1731,119 @@
     return jVal.i;
 }
 
+/**
+ * Like hdfsPread, if the underlying stream supports the
+ * ByteBufferPositionedReadable interface then this method will transparently
+ * use readFully(long, ByteBuffer).
+ */
+int hdfsPreadFully(hdfsFS fs, hdfsFile f, tOffset position,
+                void* buffer, tSize length) {
+    JNIEnv* env;
+    jbyteArray jbRarray;
+    jthrowable jthr;
+
+    if (length == 0) {
+        return 0;
+    } else if (length < 0) {
+        errno = EINVAL;
+        return -1;
+    }
+    if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
+        errno = EBADF;
+        return -1;
+    }
+
+    if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_PREAD) {
+        return preadFullyDirect(fs, f, position, buffer, length);
+    }
+
+    env = getJNIEnv();
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return -1;
+    }
+
+    //Error checking... make sure that this file is 'readable'
+    if (f->type != HDFS_STREAM_INPUT) {
+        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    // JAVA EQUIVALENT:
+    //  byte [] bR = new byte[length];
+    //  fis.readFully(pos, bR, 0, length);
+    jbRarray = (*env)->NewByteArray(env, length);
+    if (!jbRarray) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                                             "hdfsPreadFully: NewByteArray");
+        return -1;
+    }
+
+    jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+                        JC_FS_DATA_INPUT_STREAM, "readFully", "(J[BII)V",
+                        position, jbRarray, 0, length);
+    if (jthr) {
+        destroyLocalReference(env, jbRarray);
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                      "hdfsPreadFully: FSDataInputStream#readFully");
+        return -1;
+    }
+
+    (*env)->GetByteArrayRegion(env, jbRarray, 0, length, buffer);
+    destroyLocalReference(env, jbRarray);
+    if ((*env)->ExceptionCheck(env)) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "hdfsPreadFully: GetByteArrayRegion");
+        return -1;
+    }
+    return 0;
+}
+
+int preadFullyDirect(hdfsFS fs, hdfsFile f, tOffset position, void* buffer,
+                  tSize length)
+{
+    // JAVA EQUIVALENT:
+    //  ByteBuffer buf = ByteBuffer.allocateDirect(length) // wraps C buffer
+    //  fis.readFully(position, buf);
+
+    jthrowable jthr;
+    jobject bb;
+
+    //Get the JNIEnv* corresponding to current thread
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+        errno = EINTERNAL;
+        return -1;
+    }
+
+    //Error checking... make sure that this file is 'readable'
+    if (f->type != HDFS_STREAM_INPUT) {
+        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
+        errno = EINVAL;
+        return -1;
+    }
+
+    //Read the requisite bytes
+    bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+    if (bb == NULL) {
+        errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+                "preadFullyDirect: NewDirectByteBuffer");
+        return -1;
+    }
+
+    jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+            JC_FS_DATA_INPUT_STREAM, "readFully",
+            "(JLjava/nio/ByteBuffer;)V", position, bb);
+    destroyLocalReference(env, bb);
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "preadFullyDirect: FSDataInputStream#readFully");
+        return -1;
+    }
+    return 0;
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
 {
     // JAVA EQUIVALENT
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
index 7e45634..e58a623 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -600,7 +600,8 @@
     tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
 
     /** 
-     * hdfsPread - Positional read of data from an open file.
+     * hdfsPread - Positional read of data from an open file. Reads up to
+     * 'length' bytes and may read fewer than requested.
      * @param fs The configured filesystem handle.
      * @param file The file handle.
      * @param position Position from which to read
@@ -612,6 +613,24 @@
     tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
                     void* buffer, tSize length);
 
+    /**
+     * hdfsPreadFully - Positional read of data from an open file. Reads
+     * exactly 'length' bytes into the buffer. Unlike hdfsRead and hdfsPread,
+     * this method does not return the number of bytes read: on success the
+     * entire buffer has been filled. If the end of the file is reached before
+     * 'length' bytes have been read, an exception is thrown and errno is set
+     * to EINTR.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param position Position from which to read
+     * @param buffer The buffer to copy read bytes into.
+     * @param length The length of the buffer.
+     * @return Returns 0 on success, -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+                    void* buffer, tSize length);
+
 
     /** 
      * hdfsWrite - Write data into an open file.
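
For reference, a hedged Java sketch of the calls the two native paths above delegate to over JNI: the byte[] overload of FSDataInputStream#readFully for the non-direct path, and the ByteBuffer overload (the method the JNI signature above binds to in the Hadoop version this patch targets) for the direct path. The file path and buffer size are illustrative; neither call moves the stream position, which is what the hdfsTell checks in the test exercise.

    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReadFullyEquivalent {
      public static void main(String[] args) throws Exception {
        // Assumption: "/tmp/example.txt" exists and holds at least 32 bytes.
        try (FileSystem fs = FileSystem.get(new Configuration());
             FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
          byte[] heap = new byte[32];
          in.readFully(0L, heap, 0, heap.length);        // what hdfsPreadFully invokes: (J[BII)V

          ByteBuffer direct = ByteBuffer.allocateDirect(32);
          in.readFully(0L, direct);                      // what preadFullyDirect invokes: (JLjava/nio/ByteBuffer;)V

          System.out.println(in.getPos());               // still 0: positional reads do not seek
        }
      }
    }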
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
index 54d4cf6..bda27b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/hdfs_shim.c
@@ -317,6 +317,12 @@
   return ret;
 }
 
+int hdfsPreadFully(hdfsFS fs, hdfsFile file, tOffset position,
+                void* buffer, tSize length) {
+  return libhdfs_hdfsPreadFully(fs->libhdfsRep, file->libhdfsRep, position,
+          buffer, length);
+}
+
 tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
                 tSize length) {
   return libhdfs_hdfsWrite(fs->libhdfsRep, file->libhdfsRep, buffer, length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
index b907768..0d01434 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_defines.h
@@ -47,6 +47,7 @@
 #define hdfsTell libhdfs_hdfsTell
 #define hdfsRead libhdfs_hdfsRead
 #define hdfsPread libhdfs_hdfsPread
+#define hdfsPreadFully libhdfs_hdfsPreadFully
 #define hdfsWrite libhdfs_hdfsWrite
 #define hdfsFlush libhdfs_hdfsFlush
 #define hdfsHFlush libhdfs_hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
index fce0e82..d46768c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_wrapper_undefs.h
@@ -47,6 +47,7 @@
 #undef hdfsTell
 #undef hdfsRead
 #undef hdfsPread
+#undef hdfsPreadFully
 #undef hdfsWrite
 #undef hdfsFlush
 #undef hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
index d0411c21..4b08d05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfspp_wrapper_defines.h
@@ -47,6 +47,7 @@
 #define hdfsTell libhdfspp_hdfsTell
 #define hdfsRead libhdfspp_hdfsRead
 #define hdfsPread libhdfspp_hdfsPread
+#define hdfsPreadFully libhdfspp_hdfsPreadFully
 #define hdfsWrite libhdfspp_hdfsWrite
 #define hdfsFlush libhdfspp_hdfsFlush
 #define hdfsHFlush libhdfspp_hdfsHFlush
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index b5c1472..1c3a5f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -236,4 +236,52 @@
       </plugin>
     </plugins>
   </build>
+  <profiles>
+    <profile>
+      <id>parallel-tests</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-maven-plugins</artifactId>
+            <executions>
+              <execution>
+                <id>parallel-tests-createdir</id>
+                <goals>
+                  <goal>parallel-tests-createdir</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <forkCount>${testsThreadCount}</forkCount>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <testsThreadCount>${testsThreadCount}</testsThreadCount>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+                <!-- This is intentionally the same directory for all JUnit -->
+                <!-- forks, for use in the very rare situation that -->
+                <!-- concurrent tests need to coordinate, such as using lock -->
+                <!-- files. -->
+                <test.build.shared.data>${test.build.data}</test.build.shared.data>
+
+                <!-- Due to a Maven quirk, setting this to just -->
+                <!-- surefire.forkNumber won't do the parameter substitution. -->
+                <!-- Putting a prefix in front of it like "fork-" makes it -->
+                <!-- work. -->
+                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+              </systemPropertyVariables>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    </profiles>
 </project>
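
As a usage sketch (the thread count here is arbitrary), the RBF tests can then be forked in parallel like the other HDFS modules, e.g. by running "mvn test -Pparallel-tests -DtestsThreadCount=4" from hadoop-hdfs-project/hadoop-hdfs-rbf; testsThreadCount feeds the surefire forkCount configured above, and each fork gets its own test.build.data, test.build.dir and hadoop.tmp.dir.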
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index e6214db..496326b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -384,6 +384,12 @@
 <!-- Mount table -->
 <script type="text/x-dust-template" id="tmpl-mounttable">
 <div class="page-header"><h1>Mount Table</h1></div>
+<div>
+  <ul class="federationhealth-mounttable-legend">
+    <li class="federationhealth-mounttable-icon federationhealth-mounttable-readonly">Read Only</li>
+    <li class="federationhealth-mounttable-icon federationhealth-mounttable-readwrite">Read Write</li>
+  </ul>
+</div>
 <small>
 <table class="table">
   <thead>
@@ -392,7 +398,7 @@
       <th>Target nameservice</th>
       <th>Target path</th>
       <th>Order</th>
-      <th>Read only</th>
+      <th>Mount option</th>
       <th>Fault tolerant</th>
       <th>Owner</th>
       <th>Group</th>
@@ -409,7 +415,7 @@
       <td>{nameserviceId}</td>
       <td>{path}</td>
       <td>{order}</td>
-      <td align="center" class="mount-table-icon mount-table-read-only-{readonly}" title="{status}"/>
+      <td align="center" class="federationhealth-mounttable-icon federationhealth-mounttable-{readonly}" title="{status}"/>
       <td align="center" class="mount-table-icon mount-table-fault-tolerant-{faulttolerant}" title="{ftStatus}"></td>
       <td>{ownerName}</td>
       <td>{groupName}</td>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
index 32edaf3..58a2382 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
@@ -315,10 +315,11 @@
       function augment_read_only(mountTable) {
         for (var i = 0, e = mountTable.length; i < e; ++i) {
           if (mountTable[i].readonly == true) {
-            mountTable[i].readonly = "true"
-            mountTable[i].status = "Read only"
+            mountTable[i].readonly = "readonly"
+            mountTable[i].status = "Read Only"
           } else {
-            mountTable[i].readonly = "false"
+            mountTable[i].readonly = "readwrite"
+            mountTable[i].status = "Read Write"
           }
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
index b2eef6a..d1420a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/static/rbf.css
@@ -120,7 +120,7 @@
     padding-right: 5pt;
 }
 
-.mount-table-icon:before {
+.federationhealth-mounttable-icon:before {
     font-size: 10pt;
     padding-right: 1pt;
     font-family: 'Glyphicons Halflings';
@@ -131,11 +131,31 @@
     -moz-osx-font-smoothing: grayscale;
 }
 
-.mount-table-read-only-true:before {
-    color: #5fa341;
+.federationhealth-mounttable-readonly:before {
+    color: #000000;
     content: "\e033";
 }
 
+.federationhealth-mounttable-readwrite:before {
+    color: #000000;
+    content: "\e065";
+}
+
+.federationhealth-mounttable-legend {
+    list-style-type: none;
+    text-align: right;
+}
+
+.federationhealth-mounttable-legend li {
+    display: inline;
+    padding: 10pt;
+    padding-left: 10pt;
+}
+
+.federationhealth-mounttable-legend li:before {
+    padding-right: 5pt;
+}
+
 .mount-table-fault-tolerant-true:before {
     color: #5fa341;
     content: "\e033";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
index eaf874b..472a789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
@@ -43,16 +43,20 @@
   }
 
   public static void createCluster() throws IOException {
-    createCluster(null);
+    createCluster(false);
   }
 
-  public static void createCluster(Configuration conf) throws IOException {
-    createCluster(true, 2, conf);
+  public static void createCluster(boolean security) throws IOException {
+    createCluster(true, 2, security);
   }
 
   public static void createCluster(
-      boolean ha, int numNameServices, Configuration conf) throws IOException {
+      boolean ha, int numNameServices, boolean security) throws IOException {
     try {
+      Configuration conf = null;
+      if (security) {
+        conf = SecurityConfUtil.initSecurity();
+      }
       cluster = new MiniRouterDFSCluster(ha, numNameServices, conf);
 
       // Start NNs and DNs and wait until ready
@@ -88,6 +92,11 @@
       cluster.shutdown();
       cluster = null;
     }
+    try {
+      SecurityConfUtil.destroy();
+    } catch (Exception e) {
+      throw new IOException("Cannot destroy security context", e);
+    }
   }
 
   public static MiniDFSCluster getCluster() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
index 6154eee..47ab0d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java
@@ -65,6 +65,9 @@
   private static final String ROUTER_USER_NAME = "router";
   private static final String PREFIX = "hadoop.http.authentication.";
 
+  private static MiniKdc kdc;
+  private static File baseDir;
+
   private static String spnegoPrincipal;
   private static String routerPrincipal;
 
@@ -78,14 +81,14 @@
 
   public static Configuration initSecurity() throws Exception {
     // delete old test dir
-    File baseDir = GenericTestUtils.getTestDir(
+    baseDir = GenericTestUtils.getTestDir(
         SecurityConfUtil.class.getSimpleName());
     FileUtil.fullyDelete(baseDir);
     assertTrue(baseDir.mkdirs());
 
     // start a mini kdc with default conf
     Properties kdcConf = MiniKdc.createConf();
-    MiniKdc kdc = new MiniKdc(kdcConf, baseDir);
+    kdc = new MiniKdc(kdcConf, baseDir);
     kdc.start();
 
     Configuration conf = new HdfsConfiguration();
@@ -156,4 +159,12 @@
 
     return conf;
   }
+
+  public static void destroy() throws Exception {
+    if (kdc != null) {
+      kdc.stop();
+      FileUtil.fullyDelete(baseDir);
+      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java
index fe4951db..33f59f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java
@@ -21,8 +21,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
 /**
  * Test secure append operations on the Router-based FS.
  */
@@ -31,7 +29,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java
index c9a0cc8..d45f639 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java
@@ -23,9 +23,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure concat operations on the Router-based FS.
  */
@@ -34,7 +31,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
     // perform a simple operation on the cluster to verify it is up
     RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/"));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java
index dc264b0..9327c1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure create operations on the Router-based FS.
  */
@@ -33,7 +30,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java
index f22489e..77cb602 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java
@@ -46,7 +46,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(false, 1, initSecurity());
+    RouterHDFSContract.createCluster(false, 1, true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
index 57cc138..43af1a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
@@ -21,8 +21,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
 /**
  * Test secure delete operations on the Router-based FS.
  */
@@ -31,7 +29,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
index 13e4e96..5643cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
@@ -21,9 +21,6 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure get file status operations on the Router-based FS.
  */
@@ -32,7 +29,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
index 7c223a6..bb1564f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure dir operations on the Router-based FS.
  */
@@ -33,7 +30,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
index 434402c..91749fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure open operations on the Router-based FS.
  */
@@ -32,7 +29,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
index 29d7398..3de39fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure rename operations on the Router-based FS.
  */
@@ -33,7 +30,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
index faa08ba..f7fb59c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure root dir operations on the Router-based FS.
  */
@@ -33,7 +30,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
index f281b47..e318f0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure seek operations on the Router-based FS.
  */
@@ -32,7 +29,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
index 8f86b95..69123f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
@@ -22,9 +22,6 @@
 
 import java.io.IOException;
 
-import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-
-
 /**
  * Test secure set times operations on the Router-based FS.
  */
@@ -33,7 +30,7 @@
 
   @BeforeClass
   public static void createCluster() throws Exception {
-    RouterHDFSContract.createCluster(initSecurity());
+    RouterHDFSContract.createCluster(true);
   }
 
   @AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index f0e4dc1..5e36262 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -177,7 +177,7 @@
   }
 
   @Test
-  public void testStorageSpaceQuotaaExceed() throws Exception {
+  public void testStorageSpaceQuotaExceed() throws Exception {
     long ssQuota = 3071;
     final FileSystem nnFs1 = nnContext1.getFileSystem();
     final FileSystem nnFs2 = nnContext2.getFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java
index b5f6849..7a1859c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/TestRouterHttpDelegationToken.java
@@ -144,6 +144,7 @@
       router.stop();
       router.close();
     }
+    SecurityConfUtil.destroy();
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.1.3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.1.3.xml
new file mode 100644
index 0000000..474b4b3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.1.3.xml
@@ -0,0 +1,676 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu Sep 12 04:55:36 UTC 2019 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 3.1.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.1.3.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.1.3.jar:/maven/org/slf4j/slf4j-api/1.7.25/slf4j-api-1.7.25.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/4.41.1/nimbus-jose-jwt-4.41.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.3/json-smart-2.3.jar:/maven/net/minidev/accessors-smart/1.2/accessors-smart-1.2.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/zookeeper/zookeeper/3.4.13/zookeeper-3.4.13.jar:/maven/org/apache/curator/curator-framework/2.13.0/curator-framework-2.13.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.1.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/eclipse/jetty/jetty-servlet/9.3.24.v20180605/jetty-servlet-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-security/9.3.24.v20180605/jetty-security-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-webapp/9.3.24.v20180605/jetty-webapp-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-xml/9.3.24.v20180605/jetty-xml-9.3.24.v20180605.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-beanutils/commons-beanutils/1.9.3/commons-beanutils-1.9.3
.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.4/commons-lang3-3.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.5/snappy-java-1.0.5.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.13.0/curator-client-2.13.0.jar:/maven/org/apache/curator/curator-recipes/2.13.0/curator-recipes-2.13.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.18/commons-compress-1.18.jar:/maven/org/codehaus/woodstox/stax2-api/3.1.4/stax2-api-3.1.4.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.0.3/woodstox-core-5.0.3.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.1.3.jar:/maven/com/squareup/okhttp/okhttp/2.7.5/okhttp-2.7.5.jar:/maven/com/squareup/okio/okio/1.6.0/okio-1.6.0.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/errorprone/error_prone_annotations/2.2.0/error_prone_annotations-2.2.0.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/eclipse/jetty/jetty-server/9.3.24.v20180605/jetty-server-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-http/9.3.24.v20180605/jetty-http-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-io/9.3.24.v20180605/jetty-io-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-util/9.3.24.v20180605/jetty-util-9.3.24.v20180605.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.3.24.v20180605/jetty-util-ajax-9.3.24.v20180605.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.11/commons-codec-1.11.jar:/maven/commons-io/commons-io/2.5/commons-io-2.5.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.25/slf4j-log4j12-1.7.25.jar:/maven/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:/maven/io/netty/netty-all/4.0.52.Final/netty-all-4.0.52.Final.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8
.jar:/maven/xerces/xercesImpl/2.11.0/xercesImpl-2.11.0.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 3.1.3 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.aliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+  <class name="InMemoryAliasMap" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="init" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="list" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol.IterationResult"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="marker" type="java.util.Optional"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="read" return="java.util.Optional"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getBlockPoolId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="fromProvidedStorageLocationBytes" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocationDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="com.google.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="fromBlockBytes" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="com.google.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[InMemoryAliasMap is an implementation of the InMemoryAliasMapProtocol for
+ use with LevelDB.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+  <!-- start interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <interface name="BlockAlias"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface used to load provided blocks.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.FileRegion -->
+  <class name="FileRegion" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.common.BlockAlias"/>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProvidedStorageLocation" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class is used to represent provided blocks that are file regions,
+ i.e., can be described using (path, offset, length).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.FileRegion -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+  <class name="BlockAliasMap" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BlockAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a reader to the alias map.
+ @param opts reader options
+ @param blockPoolID block pool id to use
+ @return {@link Reader} to the alias map. If a Reader for the blockPoolID
+ cannot be created, this will return null.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the writer for the alias map.
+ @param opts writer options.
+ @param blockPoolID block pool id to use
+ @return {@link Writer} to the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="refresh"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Refresh the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An abstract class used to read and write block maps for provided blocks.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap.impl">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <class name="LevelDBFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="LevelDBFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A LevelDB based implementation of {@link BlockAliasMap}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+  <class name="TextFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="TextFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="blockPoolIDFromFileName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="fileNameFromBlockPoolID" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockPoolID" type="java.lang.String"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class is used for block maps stored as text files,
+ with a specified delimiter.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.command">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be overridden by implementations to supply a custom Access Control
+ Enforcer that provides an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
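
Note: the alias-map classes listed in the API report above (InMemoryAliasMap, BlockAliasMap and its LevelDB/text-file implementations) together form the provided-storage lookup API. A minimal, hedged read-only sketch of how that surface might be exercised, restricted to the method signatures shown in the XML; the block id and block-pool id are purely illustrative, and the configuration is assumed to already carry the LevelDB directory required by the in-memory implementation:

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;

public class AliasMapReadSketch {
  public static void main(String[] args) throws IOException {
    // Assumed: conf already points the alias map at its LevelDB directory.
    Configuration conf = new Configuration();
    InMemoryAliasMap aliasMap = InMemoryAliasMap.init(conf, "BP-example-1");
    try {
      // Look up the provided-storage location of an illustrative block id.
      Optional<ProvidedStorageLocation> loc = aliasMap.read(new Block(1073741825L));
      loc.ifPresent(l -> System.out.println("block is provided at: " + l));
    } finally {
      aliasMap.close();
    }
  }
}
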
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0826cef..3b776bab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1171,6 +1171,8 @@
   public static final String  DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY = "dfs.qjournal.write-txns.timeout.ms";
   public static final String  DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_KEY = "dfs.qjournal.http.open.timeout.ms";
   public static final String  DFS_QJOURNAL_HTTP_READ_TIMEOUT_KEY = "dfs.qjournal.http.read.timeout.ms";
+  public static final String DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_KEY =
+      "dfs.qjournal.parallel-read.num-threads";
   public static final int     DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT = 20000;
   public static final int     DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT = 120000;
   public static final int     DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT = 120000;
@@ -1181,7 +1183,8 @@
   public static final int     DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT = 20000;
   public static final int     DFS_QJOURNAL_HTTP_OPEN_TIMEOUT_DEFAULT = URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
   public static final int     DFS_QJOURNAL_HTTP_READ_TIMEOUT_DEFAULT = URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT;
-  
+  public static final int DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT = 5;
+
   public static final String DFS_MAX_NUM_BLOCKS_TO_LOG_KEY = "dfs.namenode.max-num-blocks-to-log";
   public static final long   DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT = 1000l;
   
@@ -1354,7 +1357,11 @@
       "dfs.namenode.send.qop.enabled";
   public static final boolean DFS_NAMENODE_SEND_QOP_ENABLED_DEFAULT = false;
 
-  // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
+  public static final String DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY =
+      "dfs.namenode.state.context.enabled";
+  public static final boolean DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT = false;
+
+  // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
       = HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY;
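
Note: among the constants added above, dfs.qjournal.parallel-read.num-threads bounds the parallel edit-tailing pool created in IPCLoggerChannel further below. A hedged sketch of just the configuration wiring, using a plain JDK fixed-size pool for illustration rather than Hadoop's HadoopThreadPoolExecutor:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ParallelReadPoolSketch {
  // Reads the new knob (default 5) and builds a pool capped at that size.
  public static ExecutorService create(Configuration conf) {
    int numThreads = conf.getInt(
        DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_KEY,
        DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT);
    return Executors.newFixedThreadPool(numThreads);
  }
}
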
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 3a882e5..d5ec5ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -27,6 +27,7 @@
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -54,6 +55,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StopWatch;
+import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -270,12 +272,14 @@
    */
   @VisibleForTesting
   protected ExecutorService createParallelExecutor() {
-    return Executors.newCachedThreadPool(
-        new ThreadFactoryBuilder()
-            .setDaemon(true)
+    int numThreads =
+        conf.getInt(DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_KEY,
+            DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT);
+    return new HadoopThreadPoolExecutor(1, numThreads, 60L,
+        TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
+        new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("Logger channel (from parallel executor) to " + addr)
-            .setUncaughtExceptionHandler(
-                UncaughtExceptionHandlers.systemExit())
+            .setUncaughtExceptionHandler(UncaughtExceptionHandlers.systemExit())
             .build());
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index ef32eb1..49d9993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeoutException;
@@ -64,6 +66,7 @@
   private static final float WAIT_PROGRESS_WARN_THRESHOLD = 0.7f;
   private final StopWatch quorumStopWatch;
   private final Timer timer;
+  private final List<ListenableFuture<RESULT>> allCalls;
   
   static <KEY, RESULT> QuorumCall<KEY, RESULT> create(
       Map<KEY, ? extends ListenableFuture<RESULT>> calls, Timer timer) {
@@ -71,6 +74,7 @@
     for (final Entry<KEY, ? extends ListenableFuture<RESULT>> e : calls.entrySet()) {
       Preconditions.checkArgument(e.getValue() != null,
           "null future for key: " + e.getKey());
+      qr.addCall(e.getValue());
       Futures.addCallback(e.getValue(), new FutureCallback<RESULT>() {
         @Override
         public void onFailure(Throwable t) {
@@ -102,6 +106,11 @@
     // Only instantiated from factory method above
     this.timer = timer;
     this.quorumStopWatch = new StopWatch(timer);
+    this.allCalls = new ArrayList<>();
+  }
+
+  private void addCall(ListenableFuture<RESULT> call) {
+    allCalls.add(call);
   }
 
   /**
@@ -212,6 +221,15 @@
   }
 
   /**
+   * Cancel any outstanding calls.
+   */
+  void cancelCalls() {
+    for (ListenableFuture<RESULT> call : allCalls) {
+      call.cancel(true);
+    }
+  }
+
+  /**
    * Check if any of the responses came back with an AssertionError.
    * If so, it re-throws it, even if there was a quorum of responses.
    * This code only runs if assertions are enabled for this class,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 674ca70..abc2d4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -579,6 +579,8 @@
         LOG.debug(msg.toString());
       }
     }
+    // Cancel any outstanding calls to JNs.
+    q.cancelCalls();
 
     int maxAllowedTxns = !onlyDurableTxns ? highestTxnCount :
         responseCounts.get(responseCounts.size() - loggers.getMajoritySize());
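
Note: QuorumCall now remembers every submitted future so that QuorumJournalManager can cancel the stragglers once a quorum of responses is in. A self-contained, hedged JDK-only illustration of that cancel-after-quorum pattern (journal-node delays are simulated with sleeps; class and variable names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class QuorumCancelSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(3);
    List<Future<String>> calls = new ArrayList<>();
    long[] delays = {50, 100, 5000};              // the last "journal node" is slow
    for (long d : delays) {
      calls.add(pool.submit((Callable<String>) () -> {
        Thread.sleep(d);
        return "ok after " + d + "ms";
      }));
    }
    int majority = calls.size() / 2 + 1;
    int done = 0;
    while (done < majority) {                     // crude poll; the real code uses callbacks
      Thread.sleep(10);
      done = (int) calls.stream().filter(Future::isDone).count();
    }
    for (Future<String> f : calls) {
      f.cancel(true);                             // no-op for finished calls, interrupts the slow one
    }
    pool.shutdown();
  }
}
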
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 956bd65..a56074a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -65,17 +65,6 @@
 
   public static final Token<BlockTokenIdentifier> DUMMY_TOKEN = new Token<BlockTokenIdentifier>();
 
-  /**
-   * In order to prevent serial No. of different NameNode from overlapping,
-   * Using 6 bits (identify 64=2^6 namenodes, and presuppose that no scenario
-   * where deploy more than 64 namenodes (include ANN, SBN, Observers, etc.)
-   * in one namespace) to identify index of NameNode, and the remainder 26 bits
-   * auto-incr to change the serial No.
-   */
-  @VisibleForTesting
-  public static final int NUM_VALID_BITS = 26;
-  private static final int LOW_MASK = (1 << NUM_VALID_BITS) - 1;
-
   private final boolean isMaster;
 
   /**
@@ -92,8 +81,8 @@
   private String blockPoolId;
   private final String encryptionAlgorithm;
 
-  private final int nnIndex;
-
+  private final int intRange;
+  private final int nnRangeStart;
   private final boolean useProto;
 
   private final boolean shouldWrapQOP;
@@ -133,8 +122,6 @@
         encryptionAlgorithm, nnIndex, numNNs, useProto, shouldWrapQOP);
     Preconditions.checkArgument(nnIndex >= 0);
     Preconditions.checkArgument(numNNs > 0);
-    setSerialNo(new SecureRandom().nextInt());
-    generateKeys();
   }
 
   /**
@@ -152,7 +139,8 @@
   private BlockTokenSecretManager(boolean isMaster, long keyUpdateInterval,
       long tokenLifetime, String blockPoolId, String encryptionAlgorithm,
       int nnIndex, int numNNs, boolean useProto, boolean shouldWrapQOP) {
-    this.nnIndex = nnIndex;
+    this.intRange = Integer.MAX_VALUE / numNNs;
+    this.nnRangeStart = intRange * nnIndex;
     this.isMaster = isMaster;
     this.keyUpdateInterval = keyUpdateInterval;
     this.tokenLifetime = tokenLifetime;
@@ -162,12 +150,19 @@
     this.useProto = useProto;
     this.shouldWrapQOP = shouldWrapQOP;
     this.timer = new Timer();
+    setSerialNo(new SecureRandom().nextInt(Integer.MAX_VALUE));
+    LOG.info("Block token key range: [{}, {})",
+        nnRangeStart, nnRangeStart + intRange);
     generateKeys();
   }
 
   @VisibleForTesting
-  public synchronized void setSerialNo(int serialNo) {
-    this.serialNo = (serialNo & LOW_MASK) | (nnIndex << NUM_VALID_BITS);
+  public synchronized void setSerialNo(int nextNo) {
+    // Fold the serial number into this NameNode's range: take it modulo the
+    // range size, then add this NameNode's range start (range * index).
+    this.serialNo = (nextNo % intRange) + (nnRangeStart);
+    assert serialNo >= nnRangeStart && serialNo < (nnRangeStart + intRange) :
+      "serialNo " + serialNo + " is not in the designated range: [" +
+      nnRangeStart + ", " + (nnRangeStart + intRange) + ")";
   }
 
   public void setBlockPoolId(String blockPoolId) {
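
Note: the rewritten serial-number scheme above replaces the old 6-bit NameNode prefix with a plain partition of the non-negative int range, one contiguous slice per NameNode index. A standalone worked example of the arithmetic (the class name and values are illustrative only):

public class SerialRangeSketch {
  public static void main(String[] args) {
    int numNNs = 3;
    int intRange = Integer.MAX_VALUE / numNNs;            // 715827882 per NameNode
    int nextNo = 1_234_567_890;                           // stands in for SecureRandom#nextInt
    for (int nnIndex = 0; nnIndex < numNNs; nnIndex++) {
      int nnRangeStart = intRange * nnIndex;
      int serialNo = (nextNo % intRange) + nnRangeStart;  // folded into this NN's slice
      System.out.printf("nn %d owns [%d, %d), sample serialNo=%d%n",
          nnIndex, nnRangeStart, nnRangeStart + (long) intRange, serialNo);
    }
  }
}
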
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index d160f61..dc6cf32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -182,6 +182,12 @@
   abstract boolean hasNoStorage();
 
   /**
+   * Checks whether this block has a Provided replica.
+   * @return true if this block has a replica on Provided storage.
+   */
+  abstract boolean isProvided();
+
+  /**
    * Find specified DatanodeStorageInfo.
    * @return DatanodeStorageInfo or null if not found.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 149efc9..7378e6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -19,6 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 
@@ -81,6 +82,19 @@
   }
 
   @Override
+  boolean isProvided() {
+    int len = getCapacity();
+    for (int idx = 0; idx < len; idx++) {
+      DatanodeStorageInfo storage = getStorageInfo(idx);
+      if (storage != null
+          && storage.getStorageType().equals(StorageType.PROVIDED)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
   public int numNodes() {
     assert this.storages != null : "BlockInfo is not initialized";
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 8bc63c1..16265de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -245,6 +245,15 @@
   }
 
   /**
+   * Striped blocks on Provided storage are not supported. All blocks on
+   * Provided storage are assumed to be "contiguous".
+   */
+  @Override
+  boolean isProvided() {
+    return false;
+  }
+
+  /**
    * This class contains datanode storage information and block index in the
    * block group.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6c349ffd..66802d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2009,6 +2009,7 @@
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
+  @VisibleForTesting
   BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
     // skip abandoned block or block reopened for append
@@ -2053,7 +2054,9 @@
       additionalReplRequired = requiredRedundancy - numReplicas.liveReplicas()
           - pendingNum;
     } else {
-      additionalReplRequired = 1; // Needed on a new rack
+      // Violates placement policy. Needed on a new rack or domain etc.
+      BlockPlacementStatus placementStatus = getBlockPlacementStatus(block);
+      additionalReplRequired = placementStatus.getAdditionalReplicasRequired();
     }
 
     final BlockCollection bc = getBlockCollection(block);
@@ -2086,20 +2089,6 @@
     }
   }
 
-  private boolean isInNewRack(DatanodeDescriptor[] srcs,
-      DatanodeDescriptor target) {
-    LOG.debug("check if target {} increases racks, srcs={}", target,
-        Arrays.asList(srcs));
-    for (DatanodeDescriptor src : srcs) {
-      if (!src.isDecommissionInProgress() &&
-          src.getNetworkLocation().equals(target.getNetworkLocation())) {
-        LOG.debug("the target {} is in the same rack with src {}", target, src);
-        return false;
-      }
-    }
-    return true;
-  }
-
   private boolean validateReconstructionWork(BlockReconstructionWork rw) {
     BlockInfo block = rw.getBlock();
     int priority = rw.getPriority();
@@ -2125,10 +2114,16 @@
     }
 
     DatanodeStorageInfo[] targets = rw.getTargets();
+    BlockPlacementStatus placementStatus = getBlockPlacementStatus(block);
     if ((numReplicas.liveReplicas() >= requiredRedundancy) &&
-        (!isPlacementPolicySatisfied(block)) ) {
-      if (!isInNewRack(rw.getSrcNodes(), targets[0].getDatanodeDescriptor())) {
-        // No use continuing, unless a new rack in this case
+        (!placementStatus.isPlacementPolicySatisfied())) {
+      BlockPlacementStatus newPlacementStatus =
+          getBlockPlacementStatus(block, targets);
+      if (!newPlacementStatus.isPlacementPolicySatisfied() &&
+          (newPlacementStatus.getAdditionalReplicasRequired() >=
+              placementStatus.getAdditionalReplicasRequired())) {
+        // If the new targets neither meet the placement policy nor at least
+        // reduce the number of additional replicas required, there is no use
+        // continuing.
         return false;
       }
       // mark that the reconstruction work is to replicate internal block to a
@@ -2359,11 +2354,13 @@
       if (isStriped) {
         blockIndex = ((BlockInfoStriped) block)
             .getStorageBlockIndex(storage);
-        if (!bitSet.get(blockIndex)) {
-          bitSet.set(blockIndex);
-        } else if (state == StoredReplicaState.LIVE) {
-          numReplicas.subtract(StoredReplicaState.LIVE, 1);
-          numReplicas.add(StoredReplicaState.REDUNDANT, 1);
+        if (state == StoredReplicaState.LIVE) {
+          if (!bitSet.get(blockIndex)) {
+            bitSet.set(blockIndex);
+          } else {
+            numReplicas.subtract(StoredReplicaState.LIVE, 1);
+            numReplicas.add(StoredReplicaState.REDUNDANT, 1);
+          }
         }
       }
 
@@ -3159,23 +3156,26 @@
               + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
         }
         boolean wrongSize;
+        long blockMapSize;
         if (storedBlock.isStriped()) {
           assert BlockIdManager.isStripedBlockID(reported.getBlockId());
           assert storedBlock.getBlockId() ==
               BlockIdManager.convertToStripedID(reported.getBlockId());
           BlockInfoStriped stripedBlock = (BlockInfoStriped) storedBlock;
           int reportedBlkIdx = BlockIdManager.getBlockIndex(reported);
-          wrongSize = reported.getNumBytes() != getInternalBlockLength(
-              stripedBlock.getNumBytes(), stripedBlock.getCellSize(),
-              stripedBlock.getDataBlockNum(), reportedBlkIdx);
+          blockMapSize = getInternalBlockLength(stripedBlock.getNumBytes(),
+              stripedBlock.getCellSize(), stripedBlock.getDataBlockNum(),
+              reportedBlkIdx);
+          wrongSize = reported.getNumBytes() != blockMapSize;
         } else {
-          wrongSize = storedBlock.getNumBytes() != reported.getNumBytes();
+          blockMapSize = storedBlock.getNumBytes();
+          wrongSize = blockMapSize != reported.getNumBytes();
         }
         if (wrongSize) {
           return new BlockToMarkCorrupt(new Block(reported), storedBlock,
               "block is " + ucState + " and reported length " +
               reported.getNumBytes() + " does not match " +
-              "length in block map " + storedBlock.getNumBytes(),
+              "length in block map " + blockMapSize,
               Reason.SIZE_MISMATCH);
         } else {
           return null; // not corrupt
@@ -4557,7 +4557,25 @@
   }
 
   boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
+    return getBlockPlacementStatus(storedBlock, null)
+        .isPlacementPolicySatisfied();
+  }
+
+  BlockPlacementStatus getBlockPlacementStatus(BlockInfo storedBlock) {
+    return getBlockPlacementStatus(storedBlock, null);
+  }
+
+  BlockPlacementStatus getBlockPlacementStatus(BlockInfo storedBlock,
+      DatanodeStorageInfo[] additionalStorage) {
     List<DatanodeDescriptor> liveNodes = new ArrayList<>();
+    if (additionalStorage != null) {
+      // The additional storages are potential new targets for the block. If
+      // any are passed, include them when checking whether the placement
+      // policy would be satisfied once the block is also stored on those nodes.
+      for (DatanodeStorageInfo s : additionalStorage) {
+        liveNodes.add(getDatanodeDescriptorFromStorage(s));
+      }
+    }
     Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
         .getNodes(storedBlock);
     for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
@@ -4565,7 +4583,22 @@
           && storage.getState() == State.NORMAL) {
         // assume the policy is satisfied for blocks on PROVIDED storage
         // as long as the storage is in normal state.
-        return true;
+        return new BlockPlacementStatus() {
+          @Override
+          public boolean isPlacementPolicySatisfied() {
+            return true;
+          }
+
+          @Override
+          public String getErrorDescription() {
+            return null;
+          }
+
+          @Override
+          public int getAdditionalReplicasRequired() {
+            return 0;
+          }
+        };
       }
       final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage);
       // Nodes under maintenance should be counted as valid replicas from
@@ -4581,8 +4614,7 @@
         .getPolicy(blockType);
     int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock)
         .getRealTotalBlockNum() : storedBlock.getReplication();
-    return placementPolicy.verifyBlockPlacement(locs, numReplicas)
-        .isPlacementPolicySatisfied();
+    return placementPolicy.verifyBlockPlacement(locs, numReplicas);
   }
 
   boolean isNeededReconstructionForMaintenance(BlockInfo storedBlock,
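
Note: with the changes above, reconstruction work is validated against BlockPlacementStatus rather than the single is-in-new-rack test: the proposed targets are accepted only if they either satisfy the placement policy outright or at least shrink the number of additional replicas still required. A self-contained, hedged restatement of that decision (class and method names are illustrative, not Hadoop APIs):

public class PlacementProgressSketch {
  // Mirrors the new check: keep the reconstruction work only if the chosen
  // targets satisfy the policy, or at least reduce the remaining deficit.
  static boolean worthContinuing(boolean newSatisfied,
      int currentAdditionalRequired, int newAdditionalRequired) {
    if (newSatisfied) {
      return true;                                 // targets fix the violation outright
    }
    return newAdditionalRequired < currentAdditionalRequired;
  }

  public static void main(String[] args) {
    // Block needs 2 more racks; the chosen target adds one new rack.
    System.out.println(worthContinuing(false, 2, 1));   // true  -> keep the work
    // Target lands on an already-used rack: deficit unchanged, abandon the work.
    System.out.println(worthContinuing(false, 2, 2));   // false
  }
}
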
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
index e2ac54a..a227666 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
@@ -39,4 +39,12 @@
    */
   public String getErrorDescription();
 
+  /**
+   * Return the number of additional replicas needed to ensure the block
+   * placement policy is satisfied.
+   * @return The number of new replicas needed to satisfy the placement policy,
+   * or zero if no extra replicas are needed
+   */
+  int getAdditionalReplicasRequired();
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
index 75bb65d..7612142 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
@@ -45,4 +45,12 @@
         " more rack(s). Total number of racks in the cluster: " + totalRacks;
   }
 
+  @Override
+  public int getAdditionalReplicasRequired() {
+    if (isPlacementPolicySatisfied()) {
+      return 0;
+    } else {
+      return requiredRacks - currentRacks;
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithNodeGroup.java
index b98b3da..ac5a5b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithNodeGroup.java
@@ -78,4 +78,15 @@
     }
     return errorDescription.toString();
   }
+
+  @Override
+  public int getAdditionalReplicasRequired() {
+    if (isPlacementPolicySatisfied()) {
+      return 0;
+    } else {
+      int parent = parentBlockPlacementStatus.getAdditionalReplicasRequired();
+      int child = requiredNodeGroups - currentNodeGroups.size();
+      return Math.max(parent, child);
+    }
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithUpgradeDomain.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithUpgradeDomain.java
index 4b3c3cc..b839ced 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithUpgradeDomain.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusWithUpgradeDomain.java
@@ -85,4 +85,24 @@
     }
     return errorDescription.toString();
   }
-}
\ No newline at end of file
+
+  @Override
+  public int getAdditionalReplicasRequired() {
+    if (isPlacementPolicySatisfied()) {
+      return 0;
+    } else {
+      // It is possible for a block to have the correct number of upgrade
+      // domains, but only a single rack, or be on multiple racks, but only in
+      // one upgrade domain.
+      int parent = parentBlockPlacementStatus.getAdditionalReplicasRequired();
+      int child;
+
+      if (numberOfReplicas <= upgradeDomainFactor) {
+        child = numberOfReplicas - upgradeDomains.size();
+      } else {
+        child = upgradeDomainFactor - upgradeDomains.size();
+      }
+      return Math.max(parent, child);
+    }
+  }
+}
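
Note: for the upgrade-domain policy above, the deficit reported is the larger of the parent (rack) deficit and the upgrade-domain deficit. A standalone, hedged walk-through of that computation for a block that does not yet satisfy the policy (names and values are illustrative):

public class UpgradeDomainDeficitSketch {
  // Deficit for a block that does not yet satisfy the policy: the larger of
  // the parent (rack) deficit and the upgrade-domain deficit.
  static int additionalRequired(int parentDeficit, int numberOfReplicas,
      int upgradeDomainFactor, int distinctUpgradeDomains) {
    int child = (numberOfReplicas <= upgradeDomainFactor)
        ? numberOfReplicas - distinctUpgradeDomains
        : upgradeDomainFactor - distinctUpgradeDomains;
    return Math.max(parentDeficit, child);
  }

  public static void main(String[] args) {
    // 3 replicas, upgrade domain factor 3, all replicas currently in a single
    // upgrade domain, racks already fine (parent deficit 0): 2 more needed.
    System.out.println(additionalRequired(0, 3, 3, 1));  // prints 2
  }
}
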
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
index 7db05c7..2a4b6e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
@@ -180,12 +180,8 @@
   /**
    * Get the next block report lease ID.  Any number is valid except 0.
    */
-  private synchronized long getNextId() {
-    long id;
-    do {
-      id = nextId++;
-    } while (id == 0);
-    return id;
+  private long getNextId() {
+    return ++nextId == 0L ? ++nextId : nextId;
   }
 
   public synchronized void register(DatanodeDescriptor dn) {
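
Note: the rewritten getNextId() above relies on pre-increment and a single extra bump to skip the invalid lease id 0 on wraparound. A standalone, hedged demonstration (the starting value is chosen just below zero so the skip is visible):

public class LeaseIdSketch {
  private long nextId = -2L;                  // chosen so the skip over 0 is visible

  private long getNextId() {
    return ++nextId == 0L ? ++nextId : nextId;
  }

  public static void main(String[] args) {
    LeaseIdSketch s = new LeaseIdSketch();
    System.out.println(s.getNextId());        // -1
    System.out.println(s.getNextId());        // 1  (0 is skipped, never handed out)
  }
}
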
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index e71cb0a..b592c3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -838,6 +838,12 @@
     // there is snapshot data
     if (sf != null) {
       sf.cleanDirectory(reclaimContext, this, snapshotId, priorSnapshotId);
+      // If the inode has an empty diff list and sf is not a
+      // DirectorySnapshottableFeature, remove the feature to save heap.
+      if (sf.getDiffs().isEmpty() &&
+          !(sf instanceof DirectorySnapshottableFeature)) {
+        this.removeFeature(sf);
+      }
     } else {
       // there is no snapshot data
       if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID &&
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 7b6f1e3..ce654b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -744,6 +744,9 @@
       sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId,
           getStoragePolicyID());
       updateRemovedUnderConstructionFiles(reclaimContext);
+      if (sf.getDiffs().isEmpty()) {
+        this.removeFeature(sf);
+      }
     } else {
       if (snapshot == CURRENT_STATE_ID) {
         if (priorSnapshotId == NO_SNAPSHOT_ID) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d91c857..3a34aa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -27,6 +27,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH;
 import static org.apache.hadoop.util.Time.now;
@@ -447,6 +449,16 @@
     }
     LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
 
+    boolean enableStateContext = conf.getBoolean(
+        DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY,
+        DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT);
+    LOG.info("Enable NameNode state context:" + enableStateContext);
+
+    GlobalStateIdContext stateIdContext = null;
+    if (enableStateContext) {
+      stateIdContext = new GlobalStateIdContext(namesystem);
+    }
+
     clientRpcServer = new RPC.Builder(conf)
         .setProtocol(
             org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
@@ -456,7 +468,7 @@
         .setNumHandlers(handlerCount)
         .setVerbose(false)
         .setSecretManager(namesystem.getDelegationTokenSecretManager())
-        .setAlignmentContext(new GlobalStateIdContext(namesystem))
+        .setAlignmentContext(stateIdContext)
         .build();
 
     // Add all the RPC protocols that the namenode implements
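
Note: the new dfs.namenode.state.context.enabled switch above gates whether the ClientNamenodeProtocol server is built with a GlobalStateIdContext, as required for consistent reads from standby/observer NameNodes. A hedged sketch of flipping the switch programmatically (equivalent to setting it in hdfs-site.xml; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class StateContextConfigSketch {
  public static Configuration enableStateContext() {
    Configuration conf = new Configuration();
    // Same effect as dfs.namenode.state.context.enabled=true in hdfs-site.xml.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
    return conf;
  }
}
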
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 9cd87f1..163b181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -45,6 +45,10 @@
     return diffs != null ?
         DiffList.unmodifiableList(diffs) : DiffList.emptyList();
   }
+
+  public boolean isEmpty() {
+    return diffs == null || diffs.isEmpty();
+  }
   
   /** Clear the list. */
   public void clear() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index b3ab32a..04960e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -434,7 +434,7 @@
   private static final String commonUsageSummary =
     "\t[-report [-live] [-dead] [-decommissioning] " +
     "[-enteringmaintenance] [-inmaintenance]]\n" +
-    "\t[-safemode <enter | leave | get | wait>]\n" +
+    "\t[-safemode <enter | leave | get | wait | forceExit>]\n" +
     "\t[-saveNamespace [-beforeShutdown]]\n" +
     "\t[-rollEdits]\n" +
     "\t[-restoreFailedStorage true|false|check]\n" +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d34e4cd..9b99964 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3303,6 +3303,17 @@
 </property>
 
 <property>
+  <name>dfs.namenode.state.context.enabled</name>
+  <value>false</value>
+  <description>
+    Whether the NameNode sends its current txnid back to the client.
+    Setting this to true is required by the Consistent Reads from Standby
+    feature. For regular cases, this should be set to false to avoid the
+    overhead of updating and maintaining this state.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.ec.system.default.policy</name>
   <value>RS-6-3-1024k</value>
   <description>The default erasure coding policy name will be used
@@ -5000,6 +5011,14 @@
 </property>
 
 <property>
+  <name>dfs.qjournal.parallel-read.num-threads</name>
+  <value>5</value>
+  <description>
+    Number of threads per JN to be used for tailing edits.
+  </description>
+</property>
+
+<property>
   <name>dfs.quota.by.storage.type.enabled</name>
   <value>true</value>
   <description>
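The two keys introduced in the hdfs-default.xml hunk above can also be set programmatically. A minimal sketch, assuming only the public Configuration API and the DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY constant added by this change; the class name is hypothetical, and the qjournal key is set by its string name because its DFSConfigKeys constant does not appear in this hunk:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class NewHdfsDefaultsSketch {
      public static Configuration withNewDefaults() {
        Configuration conf = new Configuration();
        // Required for Consistent Reads from Standby (Observer reads).
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
        // Threads per JournalNode used when tailing edits in parallel;
        // 5 matches the default documented above.
        conf.setInt("dfs.qjournal.parallel-read.num-threads", 5);
        return conf;
      }
    }

The tests later in this patch (for example TestStateAlignmentContextWithHA and TestConsistentReadsObserver) enable the state-context key in exactly this way before bringing up their clusters.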
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index df6e4e8..05c04b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -278,7 +278,7 @@
 </table>
 </small>
 
-<div class="page-header"><h1><small>Snapshotted directories: {@size key=Snapshots}{/size}</small></div>
+<div class="page-header"><h1><small>Snapshots: {@size key=Snapshots}{/size}</small></div>
 
 <small>
 <table class="table">
@@ -420,7 +420,7 @@
   {/DecomNodes}
 </table>
 {:else}
-No nodes are decommissioning
+No nodes are decommissioning.
 {/DecomNodes}
 </small>
 </script>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index d2185d7..92e7c4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -377,7 +377,7 @@
         hdfs dfsadmin [-evictWriters <datanode_host:ipc_port>]
         hdfs dfsadmin [-getDatanodeInfo <datanode_host:ipc_port>]
         hdfs dfsadmin [-metasave filename]
-        hdfs dfsadmin [-triggerBlockReport [-incremental] <datanode_host:ipc_port> [-namenode] <namenode_host:ipc_port>]
+        hdfs dfsadmin [-triggerBlockReport [-incremental] <datanode_host:ipc_port> [-namenode <namenode_host:ipc_port>]]
         hdfs dfsadmin [-listOpenFiles [-blockingDecommission] [-path <path>]]
         hdfs dfsadmin [-help [cmd]]
 
@@ -415,7 +415,7 @@
 | `-evictWriters` \<datanode\_host:ipc\_port\> | Make the datanode evict all clients that are writing a block. This is useful if decommissioning is hung due to slow writers. |
 | `-getDatanodeInfo` \<datanode\_host:ipc\_port\> | Get the information about the given datanode. See [Rolling Upgrade document](./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo) for the detail. |
 | `-metasave` filename | Save Namenode's primary data structures to *filename* in the directory specified by hadoop.log.dir property. *filename* is overwritten if it exists. *filename* will contain one line for each of the following<br/>1. Datanodes heart beating with Namenode<br/>2. Blocks waiting to be replicated<br/>3. Blocks currently being replicated<br/>4. Blocks waiting to be deleted |
-| `-triggerBlockReport` `[-incremental]` \<datanode\_host:ipc\_port\> `[-namenode]` \<namenode\_host:ipc\_port\> | Trigger a block report for the given datanode. If 'incremental' is specified, it will be otherwise, it will be a full block report. If '-namenode \<host\>:\<port\>' is given, it only sends block report to a specified namenode. |
+| `-triggerBlockReport` `[-incremental]` \<datanode\_host:ipc\_port\> `[-namenode <namenode_host:ipc_port>]` | Trigger a block report for the given datanode. If 'incremental' is specified, it will be an incremental block report; otherwise, it will be a full block report. If '-namenode \<namenode\_host:ipc\_port\>' is given, it only sends the block report to the specified namenode. |
 | `-listOpenFiles` `[-blockingDecommission]` `[-path <path>]` | List all open files currently managed by the NameNode along with client name and client machine accessing them. Open files list will be filtered by given type and path. Add -blockingDecommission option if you only want to list open files that are blocking the DataNode decommissioning. |
 | `-help` [cmd] | Displays help for the given command or all commands if none is specified. |
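A usage example for the corrected `-triggerBlockReport` syntax above, keeping the documentation's own placeholders: `hdfs dfsadmin -triggerBlockReport -incremental <datanode_host:ipc_port> -namenode <namenode_host:ipc_port>` asks the given DataNode to send an incremental block report only to the specified NameNode; omitting `-incremental` requests a full report, and omitting `-namenode` leaves the report unrestricted to a single NameNode.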
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
index 07c384c..af1569c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
@@ -120,6 +120,20 @@
 To enable consistent reads from Observer NameNode, you'll need to add a
 few configurations to your **hdfs-site.xml**:
 
+*  **dfs.namenode.state.context.enabled** - to enable the NameNode to maintain
+   and update its server state and id.
+
+   This causes the NameNode to create an alignment context instance, which
+   keeps track of the current server state id and carries it back to the
+   client. It is disabled by default to avoid this overhead on clusters
+   that do not use Observer reads, but it is **required to be turned on**
+   for the Observer NameNode feature.
+
+        <property>
+           <name>dfs.namenode.state.context.enabled</name>
+           <value>true</value>
+        </property>
+
 *  **dfs.ha.tail-edits.in-progress** - to enable fast tailing on
    in-progress edit logs.
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index adc29a1..36f2eb2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -556,17 +556,24 @@
     }
   }
 
+  public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b,
+      int racks, int replicas, int neededReplicas)
+      throws TimeoutException, InterruptedException {
+    waitForReplication(cluster, b, racks, replicas, neededReplicas, 0);
+  }
+
   /*
    * Wait up to 20s for the given block to be replicated across
    * the requested number of racks, with the requested number of
    * replicas, and the requested number of replicas still needed.
    */
   public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b,
-      int racks, int replicas, int neededReplicas)
+      int racks, int replicas, int neededReplicas, int neededDomains)
       throws TimeoutException, InterruptedException {
     int curRacks = 0;
     int curReplicas = 0;
     int curNeededReplicas = 0;
+    int curDomains = 0;
     int count = 0;
     final int ATTEMPTS = 20;
 
@@ -577,17 +584,21 @@
       curRacks = r[0];
       curReplicas = r[1];
       curNeededReplicas = r[2];
+      curDomains = r[3];
       count++;
     } while ((curRacks != racks ||
               curReplicas != replicas ||
-              curNeededReplicas != neededReplicas) && count < ATTEMPTS);
+        curNeededReplicas != neededReplicas ||
+        (neededDomains != 0 && curDomains != neededDomains))
+        && count < ATTEMPTS);
 
     if (count == ATTEMPTS) {
       throw new TimeoutException("Timed out waiting for replication."
           + " Needed replicas = "+neededReplicas
           + " Cur needed replicas = "+curNeededReplicas
           + " Replicas = "+replicas+" Cur replicas = "+curReplicas
-          + " Racks = "+racks+" Cur racks = "+curRacks);
+          + " Racks = "+racks+" Cur racks = "+curRacks
+          + " Domains = "+neededDomains+" Cur domains = "+curDomains);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
index e664f99..c679f6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
@@ -19,7 +19,6 @@
 
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
 import java.io.IOException;
 
 import org.slf4j.Logger;
@@ -31,7 +30,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.junit.Test;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
index 4547db1..1c7f150 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
@@ -85,6 +85,7 @@
     testPreadWithPositionedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
     testPreadWithLimitedByteBuffer(ByteBuffer.allocate(FILE_SIZE));
     testPositionedPreadWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
+    testPreadFullyWithByteBuffer(ByteBuffer.allocate(FILE_SIZE));
   }
 
   /**
@@ -97,6 +98,7 @@
     testPreadWithPositionedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
     testPreadWithLimitedByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
     testPositionedPreadWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
+    testPreadFullyWithByteBuffer(ByteBuffer.allocateDirect(FILE_SIZE));
   }
 
   /**
@@ -122,7 +124,6 @@
       byte[] bufferContents = new byte[FILE_SIZE];
       buffer.get(bufferContents);
       assertArrayEquals(bufferContents, fileContents);
-      buffer.position(buffer.limit());
     }
   }
 
@@ -157,7 +158,7 @@
 
   /**
    * Reads half of the testFile into the {@link ByteBuffer} by setting a
-   * {@link ByteBuffer#limit} on the buffer. Validates that only half of the
+   * {@link ByteBuffer#limit()} on the buffer. Validates that only half of the
    * testFile is loaded into the buffer.
    */
   private void testPreadWithLimitedByteBuffer(
@@ -191,7 +192,7 @@
 
   /**
    * Reads half of the testFile into the {@link ByteBuffer} by setting the
-   * {@link ByteBuffer#position} the half the size of the file. Validates that
+   * {@link ByteBuffer#position()} the half the size of the file. Validates that
    * only half of the testFile is loaded into the buffer.
    */
   private void testPreadWithPositionedByteBuffer(
@@ -257,6 +258,26 @@
     }
   }
 
+  /**
+   * Reads the entire testFile using the preadFully API and validates that its
+   * contents are properly loaded into the supplied {@link ByteBuffer}.
+   */
+  private void testPreadFullyWithByteBuffer(ByteBuffer buffer)
+          throws IOException {
+    int totalBytesRead = 0;
+    try (FSDataInputStream in = fs.open(testFile)) {
+      in.readFully(totalBytesRead, buffer);
+      // Make sure the buffer is full
+      assertFalse(buffer.hasRemaining());
+      // Make sure the contents of the read buffer equal the contents of the
+      // file
+      buffer.position(0);
+      byte[] bufferContents = new byte[FILE_SIZE];
+      buffer.get(bufferContents);
+      assertArrayEquals(bufferContents, fileContents);
+    }
+  }
+
   @AfterClass
   public static void shutdown() throws IOException {
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
index 719743b..94f3612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestClose {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 3dbeea7..3056b43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -94,6 +94,7 @@
     CONF.setBoolean(String.format(
         "fs.%s.impl.disable.cache", HdfsConstants.HDFS_URI_SCHEME), true);
     CONF.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, NUMDATANODES);
+    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
 
     qjmhaCluster = HATestUtil.setUpObserverCluster(CONF, 1, NUMDATANODES, true);
     cluster = qjmhaCluster.getDfsCluster();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index b81b710..e3e862f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -196,7 +196,11 @@
   public JournalNode getJournalNode(int i) {
     return nodes[i].node;
   }
-  
+
+  public String getJournalNodeIpcAddress(int i) {
+    return nodes[i].ipcAddr.toString();
+  }
+
   public void restartJournalNode(int i) throws InterruptedException, IOException {
     JNInfo info = nodes[i];
     JournalNode jn = info.node;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index f3bb954..cd0216e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.qjournal.server.JournalFaultInjector;
+import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
@@ -62,7 +63,9 @@
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.mockito.stubbing.Stubber;
 
@@ -87,11 +90,17 @@
     GenericTestUtils.setLogLevel(ProtobufRpcEngine.LOG, Level.ALL);
   }
 
+  @Rule
+  public TestName name = new TestName();
+
   @Before
   public void setup() throws Exception {
     conf = new Configuration();
-    // Don't retry connections - it just slows down the tests.
-    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+    if (!name.getMethodName().equals("testSelectThreadCounts")) {
+      // Don't retry connections - it just slows down the tests.
+      conf.setInt(
+          CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+    }
     // Turn off IPC client caching to handle daemon restarts.
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
@@ -1040,6 +1049,27 @@
   }
 
   @Test
+  public void testSelectThreadCounts() throws Exception {
+    EditLogOutputStream stm =
+        qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
+    writeTxns(stm, 1, 10);
+    JournalNode jn0 = cluster.getJournalNode(0);
+    String ipcAddr = cluster.getJournalNodeIpcAddress(0);
+    jn0.stopAndJoin(0);
+    for (int i = 0; i < 1000; i++) {
+      qjm.selectInputStreams(new ArrayList<>(), 1, true, false);
+    }
+    String expectedName =
+        "Logger channel (from parallel executor) to " + ipcAddr;
+    long num = Thread.getAllStackTraces().keySet().stream()
+        .filter((t) -> t.getName().contains(expectedName)).count();
+    // The number of threads for the stopped jn shouldn't be more than the
+    // configured value.
+    assertTrue("Number of threads is: " + num,
+        num <= DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT);
+  }
+
+  @Test
   public void testSelectViaRpcTwoJNsError() throws Exception {
     EditLogOutputStream stm =
         qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 20e0d46..d993b66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -819,4 +819,27 @@
     testBadStorageIDCheckAccess(true);
   }
 
+  /**
+   * Verify that block token serialNo is always within the range designated to
+   * the NameNode.
+   */
+  @Test
+  public void testBlockTokenRanges() throws IOException {
+    final int interval = 1024;
+    final int numNNs = Integer.MAX_VALUE / interval;
+    for(int nnIdx = 0; nnIdx < 64; nnIdx++) {
+      BlockTokenSecretManager sm = new BlockTokenSecretManager(
+          blockKeyUpdateInterval, blockTokenLifetime, nnIdx, numNNs,
+          "fake-pool", null, false);
+      int rangeStart = nnIdx * interval;
+      for(int i = 0; i < interval * 3; i++) {
+        int serialNo = sm.getSerialNoForTesting();
+        assertTrue(
+            "serialNo " + serialNo + " is not in the designated range: [" +
+                rangeStart + ", " + (rangeStart + interval) + ")",
+                serialNo >= rangeStart && serialNo < (rangeStart + interval));
+        sm.updateKeys();
+      }
+    }
+  }
 }
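As a concrete instance of the range check asserted above: with interval = 1024, a NameNode at index nnIdx must keep its serialNo in [nnIdx * 1024, nnIdx * 1024 + 1024), so for nnIdx = 5 every key roll must leave the serial number somewhere in [5120, 6144).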
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index ae61f8c..4d6f202 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -81,7 +81,8 @@
 
   /**
    * @return a tuple of the replica state (number racks, number live
-   * replicas, and number needed replicas) for the given block.
+   * replicas, number needed replicas and number of UpgradeDomains) for the
+   * given block.
    */
   public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
     final BlockManager bm = namesystem.getBlockManager();
@@ -90,7 +91,8 @@
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       return new int[]{getNumberOfRacks(bm, b),
           bm.countNodes(storedBlock).liveReplicas(),
-          bm.neededReconstruction.contains(storedBlock) ? 1 : 0};
+          bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
+          getNumberOfDomains(bm, b)};
     } finally {
       namesystem.readUnlock();
     }
@@ -121,6 +123,30 @@
   }
 
   /**
+   * @return the number of UpgradeDomains over which a given block is
+   * replicated. Decommissioning/decommissioned nodes are not counted, and
+   * corrupt replicas are also ignored.
+   */
+  private static int getNumberOfDomains(final BlockManager blockManager,
+                                        final Block b) {
+    final Set<String> domSet = new HashSet<String>(0);
+    final Collection<DatanodeDescriptor> corruptNodes =
+        getCorruptReplicas(blockManager).getNodes(b);
+    for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
+      final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
+      if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
+        if ((corruptNodes == null) || !corruptNodes.contains(cur)) {
+          String domain = cur.getUpgradeDomain();
+          if (domain != null && !domSet.contains(domain)) {
+            domSet.add(domain);
+          }
+        }
+      }
+    }
+    return domSet.size();
+  }
+
+  /**
    * @return redundancy monitor thread instance from block manager.
    */
   public static Daemon getRedundancyThread(final BlockManager blockManager) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index fa0dd70..3c5c5d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -19,7 +19,10 @@
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 import static org.hamcrest.core.Is.is;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
+import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -65,6 +68,39 @@
   }
 
   @Test
+  public void testAddProvidedStorage() throws Exception {
+    // block with only provided storage
+    BlockInfo blockInfo = new BlockInfoContiguous((short) 3);
+    DatanodeStorageInfo providedStorage = mock(DatanodeStorageInfo.class);
+    when(providedStorage.getStorageType()).thenReturn(StorageType.PROVIDED);
+    boolean added = blockInfo.addStorage(providedStorage, blockInfo);
+    Assert.assertTrue(added);
+    Assert.assertEquals(providedStorage, blockInfo.getStorageInfo(0));
+    Assert.assertTrue(blockInfo.isProvided());
+  }
+
+  @Test
+  public void testAddTwoStorageTypes() throws Exception {
+    // block with only disk storage
+    BlockInfo blockInfo = new BlockInfoContiguous((short) 3);
+    DatanodeStorageInfo diskStorage = mock(DatanodeStorageInfo.class);
+    DatanodeDescriptor mockDN = mock(DatanodeDescriptor.class);
+    when(diskStorage.getDatanodeDescriptor()).thenReturn(mockDN);
+    when(diskStorage.getStorageType()).thenReturn(StorageType.DISK);
+    boolean added = blockInfo.addStorage(diskStorage, blockInfo);
+    Assert.assertTrue(added);
+    Assert.assertEquals(diskStorage, blockInfo.getStorageInfo(0));
+    Assert.assertFalse(blockInfo.isProvided());
+
+    // now add provided storage
+    DatanodeStorageInfo providedStorage = mock(DatanodeStorageInfo.class);
+    when(providedStorage.getStorageType()).thenReturn(StorageType.PROVIDED);
+    added = blockInfo.addStorage(providedStorage, blockInfo);
+    Assert.assertTrue(added);
+    Assert.assertTrue(blockInfo.isProvided());
+  }
+
+  @Test
   public void testReplaceStorage() throws Exception {
 
     // Create two dummy storages.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 006513c..ba88afe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -21,6 +21,7 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedListMultimap;
 import com.google.common.collect.Lists;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -749,6 +750,63 @@
   }
 
   @Test
+  public void testChooseSrcDNWithDupECInDecommissioningNode() throws Exception {
+    long blockId = -9223372036854775776L; // real ec block id
+    Block aBlock = new Block(blockId, 0, 0);
+    // RS-3-2 EC policy
+    ErasureCodingPolicy ecPolicy =
+        SystemErasureCodingPolicies.getPolicies().get(1);
+    // striped blockInfo
+    BlockInfoStriped aBlockInfoStriped = new BlockInfoStriped(aBlock, ecPolicy);
+    // ec storageInfo
+    DatanodeStorageInfo ds1 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage1", "1.1.1.1", "rack1", "host1");
+    DatanodeStorageInfo ds2 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage2", "2.2.2.2", "rack2", "host2");
+    DatanodeStorageInfo ds3 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage3", "3.3.3.3", "rack3", "host3");
+    DatanodeStorageInfo ds4 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage4", "4.4.4.4", "rack4", "host4");
+    DatanodeStorageInfo ds5 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage5", "5.5.5.5", "rack5", "host5");
+    DatanodeStorageInfo ds6 = DFSTestUtil.createDatanodeStorageInfo(
+        "storage6", "6.6.6.6", "rack6", "host6");
+
+    // link block with storage
+    aBlockInfoStriped.addStorage(ds1, aBlock);
+    aBlockInfoStriped.addStorage(ds2, new Block(blockId + 1, 0, 0));
+    aBlockInfoStriped.addStorage(ds3, new Block(blockId + 2, 0, 0));
+    aBlockInfoStriped.addStorage(ds4, new Block(blockId + 3, 0, 0));
+    aBlockInfoStriped.addStorage(ds5, new Block(blockId + 4, 0, 0));
+    // NOTE: duplicate of block 0; this DN will replace the decommissioning ds1 DN
+    aBlockInfoStriped.addStorage(ds6, aBlock);
+
+    addEcBlockToBM(blockId, ecPolicy);
+    // decommission the datanode that stores block 0
+    ds1.getDatanodeDescriptor().startDecommission();
+
+    List<DatanodeDescriptor> containingNodes =
+        new LinkedList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> nodesContainingLiveReplicas =
+        new LinkedList<DatanodeStorageInfo>();
+    NumberReplicas numReplicas = new NumberReplicas();
+    List<Byte> liveBlockIndices = new ArrayList<>();
+
+    bm.chooseSourceDatanodes(
+        aBlockInfoStriped,
+        containingNodes,
+        nodesContainingLiveReplicas,
+        numReplicas, liveBlockIndices,
+        LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY);
+    assertEquals("There are 5 live replicas in " +
+            "[ds2, ds3, ds4, ds5, ds6] datanodes ",
+        5, numReplicas.liveReplicas());
+    assertEquals("The ds1 datanode is in decommissioning, " +
+            "so there is no redundant replica",
+        0, numReplicas.redundantInternalBlocks());
+  }
+
+  @Test
   public void testFavorDecomUntilHardLimit() throws Exception {
     bm.maxReplicationStreams = 0;
     bm.replicationStreamsHardLimit = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java
new file mode 100644
index 0000000..6b07334
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import org.junit.Test;
+
+/**
+ * Unit tests to validate the BlockPlacementStatusDefault policy, focusing on
+ * the getAdditionAlReplicasRequired method.
+ */
+public class TestBlockPlacementStatusDefault {
+
+  @Test
+  public void testIsPolicySatisfiedCorrectly() {
+    // 2 current racks and 2 expected
+    BlockPlacementStatusDefault bps =
+        new BlockPlacementStatusDefault(2, 2, 5);
+    assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
+
+    // 1 current rack and 2 expected
+    bps =
+        new BlockPlacementStatusDefault(1, 2, 5);
+    assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
+
+    // 3 current racks and 2 expected
+    bps =
+        new BlockPlacementStatusDefault(3, 2, 5);
+    assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
+
+    // 1 current rack and 2 expected, but only 1 rack on the cluster
+    bps =
+        new BlockPlacementStatusDefault(1, 2, 1);
+    assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
index bfff932..1e0fb76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
@@ -19,6 +19,7 @@
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -49,11 +50,13 @@
   @Test
   public void testIsPolicySatisfiedParentFalse() {
     when(bpsd.isPlacementPolicySatisfied()).thenReturn(false);
+    when(bpsd.getAdditionalReplicasRequired()).thenReturn(1);
     BlockPlacementStatusWithUpgradeDomain bps =
         new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
 
     // Parent policy is not satisfied but upgrade domain policy is
     assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
   }
 
   @Test
@@ -63,21 +66,73 @@
     // Number of domains, replicas and upgradeDomainFactor is equal and parent
     // policy is satisfied
     assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
   }
 
   @Test
-  public void testIsPolicySatisifedSmallDomains() {
+  public void testIsPolicySatisfiedSmallDomains() {
     // Number of domains is less than replicas but equal to factor
     BlockPlacementStatusWithUpgradeDomain bps =
         new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 3);
     assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
 
     // Same as above but replicas is greater than factor
     bps = new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 2);
     assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
 
     // Number of domains is less than replicas and factor
     bps = new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 4, 4);
     assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
   }
-}
\ No newline at end of file
+
+  @Test
+  public void testIsPolicySatisfiedSmallReplicas() {
+    // Replication factor 1 file
+    upgradeDomains.clear();
+    upgradeDomains.add("1");
+    BlockPlacementStatusWithUpgradeDomain bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 1, 3);
+    assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
+
+    // Replication factor 2 file, but one domain
+    bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
+    assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
+
+    // Replication factor 2 file, but two domains
+    upgradeDomains.add("2");
+    bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
+    assertTrue(bps.isPlacementPolicySatisfied());
+    assertEquals(0, bps.getAdditionalReplicasRequired());
+  }
+
+  @Test
+  public void testPolicyIsNotSatisfiedInsufficientDomains() {
+    // Insufficient Domains - 1 domain, replication factor 3
+    upgradeDomains.clear();
+    upgradeDomains.add("1");
+    BlockPlacementStatusWithUpgradeDomain bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
+    assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(2, bps.getAdditionalReplicasRequired());
+
+    // One domain, replication factor 2 file
+    bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 2, 3);
+    assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
+
+    // 2 domains, replication factor 3
+    upgradeDomains.add("2");
+    bps =
+        new BlockPlacementStatusWithUpgradeDomain(bpsd, upgradeDomains, 3, 3);
+    assertFalse(bps.isPlacementPolicySatisfied());
+    assertEquals(1, bps.getAdditionalReplicasRequired());
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 1704367..c0cf7ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -18,14 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertTrue;
-
+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
-
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.slf4j.Logger;
@@ -48,6 +44,8 @@
 import org.junit.Test;
 import org.slf4j.event.Level;
 
+import static org.junit.Assert.*;
+
 public class TestBlocksWithNotEnoughRacks {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestBlocksWithNotEnoughRacks.class);
@@ -545,4 +543,105 @@
       hostsFileWriter.cleanup();
     }
   }
+
+  @Test
+  public void testMultipleReplicasScheduledForUpgradeDomain() throws Exception {
+    Configuration conf = getConf();
+    final short replicationFactor = 3;
+    final Path filePath = new Path("/testFile");
+
+    conf.set("dfs.block.replicator.classname",
+        "org.apache.hadoop.hdfs.server.blockmanagement." +
+            "BlockPlacementPolicyWithUpgradeDomain");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(6).build();
+    cluster.waitClusterUp();
+
+    List<DatanodeDescriptor> dnDescriptors = getDnDescriptors(cluster);
+
+    try {
+      // Create a file with one block with a replication factor of 3
+      // No upgrade domains are set.
+      final FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, filePath, 1L, replicationFactor, 1L);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
+
+      BlockManager bm = cluster.getNamesystem().getBlockManager();
+      BlockInfo storedBlock = bm.getStoredBlock(b.getLocalBlock());
+
+      // The block should be replicated OK - so Reconstruction Work will be null
+      BlockReconstructionWork work = bm.scheduleReconstruction(storedBlock, 2);
+      assertNull(work);
+      // Set the upgradeDomain to "3" for the 3 nodes hosting the block.
+      // Then alternately set the remaining 3 nodes to have an upgradeDomain
+      // of 0 or 1 giving a total of 3 upgradeDomains.
+      for (int i=0; i<storedBlock.getReplication(); i++) {
+        storedBlock.getDatanode(i).setUpgradeDomain("3");
+      }
+      int udInd = 0;
+      for (DatanodeDescriptor d : dnDescriptors) {
+        if (d.getUpgradeDomain() == null) {
+          d.setUpgradeDomain(Integer.toString(udInd % 2));
+          udInd++;
+        }
+      }
+      // Now reconWork is non-null and 2 extra targets are needed
+      work = bm.scheduleReconstruction(storedBlock, 2);
+      assertEquals(2, work.getAdditionalReplRequired());
+
+      // Add the block to the replication queue and ensure it is replicated
+      // correctly.
+      bm.neededReconstruction.add(storedBlock, 3, 0, 0, replicationFactor);
+      DFSTestUtil.waitForReplication(cluster, b, 1, replicationFactor, 0, 3);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testUnderReplicatedRespectsRacksAndUpgradeDomain()
+      throws Exception {
+    Configuration conf = getConf();
+    final short replicationFactor = 3;
+    final Path filePath = new Path("/testFile");
+
+    conf.set("dfs.block.replicator.classname",
+        "org.apache.hadoop.hdfs.server.blockmanagement." +
+        "BlockPlacementPolicyWithUpgradeDomain");
+
+    // All hosts are on two racks
+    String[] racks = {"/r1", "/r1", "/r1", "/r2", "/r2", "/r2"};
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(6).racks(racks).build();
+    cluster.waitClusterUp();
+    List<DatanodeDescriptor> dnDescriptors = getDnDescriptors(cluster);
+    for (int i=0; i < dnDescriptors.size(); i++) {
+      dnDescriptors.get(i).setUpgradeDomain(Integer.toString(i%3));
+    }
+    try {
+      final FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
+      fs.setReplication(filePath, replicationFactor);
+      DFSTestUtil.waitForReplication(cluster, b, 2, replicationFactor, 0, 3);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private List<DatanodeDescriptor> getDnDescriptors(MiniDFSCluster cluster)
+      throws IOException {
+    List<DatanodeDescriptor> dnDesc = new ArrayList<>();
+    DatanodeManager dnManager = cluster.getNamesystem().getBlockManager()
+        .getDatanodeManager();
+    for (DataNode dn : cluster.getDataNodes()) {
+      DatanodeDescriptor d = dnManager.getDatanode(dn.getDatanodeUuid());
+      if (d == null) {
+        throw new IOException("DatanodeDescriptor not found for DN "+
+            dn.getDatanodeUuid());
+      }
+      dnDesc.add(d);
+    }
+    return dnDesc;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index a6fac80..9606785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -71,6 +72,7 @@
   @BeforeClass
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
+    conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
     // disable fast tailing here because this test's assertions are based on the
     // timing of explicitly called rollEditLogAndTail. Although this means this
     // test takes some time to run
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 850b961..ff90121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -92,11 +92,10 @@
 
     setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
     setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
-    setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);
     setAndCheckSerialNumber(Integer.MAX_VALUE / 2, btsm1, btsm2, btsm3);
-    setAndCheckSerialNumber(Integer.MIN_VALUE / 2, btsm1, btsm2, btsm3);
     setAndCheckSerialNumber(Integer.MAX_VALUE / 3, btsm1, btsm2, btsm3);
-    setAndCheckSerialNumber(Integer.MIN_VALUE / 3, btsm1, btsm2, btsm3);
+    setAndCheckSerialNumber(Integer.MAX_VALUE / 171717,
+        btsm1, btsm2, btsm3);
   }
 
   private void setAndCheckSerialNumber(int serialNumber, BlockTokenSecretManager... btsms) {
@@ -116,36 +115,7 @@
       }
     }
   }
-
-  @Test
-  public void testSerialNumberMaskMatchIndex() {
-    BlockTokenSecretManager btsm1 = cluster.getNamesystem(0).getBlockManager()
-        .getBlockTokenSecretManager();
-    BlockTokenSecretManager btsm2 = cluster.getNamesystem(1).getBlockManager()
-        .getBlockTokenSecretManager();
-    BlockTokenSecretManager btsm3 = cluster.getNamesystem(2).getBlockManager()
-        .getBlockTokenSecretManager();
-    int[] testSet = {0, Integer.MAX_VALUE, Integer.MIN_VALUE,
-        Integer.MAX_VALUE / 2, Integer.MIN_VALUE / 2,
-        Integer.MAX_VALUE / 3, Integer.MIN_VALUE / 3};
-    for (int i = 0; i < testSet.length; i++) {
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm1, 0);
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm2, 1);
-      setAndCheckHighBitsSerialNumber(testSet[i], btsm3, 2);
-    }
-  }
-
-  /**
-   * Check mask of serial number if equal to index of NameNode.
-   */
-  private void setAndCheckHighBitsSerialNumber(int serialNumber,
-      BlockTokenSecretManager btsm, int nnIndex) {
-    btsm.setSerialNo(serialNumber);
-    int serialNo = btsm.getSerialNoForTesting();
-    int index = serialNo >> BlockTokenSecretManager.NUM_VALID_BITS;
-    assertEquals(index, nnIndex);
-  }
-
+  
   @Test
   public void ensureInvalidBlockTokensAreRejected() throws IOException,
       URISyntaxException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
index a8e1245..a0913e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -46,6 +47,7 @@
   @BeforeClass
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
+    conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
     qjmhaCluster = HATestUtil.setUpObserverCluster(conf, 2, 0, true);
     dfsCluster = qjmhaCluster.getDfsCluster();
     dfs = HATestUtil.configureObserverReadFs(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index 20e0bbd..b89a157 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -77,6 +78,7 @@
   @BeforeClass
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
+    conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
     qjmhaCluster = HATestUtil.setUpObserverCluster(conf, 1, 0, true);
     dfsCluster = qjmhaCluster.getDfsCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index db3d93f..b8898eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -2144,24 +2144,11 @@
     INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
         .asDirectory();
     assertTrue("the diff list of " + dir2
-        + " should be empty after deleting s0", dir2Node.getDiffs().asList()
-        .isEmpty());
+        + " should be empty after deleting s0", !dir2Node.isWithSnapshot());
     
     assertTrue(hdfs.exists(newfoo));
     INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
     assertTrue(fooRefNode instanceof INodeReference.DstReference);
-    INodeDirectory fooNode = fooRefNode.asDirectory();
-    // fooNode should be still INodeDirectory (With Snapshot) since we call
-    // recordModification before the rename
-    assertTrue(fooNode.isWithSnapshot());
-    assertTrue(fooNode.getDiffs().asList().isEmpty());
-    INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
-        .get(0).asDirectory();
-    // bar should also be INodeDirectory (With Snapshot), and both of its diff 
-    // list and children list are empty 
-    assertTrue(barNode.getDiffs().asList().isEmpty());
-    assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
-    
     restartClusterAndCheckImage(true);
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 8bd7967..3e318b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -527,7 +527,7 @@
     assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
     diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
         .getDiffs();
-    assertEquals(0, diffList.asList().size());
+    assertEquals(null, diffList);
     
     // check 2. noChangeDir and noChangeFile are still there
     final INodeDirectory noChangeDirNode = 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 5820226..a6f633a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -56,31 +56,21 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>HSAdminRefreshProtocol.proto</include>
-                  <include>mr_protos.proto</include>
-                  <include>mr_service_protos.proto</include>
-                  <include>MRClientProtocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/HSAdminRefreshProtocol.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/HSAdminRefreshProtocol.proto
index 1f95ee1..8e077ad 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/HSAdminRefreshProtocol.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/HSAdminRefreshProtocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.mapreduce.v2.hs.proto";
 option java_outer_classname = "HSAdminRefreshProtocolProtos";
 option java_generic_services = true;
@@ -97,4 +98,4 @@
    */
   rpc refreshLogRetentionSettings(RefreshLogRetentionSettingsRequestProto)
       returns(RefreshLogRetentionSettingsResponseProto);
-}
\ No newline at end of file
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
index 3f09719b..1fb3004 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "MRClientProtocol";
 option java_generic_services = true;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
index cf09900..9a1f245 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.mapreduce.v2.proto";
 option java_outer_classname = "MRProtos";
 option java_generic_services = true;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
index cb3c30c..86bb8ca 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.mapreduce.v2.proto";
 option java_outer_classname = "MRServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index afdc0ca..b3e2b4a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -250,7 +251,9 @@
             job, dirs, recursive, inputFilter, false);
         locatedFiles = locatedFileStatusFetcher.getFileStatuses();
       } catch (InterruptedException e) {
-        throw new IOException("Interrupted while getting file statuses");
+        throw  (IOException)
+            new InterruptedIOException("Interrupted while getting file statuses")
+                .initCause(e);
       }
       result = Iterables.toArray(locatedFiles, FileStatus.class);
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
index e1bb36b..faf1a38 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
@@ -38,10 +38,14 @@
   
   /**
    * Create the exception with the given list.
+   * The first element of the list is used as the init cause value.
    * @param probs the list of problems to report. this list is not copied.
    */
   public InvalidInputException(List<IOException> probs) {
     problems = probs;
+    if (!probs.isEmpty()) {
+      initCause(probs.get(0));
+    }
   }
   
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
index 3869c49..a248f14 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
@@ -46,15 +46,23 @@
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
 /**
  * Utility class to fetch block locations for specified Input paths using a
  * configured number of threads.
+ * The thread count is determined from the value of
+ * "mapreduce.input.fileinputformat.list-status.num-threads" in the
+ * configuration.
  */
 @Private
 public class LocatedFileStatusFetcher {
 
+  public static final Logger LOG =
+      LoggerFactory.getLogger(LocatedFileStatusFetcher.class.getName());
   private final Path[] inputDirs;
   private final PathFilter inputFilter;
   private final Configuration conf;
@@ -64,7 +72,7 @@
   private final ExecutorService rawExec;
   private final ListeningExecutorService exec;
   private final BlockingQueue<List<FileStatus>> resultQueue;
-  private final List<IOException> invalidInputErrors = new LinkedList<IOException>();
+  private final List<IOException> invalidInputErrors = new LinkedList<>();
 
   private final ProcessInitialInputPathCallback processInitialInputPathCallback = 
       new ProcessInitialInputPathCallback();
@@ -79,25 +87,30 @@
   private volatile Throwable unknownError;
 
   /**
+   * Instantiate.
+   * The newApi switch only determines which exception is raised when
+   * {@link #getFileStatuses()} fails; it does not change the algorithm.
    * @param conf configuration for the job
    * @param dirs the initial list of paths
-   * @param recursive whether to traverse the patchs recursively
+   * @param recursive whether to traverse the paths recursively
    * @param inputFilter inputFilter to apply to the resulting paths
    * @param newApi whether using the mapred or mapreduce API
    * @throws InterruptedException
    * @throws IOException
    */
   public LocatedFileStatusFetcher(Configuration conf, Path[] dirs,
-      boolean recursive, PathFilter inputFilter, boolean newApi) throws InterruptedException,
-      IOException {
+      boolean recursive, PathFilter inputFilter, boolean newApi)
+      throws InterruptedException, IOException {
     int numThreads = conf.getInt(FileInputFormat.LIST_STATUS_NUM_THREADS,
         FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
+    LOG.debug("Instantiated LocatedFileStatusFetcher with {} threads",
+        numThreads);
     rawExec = HadoopExecutors.newFixedThreadPool(
         numThreads,
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("GetFileInfo #%d").build());
     exec = MoreExecutors.listeningDecorator(rawExec);
-    resultQueue = new LinkedBlockingQueue<List<FileStatus>>();
+    resultQueue = new LinkedBlockingQueue<>();
     this.conf = conf;
     this.inputDirs = dirs;
     this.recursive = recursive;
@@ -106,10 +119,13 @@
   }
 
   /**
-   * Start executing and return FileStatuses based on the parameters specified
+   * Start executing and return FileStatuses based on the parameters specified.
    * @return fetched file statuses
-   * @throws InterruptedException
-   * @throws IOException
+   * @throws InterruptedException if interrupted while waiting for results.
+   * @throws IOException IO failure or other error.
+   * @throws InvalidInputException if the input is invalid and the old API is
+   *         in use.
+   * @throws org.apache.hadoop.mapreduce.lib.input.InvalidInputException if
+   *         the input is invalid and the new API is in use.
    */
   public Iterable<FileStatus> getFileStatuses() throws InterruptedException,
       IOException {
@@ -117,6 +133,7 @@
     // rest being scheduled does not lead to a termination.
     runningTasks.incrementAndGet();
     for (Path p : inputDirs) {
+      LOG.debug("Queuing scan of directory {}", p);
       runningTasks.incrementAndGet();
       ListenableFuture<ProcessInitialInputPathCallable.Result> future = exec
           .submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
@@ -128,14 +145,20 @@
 
     lock.lock();
     try {
+      LOG.debug("Waiting scan completion");
       while (runningTasks.get() != 0 && unknownError == null) {
         condition.await();
       }
     } finally {
       lock.unlock();
     }
+    // Either the scan completed or an error was raised.
+    // In the case of an error, shutting down the executor will interrupt all
+    // active threads, which can add noise to the logs.
+    LOG.debug("Scan complete: shutting down");
     this.exec.shutdownNow();
     if (this.unknownError != null) {
+      LOG.debug("Scan failed", this.unknownError);
       if (this.unknownError instanceof Error) {
         throw (Error) this.unknownError;
       } else if (this.unknownError instanceof RuntimeException) {
@@ -148,7 +171,11 @@
         throw new IOException(this.unknownError);
       }
     }
-    if (this.invalidInputErrors.size() != 0) {
+    if (!this.invalidInputErrors.isEmpty()) {
+      LOG.debug("Invalid Input Errors raised");
+      for (IOException error : invalidInputErrors) {
+        LOG.debug("Error", error);
+      }
       if (this.newApi) {
         throw new org.apache.hadoop.mapreduce.lib.input.InvalidInputException(
             invalidInputErrors);
@@ -161,7 +188,7 @@
 
   /**
    * Collect misconfigured Input errors. Errors while actually reading file info
-   * are reported immediately
+   * are reported immediately.
    */
   private void registerInvalidInputError(List<IOException> errors) {
     synchronized (this) {
@@ -171,9 +198,10 @@
 
   /**
    * Register fatal errors - example an IOException while accessing a file or a
-   * full exection queue
+   * full execution queue.
    */
   private void registerError(Throwable t) {
+    LOG.debug("Error", t);
     lock.lock();
     try {
       if (unknownError == null) {
@@ -221,7 +249,7 @@
     public Result call() throws Exception {
       Result result = new Result();
       result.fs = fs;
-
+      LOG.debug("ProcessInputDirCallable {}", fileStatus);
       if (fileStatus.isDirectory()) {
         RemoteIterator<LocatedFileStatus> iter = fs
             .listLocatedStatus(fileStatus.getPath());
@@ -242,8 +270,8 @@
     }
 
     private static class Result {
-      private List<FileStatus> locatedFileStatuses = new LinkedList<FileStatus>();
-      private List<FileStatus> dirsNeedingRecursiveCalls = new LinkedList<FileStatus>();
+      private List<FileStatus> locatedFileStatuses = new LinkedList<>();
+      private List<FileStatus> dirsNeedingRecursiveCalls = new LinkedList<>();
       private FileSystem fs;
     }
   }
@@ -259,11 +287,12 @@
     @Override
     public void onSuccess(ProcessInputDirCallable.Result result) {
       try {
-        if (result.locatedFileStatuses.size() != 0) {
+        if (!result.locatedFileStatuses.isEmpty()) {
           resultQueue.add(result.locatedFileStatuses);
         }
-        if (result.dirsNeedingRecursiveCalls.size() != 0) {
+        if (!result.dirsNeedingRecursiveCalls.isEmpty()) {
           for (FileStatus fileStatus : result.dirsNeedingRecursiveCalls) {
+            LOG.debug("Queueing directory scan {}", fileStatus.getPath());
             runningTasks.incrementAndGet();
             ListenableFuture<ProcessInputDirCallable.Result> future = exec
                 .submit(new ProcessInputDirCallable(result.fs, fileStatus,
@@ -285,7 +314,7 @@
     }
   }
 
-  
+
   /**
    * Processes an initial Input Path pattern through the globber and PathFilter
    * to generate a list of files which need further processing.
@@ -309,6 +338,7 @@
       Result result = new Result();
       FileSystem fs = path.getFileSystem(conf);
       result.fs = fs;
+      LOG.debug("ProcessInitialInputPathCallable path {}", path);
       FileStatus[] matches = fs.globStatus(path, inputFilter);
       if (matches == null) {
         result.addError(new IOException("Input path does not exist: " + path));
@@ -337,7 +367,7 @@
 
   /**
    * The callback handler to handle results generated by
-   * {@link ProcessInitialInputPathCallable}
+   * {@link ProcessInitialInputPathCallable}.
    * 
    */
   private class ProcessInitialInputPathCallback implements
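
A minimal usage sketch of the fetcher above, assuming placeholder input paths,
filter, and thread count; LocatedFileStatusFetcher is a @Private
MapReduce-internal class, so this is illustration only. The configuration key
is the one named in the class javadoc (FileInputFormat.LIST_STATUS_NUM_THREADS).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;
    import org.apache.hadoop.mapred.LocatedFileStatusFetcher;

    public class ListStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder value: scan with 4 threads instead of the default of 1.
        conf.setInt("mapreduce.input.fileinputformat.list-status.num-threads", 4);
        Path[] dirs = {new Path("/data/input")};   // placeholder input directory
        PathFilter filter = path -> true;          // accept every path
        // newApi=true raises org.apache.hadoop.mapreduce.lib.input.InvalidInputException
        // on invalid input; newApi=false raises the mapred flavour instead.
        LocatedFileStatusFetcher fetcher =
            new LocatedFileStatusFetcher(conf, dirs, true, filter, true);
        for (FileStatus status : fetcher.getFileStatuses()) {
          System.out.println(status.getPath());
        }
      }
    }
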
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index e2658ca..22efe14 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.lib.input;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -283,7 +284,10 @@
             job.getConfiguration(), dirs, recursive, inputFilter, true);
         locatedFiles = locatedFileStatusFetcher.getFileStatuses();
       } catch (InterruptedException e) {
-        throw new IOException("Interrupted while getting file statuses");
+        throw (IOException)
+            new InterruptedIOException(
+                "Interrupted while getting file statuses")
+                .initCause(e);
       }
       result = Lists.newArrayList(locatedFiles);
     }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
index 61e1484..1113bec 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
@@ -37,10 +37,14 @@
   
   /**
    * Create the exception with the given list.
+   * The first element of the list is used as the init cause value.
    * @param probs the list of problems to report. this list is not copied.
    */
   public InvalidInputException(List<IOException> probs) {
     problems = probs;
+    if (!probs.isEmpty()) {
+      initCause(probs.get(0));
+    }
   }
   
   /**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 06b1362..090da57 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -60,27 +60,18 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>ShuffleHandlerRecovery.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/proto/ShuffleHandlerRecovery.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/proto/ShuffleHandlerRecovery.proto
index 7a81ffb..a3129f3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/proto/ShuffleHandlerRecovery.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/proto/ShuffleHandlerRecovery.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.mapred.proto";
 option java_outer_classname = "ShuffleHandlerRecoveryProtos";
 option java_generic_services = true;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 5b6d620..ebe4477 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -94,6 +94,7 @@
 import java.io.IOException;
 import java.net.URI;
 import java.security.InvalidKeyException;
+import java.security.SecureRandom;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
@@ -587,7 +588,7 @@
 
     if(Boolean.valueOf(metadata.get(OzoneConsts.GDPR_FLAG))){
       try{
-        GDPRSymmetricKey gKey = new GDPRSymmetricKey();
+        GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
         metadata.putAll(gKey.getKeyDetails());
       }catch (Exception e) {
         if(e instanceof InvalidKeyException &&
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 9cbee56..09ac27a 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -154,8 +154,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
         </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index b7a6c2f..7cd38ad 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -30,6 +30,7 @@
 import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Optional;
@@ -39,13 +40,14 @@
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
@@ -71,6 +73,8 @@
  */
 public final class OmUtils {
   public static final Logger LOG = LoggerFactory.getLogger(OmUtils.class);
+  private static final SecureRandom SRAND = new SecureRandom();
+  private static byte[] randomBytes = new byte[32];
 
   private OmUtils() {
   }
@@ -272,9 +276,9 @@
 
   public static byte[] getSHADigest() throws IOException {
     try {
+      SRAND.nextBytes(randomBytes);
       MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-      return sha.digest(RandomStringUtils.random(32)
-          .getBytes(StandardCharsets.UTF_8));
+      return sha.digest(randomBytes);
     } catch (NoSuchAlgorithmException ex) {
       throw new IOException("Error creating an instance of SHA-256 digest.\n" +
           "This could possibly indicate a faulty JRE");
@@ -402,18 +406,14 @@
   /**
    * If a OM conf is only set with key suffixed with OM Node ID, return the
    * set value.
-   * @return null if base conf key is set, otherwise the value set for
-   * key suffixed with Node ID.
+   * @return the value set for the key suffixed with the OM Node ID, or null
+   * if no such value is set.
    */
   public static String getConfSuffixedWithOMNodeId(Configuration conf,
       String confKey, String omServiceID, String omNodeId) {
-    String confValue = conf.getTrimmed(confKey);
-    if (StringUtils.isNotEmpty(confValue)) {
-      return null;
-    }
     String suffixedConfKey = OmUtils.addKeySuffixes(
         confKey, omServiceID, omNodeId);
-    confValue = conf.getTrimmed(suffixedConfKey);
+    String confValue = conf.getTrimmed(suffixedConfKey);
     if (StringUtils.isNotEmpty(confValue)) {
       return confValue;
     }
@@ -498,13 +498,36 @@
   }
 
   /**
-   * Returns the DB key name of a deleted key in OM metadata store. The
-   * deleted key name is the <keyName>_<deletionTimestamp>.
-   * @param key Original key name
-   * @param timestamp timestamp of deletion
-   * @return Deleted key name
+   * Prepares key info to be moved to deletedTable.
+   * 1. It strips GDPR metadata from the key info.
+   * 2. For the given object key, if the repeatedOmKeyInfo instance is null,
+   * no entry for the object key exists in deletedTable, so a new instance is
+   * created to include this key; otherwise the existing repeatedOmKeyInfo
+   * instance is updated.
+   * @param keyInfo args supplied by client
+   * @param repeatedOmKeyInfo key details from deletedTable
+   * @return {@link RepeatedOmKeyInfo} to be written to deletedTable
+   * @throws IOException if an I/O error occurs while checking for the key
    */
-  public static String getDeletedKeyName(String key, long timestamp) {
-    return key + "_" + timestamp;
+  public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
+      RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException {
+    // If this key is in a GDPR enforced bucket, then before moving
+    // KeyInfo to deletedTable, remove the GDPR related metadata from
+    // KeyInfo.
+    if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
+    }
+
+    if(repeatedOmKeyInfo == null) {
+      //The key doesn't exist in deletedTable, so create a new instance.
+      repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+    } else {
+      //The key exists in deletedTable, so update existing instance.
+      repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
+    }
+
+    return repeatedOmKeyInfo;
   }
 }
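
A short sketch of how the new prepareKeyForDelete helper pairs with the
deletedTable, assuming a Table<String, RepeatedOmKeyInfo> handle and an
already-built OmKeyInfo are available; the class and method names below are
illustrative only, not the actual OM key-deletion code path.

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.OmUtils;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

    public final class DeletedKeySketch {
      /**
       * Moves keyInfo under dbKey in deletedTable, bundling repeated deletes
       * of the same key name into a single RepeatedOmKeyInfo entry.
       */
      static void moveToDeletedTable(Table<String, RepeatedOmKeyInfo> deletedTable,
          String dbKey, OmKeyInfo keyInfo) throws IOException {
        RepeatedOmKeyInfo existing = deletedTable.get(dbKey); // null on first delete
        RepeatedOmKeyInfo updated = OmUtils.prepareKeyForDelete(keyInfo, existing);
        deletedTable.put(dbKey, updated);
      }
    }
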
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index cc908fc..673d26a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -26,9 +26,11 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .UserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -223,7 +225,7 @@
    *
    * @return UserTable.
    */
-  Table<String, VolumeList> getUserTable();
+  Table<String, UserVolumeInfo> getUserTable();
 
   /**
    * Returns the Volume Table.
@@ -251,7 +253,7 @@
    *
    * @return Deleted Table.
    */
-  Table<String, OmKeyInfo> getDeletedTable();
+  Table<String, RepeatedOmKeyInfo> getDeletedTable();
 
   /**
    * Gets the OpenKeyTable.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
new file mode 100644
index 0000000..a0ef4a5
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RepeatedKeyInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode RepeatedOmKeyInfo as byte array.
+ */
+public class RepeatedOmKeyInfoCodec implements Codec<RepeatedOmKeyInfo> {
+  @Override
+  public byte[] toPersistedFormat(RepeatedOmKeyInfo object)
+      throws IOException {
+    Preconditions.checkNotNull(object,
+        "Null object can't be converted to byte array.");
+    return object.getProto().toByteArray();
+  }
+
+  @Override
+  public RepeatedOmKeyInfo fromPersistedFormat(byte[] rawData)
+      throws IOException {
+    Preconditions.checkNotNull(rawData,
+        "Null byte array can't converted to real object.");
+    try {
+      return RepeatedOmKeyInfo.getFromProto(RepeatedKeyInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+          "Can't encode the the raw data from the byte array", e);
+    }
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
similarity index 78%
rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java
rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
index c819083..2545454 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/VolumeListCodec.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
@@ -18,31 +18,32 @@
 package org.apache.hadoop.ozone.om.codec;
 
 import java.io.IOException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.hdds.utils.db.Codec;
 
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
- * Codec to encode VolumeList as byte array.
+ * Codec to encode UserVolumeInfo as byte array.
  */
-public class VolumeListCodec implements Codec<VolumeList> {
+public class UserVolumeInfoCodec implements Codec<UserVolumeInfo> {
 
   @Override
-  public byte[] toPersistedFormat(VolumeList object) throws IOException {
+  public byte[] toPersistedFormat(UserVolumeInfo object) throws IOException {
     Preconditions
         .checkNotNull(object, "Null object can't be converted to byte array.");
     return object.toByteArray();
   }
 
   @Override
-  public VolumeList fromPersistedFormat(byte[] rawData) throws IOException {
+  public UserVolumeInfo fromPersistedFormat(byte[] rawData) throws IOException {
     Preconditions
         .checkNotNull(rawData,
             "Null byte array can't converted to real object.");
     try {
-      return VolumeList.parseFrom(rawData);
+      return UserVolumeInfo.parseFrom(rawData);
     } catch (InvalidProtocolBufferException e) {
       throw new IllegalArgumentException(
           "Can't encode the the raw data from the byte array", e);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 2049d72..6453e8e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -44,6 +44,50 @@
   private long creationTime;
   private long quotaInBytes;
   private final OmOzoneAclMap aclMap;
+  private long objectID;
+  private long updateID;
+
+  /**
+   * Set the Object ID. If this value is already set then this function throws.
+   * There is a reason why we cannot make this field final: OmVolumeArgs is
+   * deserialized from the protobuf in many places in the code, and the object
+   * ID has to be set after deserialization.
+   *
+   * @param obId - long
+   */
+  public void setObjectID(long obId) {
+    if(this.objectID != 0) {
+      throw new UnsupportedOperationException("Attempt to modify object ID " +
+          "which is not zero. Current Object ID is " + this.objectID);
+    }
+    this.objectID = obId;
+  }
+
+  /**
+   * Returns a monotonically increasing ID, that denotes the last update.
+   * Each time an update happens, this ID is incremented.
+   * @return long
+   */
+  public long getUpdateID() {
+    return updateID;
+  }
+
+  /**
+   * Sets the update ID. For each modification of this object, we will set
+   * this to a value greater than the current value.
+   * @param updateID  long
+   */
+  public void setUpdateID(long updateID) {
+    this.updateID = updateID;
+  }
+
+  /**
+   * An immutable identity field for this object.
+   * @return long.
+   */
+  public long getObjectID() {
+    return objectID;
+  }
 
   /**
    * Private constructor, constructed via builder.
@@ -54,10 +98,16 @@
    * @param metadata - metadata map for custom key/value data.
    * @param aclMap - User to access rights map.
    * @param creationTime - Volume creation time.
+   * @param objectID - ID of this object.
+   * @param updateID - A sequence number that denotes the last update on this
+   * object. This is a monotonically increasing number.
    */
+  @SuppressWarnings({"checkstyle:ParameterNumber", "This is invoked from a " +
+      "builder."})
   private OmVolumeArgs(String adminName, String ownerName, String volume,
                        long quotaInBytes, Map<String, String> metadata,
-                       OmOzoneAclMap aclMap, long creationTime) {
+                       OmOzoneAclMap aclMap, long creationTime, long objectID,
+                      long updateID) {
     this.adminName = adminName;
     this.ownerName = ownerName;
     this.volume = volume;
@@ -65,6 +115,8 @@
     this.metadata = metadata;
     this.aclMap = aclMap;
     this.creationTime = creationTime;
+    this.objectID = objectID;
+    this.updateID = updateID;
   }
 
 
@@ -152,6 +204,8 @@
     auditMap.put(OzoneConsts.VOLUME, this.volume);
     auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime));
     auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes));
+    auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID()));
+    auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID()));
     return auditMap;
   }
 
@@ -164,17 +218,12 @@
       return false;
     }
     OmVolumeArgs that = (OmVolumeArgs) o;
-    return creationTime == that.creationTime &&
-        quotaInBytes == that.quotaInBytes &&
-        Objects.equals(adminName, that.adminName) &&
-        Objects.equals(ownerName, that.ownerName) &&
-        Objects.equals(volume, that.volume);
+    return Objects.equals(this.objectID, that.objectID);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(adminName, ownerName, volume, creationTime,
-        quotaInBytes);
+    return Objects.hash(this.objectID);
   }
 
   /**
@@ -188,6 +237,29 @@
     private long quotaInBytes;
     private Map<String, String> metadata;
     private OmOzoneAclMap aclMap;
+    private long objectID;
+    private long updateID;
+
+    /**
+     * Sets the Object ID for this Object.
+     * Object ID are unique and immutable identifier for each object in the
+     * System.
+     * @param objectID - long
+     */
+    public void setObjectID(long objectID) {
+      this.objectID = objectID;
+    }
+
+    /**
+     * Sets the update ID for this Object. Update IDs are monotonically
+     * increasing values which are updated each time there is an update.
+     * @param updateID - long
+     */
+    public void setUpdateID(long updateID) {
+      this.updateID = updateID;
+    }
+
+
 
     /**
      * Constructs a builder.
@@ -248,15 +320,13 @@
       Preconditions.checkNotNull(ownerName);
       Preconditions.checkNotNull(volume);
       return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          metadata, aclMap, creationTime);
+          metadata, aclMap, creationTime, objectID, updateID);
     }
 
   }
 
   public VolumeInfo getProtobuf() {
-
     List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
-
     return VolumeInfo.newBuilder()
         .setAdminName(adminName)
         .setOwnerName(ownerName)
@@ -266,15 +336,15 @@
         .addAllVolumeAcls(aclList)
         .setCreationTime(
             creationTime == 0 ? System.currentTimeMillis() : creationTime)
+        .setObjectID(objectID)
+        .setUpdateID(updateID)
         .build();
   }
 
   public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo)
       throws OMException {
-
     OmOzoneAclMap aclMap =
         OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
-
     return new OmVolumeArgs(
         volInfo.getAdminName(),
         volInfo.getOwnerName(),
@@ -282,6 +352,8 @@
         volInfo.getQuotaInBytes(),
         KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()),
         aclMap,
-        volInfo.getCreationTime());
+        volInfo.getCreationTime(),
+        volInfo.getObjectID(),
+        volInfo.getUpdateID());
   }
 }
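
A sketch of the objectID/updateID semantics added above, assuming a VolumeInfo
protobuf message is already at hand; the literal ID value is a placeholder.

    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;

    public final class VolumeIdSketch {
      static OmVolumeArgs tagVolume(VolumeInfo volumeInfoProto) throws OMException {
        OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfoProto);
        if (volumeArgs.getObjectID() == 0) {
          // The object ID may be assigned exactly once; a second call throws
          // UnsupportedOperationException.
          volumeArgs.setObjectID(42L);              // placeholder ID
        }
        // The update ID is bumped on every modification of the volume.
        volumeArgs.setUpdateID(volumeArgs.getUpdateID() + 1);
        return volumeArgs;
      }
    }
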
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
new file mode 100644
index 0000000..c28c2c8
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RepeatedKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyInfo;
+
+/**
+ * Args for deleted keys. This is written to om metadata deletedTable.
+ * Once a key is deleted, it is moved to om metadata deletedTable. Having a
+ * {label: List<OMKeyInfo>} ensures that if users create & delete keys with
+ * the exact same URI multiple times, all the delete instances are bundled
+ * under the same key name. This is useful as part of GDPR compliance, where
+ * an admin wants to confirm that a given key has been deleted from the
+ * deletedTable metadata.
+ */
+public class RepeatedOmKeyInfo {
+  private List<OmKeyInfo> omKeyInfoList;
+
+  public RepeatedOmKeyInfo(List<OmKeyInfo> omKeyInfos) {
+    this.omKeyInfoList = omKeyInfos;
+  }
+
+  public RepeatedOmKeyInfo(OmKeyInfo omKeyInfos) {
+    this.omKeyInfoList = new ArrayList<>();
+    this.omKeyInfoList.add(omKeyInfos);
+  }
+
+  public void addOmKeyInfo(OmKeyInfo info) {
+    this.omKeyInfoList.add(info);
+  }
+
+  public List<OmKeyInfo> getOmKeyInfoList() {
+    return omKeyInfoList;
+  }
+
+  public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
+      repeatedKeyInfo) {
+    List<OmKeyInfo> list = new ArrayList<>();
+    for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
+      list.add(OmKeyInfo.getFromProtobuf(k));
+    }
+    return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build();
+  }
+
+  public RepeatedKeyInfo getProto() {
+    List<KeyInfo> list = new ArrayList<>();
+    for(OmKeyInfo k : omKeyInfoList) {
+      list.add(k.getProtobuf());
+    }
+
+    RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder()
+        .addAllKeyInfo(list);
+    return builder.build();
+  }
+
+  /**
+   * Builder of RepeatedOmKeyInfo.
+   */
+  public static class Builder {
+    private List<OmKeyInfo> omKeyInfos;
+
+    public Builder(){}
+
+    public Builder setOmKeyInfos(List<OmKeyInfo> infoList) {
+      this.omKeyInfos = infoList;
+      return this;
+    }
+
+    public RepeatedOmKeyInfo build() {
+      return new RepeatedOmKeyInfo(omKeyInfos);
+    }
+  }
+}
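
For illustration, a sketch of how the RepeatedOmKeyInfo class above bundles
repeated deletes and round-trips through the protobuf message persisted by
RepeatedOmKeyInfoCodec; the two OmKeyInfo parameters stand in for deletes of
the same key name and their construction is elided.

    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RepeatedKeyInfo;

    public final class RepeatedDeleteSketch {
      static RepeatedOmKeyInfo bundleDeletes(OmKeyInfo firstDelete,
          OmKeyInfo secondDelete) {
        RepeatedOmKeyInfo bundled = new RepeatedOmKeyInfo(firstDelete);
        bundled.addOmKeyInfo(secondDelete);   // both deletes share one DB entry
        // Round-trip through the protobuf form written to deletedTable.
        RepeatedKeyInfo proto = bundled.getProto();
        return RepeatedOmKeyInfo.getFromProto(proto);
      }
    }
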
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
index b5e6909..0fd6b08 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
@@ -20,6 +20,7 @@
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 
+import java.security.SecureRandom;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -48,10 +49,11 @@
    * Default constructor creates key with default values.
    * @throws Exception
    */
-  public GDPRSymmetricKey() throws Exception {
+  public GDPRSymmetricKey(SecureRandom secureRandom) throws Exception {
     algorithm = OzoneConsts.GDPR_ALGORITHM_NAME;
-    secret = RandomStringUtils
-        .randomAlphabetic(OzoneConsts.GDPR_DEFAULT_RANDOM_SECRET_LENGTH);
+    secret = RandomStringUtils.random(
+        OzoneConsts.GDPR_DEFAULT_RANDOM_SECRET_LENGTH,
+        0, 0, true, true, null, secureRandom);
     this.secretKey = new SecretKeySpec(
         secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm);
     this.cipher = Cipher.getInstance(algorithm);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
index 52e6d79..7e03095 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
@@ -84,13 +84,16 @@
    * milliseconds
    * @param dtRemoverScanInterval how often the tokens are scanned for expired
    * tokens in milliseconds
+   * @param certClient certificate client to SCM CA
    */
   public OzoneDelegationTokenSecretManager(OzoneConfiguration conf,
       long tokenMaxLifetime, long tokenRenewInterval,
       long dtRemoverScanInterval, Text service,
-      S3SecretManager s3SecretManager) throws IOException {
+      S3SecretManager s3SecretManager, CertificateClient certClient)
+      throws IOException {
     super(new SecurityConfig(conf), tokenMaxLifetime, tokenRenewInterval,
         service, LOG);
+    setCertClient(certClient);
     currentTokens = new ConcurrentHashMap();
     this.tokenRemoverScanInterval = dtRemoverScanInterval;
     this.s3SecretManager = (S3SecretManagerImpl) s3SecretManager;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
index 45d6e66..78f0565b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
@@ -70,6 +70,7 @@
    * @param tokenRenewInterval how often the tokens must be renewed in
    * milliseconds
    * @param service name of service
+   * @param logger logger for the secret manager
    */
   public OzoneSecretManager(SecurityConfig secureConf, long tokenMaxLifetime,
       long tokenRenewInterval, Text service, Logger logger) {
@@ -188,7 +189,7 @@
   public synchronized void start(CertificateClient client)
       throws IOException {
     Preconditions.checkState(!isRunning());
-    this.certClient = client;
+    setCertClient(client);
     updateCurrentKey(new KeyPair(certClient.getPublicKey(),
         certClient.getPrivateKey()));
     setIsRunning(true);
@@ -247,5 +248,9 @@
   public CertificateClient getCertClient() {
     return certClient;
   }
+
+  public void setCertClient(CertificateClient client) {
+    this.certClient = client;
+  }
 }
 
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index 0fd02ce..d82fdf2 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -308,6 +308,8 @@
     repeated hadoop.hdds.KeyValue metadata = 5;
     repeated OzoneAclInfo volumeAcls = 6;
     optional uint64 creationTime = 7;
+    optional uint64 objectID = 8;
+    optional uint64 updateID = 9;
 }
 
 /**
@@ -350,8 +352,10 @@
 
 }
 
-message VolumeList {
+message UserVolumeInfo {
     repeated string volumeNames = 1;
+    optional uint64 objectID = 2;
+    optional uint64 updateID = 3;
 }
 
 /**
@@ -596,7 +600,6 @@
 }
 
 message InfoBucketResponse {
-
     optional BucketInfo bucketInfo = 2;
 }
 
@@ -687,6 +690,10 @@
     repeated OzoneAclInfo acls = 13;
 }
 
+message RepeatedKeyInfo {
+    repeated KeyInfo keyInfo = 1;
+}
+
 message OzoneFileStatusProto {
     required hadoop.fs.FileStatusProto status = 1;
 }
@@ -751,7 +758,6 @@
 }
 
 message CreateKeyResponse {
-
     optional KeyInfo keyInfo = 2;
     // clients' followup request may carry this ID for stateful operations
     // (similar to a cookie).
@@ -764,7 +770,6 @@
 }
 
 message LookupKeyResponse {
-
     optional KeyInfo keyInfo = 2;
     // clients' followup request may carry this ID for stateful operations (similar
     // to a cookie).
@@ -840,7 +845,6 @@
 }
 
 message ListKeysResponse {
-
     repeated KeyInfo keyInfo = 2;
 }
 
@@ -949,8 +953,7 @@
 }
 
 message S3ListBucketsResponse {
-
-    repeated BucketInfo bucketInfo = 2;
+    repeated BucketInfo bucketInfo = 2;
 }
 
 message MultipartInfoInitiateRequest {
@@ -1020,7 +1023,6 @@
 }
 
 message MultipartUploadListPartsResponse {
-
     optional hadoop.hdds.ReplicationType type = 2;
     optional hadoop.hdds.ReplicationFactor factor = 3;
     optional uint32 nextPartNumberMarker = 4;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
index e0fdc90..39c6220 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
@@ -21,6 +21,8 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.security.SecureRandom;
+
 /**
  * Tests GDPRSymmetricKey structure.
  */
@@ -28,7 +30,7 @@
 
   @Test
   public void testKeyGenerationWithDefaults() throws Exception {
-    GDPRSymmetricKey gkey = new GDPRSymmetricKey();
+    GDPRSymmetricKey gkey = new GDPRSymmetricKey(new SecureRandom());
 
     Assert.assertTrue(gkey.getCipher().getAlgorithm()
         .equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME));
diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml
index fb04d8c..6e7b807 100644
--- a/hadoop-ozone/csi/pom.xml
+++ b/hadoop-ozone/csi/pom.xml
@@ -176,8 +176,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
           </excludeFilterFile>
diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
index 9525a9f..df19330 100755
--- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
+++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
@@ -16,6 +16,16 @@
 
 REPORT_DIR=${REPORT_DIR:-$PWD}
 
+_realpath() {
+  if realpath "$@" > /dev/null; then
+    realpath "$@"
+  else
+    local relative_to
+    relative_to=$(realpath "${1/--relative-to=/}") || return 1
+    realpath "$2" | sed -e "s@${relative_to}/@@"
+  fi
+}
+
 ## generate summary txt file
 find "." -name 'TEST*.xml' -print0 \
     | xargs -n1 -0 "grep" -l -E "<failure|<error" \
@@ -41,7 +51,7 @@
       DIR_OF_TESTFILE=$(dirname "$file")
       NAME_OF_TESTFILE=$(basename "$file")
       NAME_OF_TEST="${NAME_OF_TESTFILE%.*}"
-      DESTDIRNAME=$(realpath --relative-to="$PWD" "$DIR_OF_TESTFILE/../..")
+      DESTDIRNAME=$(_realpath --relative-to="$PWD" "$DIR_OF_TESTFILE/../..") || continue
       mkdir -p "$REPORT_DIR/$DESTDIRNAME"
       #shellcheck disable=SC2086
       cp -r "$DIR_OF_TESTFILE"/*$NAME_OF_TEST* "$REPORT_DIR/$DESTDIRNAME/"
@@ -55,8 +65,8 @@
     FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq)
 
     for FAILURE in $FAILURES; do
-        TEST_RESULT_LOCATION="$(realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")"
-        TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt/}"
+        TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")"
+        TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}"
         printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE"
     done
 done
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index 7a218a4..685bf14 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -36,7 +36,7 @@
   | tee "$REPORT_FILE"
 
 ## generate counter
-wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures"
+grep -c ':' "$REPORT_FILE" > "$REPORT_DIR/failures"
 
 if [[ -s "${REPORT_FILE}" ]]; then
    exit 1
diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh
index 3108bdd..ccbf2ed 100755
--- a/hadoop-ozone/dev-support/checks/findbugs.sh
+++ b/hadoop-ozone/dev-support/checks/findbugs.sh
@@ -16,7 +16,12 @@
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 cd "$DIR/../../.." || exit 1
 
-mvn -B compile -fn findbugs:check -Dfindbugs.failOnError=false  -f pom.ozone.xml
+if ! type unionBugs >/dev/null 2>&1 || ! type convertXmlToText >/dev/null 2>&1; then
+  mvn -B -fae compile spotbugs:check -f pom.ozone.xml
+  exit $?
+fi
+
+mvn -B -fae compile spotbugs:spotbugs -f pom.ozone.xml
 
 REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"}
 mkdir -p "$REPORT_DIR"
@@ -24,8 +29,9 @@
 
 touch "$REPORT_FILE"
 
-find hadoop-ozone -name findbugsXml.xml -print0 | xargs -0 -n1 convertXmlToText | tee -a "${REPORT_FILE}"
-find hadoop-hdds -name findbugsXml.xml -print0  | xargs -0 -n1 convertXmlToText | tee -a "${REPORT_FILE}"
+find hadoop-hdds hadoop-ozone -name spotbugsXml.xml -print0 | xargs -0 unionBugs -output "${REPORT_DIR}"/summary.xml
+convertXmlToText "${REPORT_DIR}"/summary.xml | tee -a "${REPORT_FILE}"
+convertXmlToText -html:fancy-hist.xsl "${REPORT_DIR}"/summary.xml "${REPORT_DIR}"/summary.html
 
 wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures"
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
index 31f74f0..468506c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-version: "3"
+version: "3.5"
 services:
   kdc:
     build:
@@ -23,17 +23,23 @@
       args:
         buildno: 1
     hostname: kdc
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/hadoop
   kms:
-      image: apache/hadoop:${HADOOP_VERSION}
-      ports:
+    image: apache/hadoop:${HADOOP_VERSION}
+    networks:
+      - ozone
+    ports:
       - 9600:9600
-      env_file:
+    env_file:
       - ./docker-config
-      command: ["hadoop", "kms"]
+    command: ["hadoop", "kms"]
   datanode:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/hadoop
     ports:
@@ -44,6 +50,8 @@
   om:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: om
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/hadoop
     ports:
@@ -56,6 +64,8 @@
   s3g:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: s3g
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/hadoop
     ports:
@@ -66,6 +76,8 @@
   scm:
     image: apache/ozone-runner:${HADOOP_RUNNER_VERSION}
     hostname: scm
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/hadoop
     ports:
@@ -78,6 +90,8 @@
   rm:
     image: apache/hadoop:${HADOOP_VERSION}
     hostname: rm
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/ozone
     ports:
@@ -90,6 +104,8 @@
   nm:
     image: apache/hadoop:${HADOOP_VERSION}
     hostname: nm
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/ozone
     env_file:
@@ -100,7 +116,10 @@
     command: ["yarn","nodemanager"]
   jhs:
     image: apache/hadoop:${HADOOP_VERSION}
+    container_name: jhs
     hostname: jhs
+    networks:
+      - ozone
     volumes:
       - ../..:/opt/ozone
     ports:
@@ -111,13 +130,3 @@
       HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
       WAIT_FOR: rm:8088
     command: ["yarn","timelineserver"]
-  spark:
-    image: ahadoop/spark-2.4:hadoop-3.2
-    hostname: spark
-    volumes:
-      - ../..:/opt/hadoop
-    ports:
-      - 4040:4040
-    env_file:
-      - docker-config
-    command: ["watch","-n","100000","ls"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
index f5c5fbd..be9dc1e 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
@@ -31,7 +31,7 @@
 OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
 OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM
-OZONE_SITE.XML_ozone.administrators=*
+OZONE-SITE.XML_ozone.administrators=*
 
 OZONE-SITE.XML_ozone.security.enabled=true
 OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM
@@ -62,9 +62,8 @@
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 
-CORE-SITE.xml_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
-CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/
+CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs
+CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.vol1/
 
 MAPRED-SITE.XML_mapreduce.framework.name=yarn
 MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME
@@ -76,12 +75,12 @@
 MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar
 
 YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user
-YARN_SITE.XML_yarn.timeline-service.enabled=true
-YARN_SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
-YARN_SITE.XML_yarn.timeline-service.hostname=jhs
+YARN-SITE.XML_yarn.timeline-service.enabled=true
+YARN-SITE.XML_yarn.timeline-service.generic.application.history.enabled=true
+YARN-SITE.XML_yarn.timeline-service.hostname=jhs
 YARN-SITE.XML_yarn.timeline-service.principal=jhs/jhs@EXAMPLE.COM
 YARN-SITE.XML_yarn.timeline-service.keytab=/etc/security/keytabs/jhs.keytab
-YARN_SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/
+YARN-SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/
 
 YARN-SITE.XML_yarn.nodemanager.principal=nm/_HOST@EXAMPLE.COM
 YARN-SITE.XML_yarn.nodemanager.keytab=/etc/security/keytabs/nm.keytab
@@ -94,15 +93,17 @@
 YARN-SITE.XML_yarn.resourcemanager.hostname=rm
 YARN-SITE.XML_yarn.resourcemanager.keytab=/etc/security/keytabs/rm.keytab
 YARN-SITE.XML_yarn.resourcemanager.principal=rm/rm@EXAMPLE.COM
-YARN_SITE_XML_yarn.resourcemanager.system.metrics.publisher.enabled=true
+YARN-SITE.XML_yarn.resourcemanager.system.metrics.publisher.enabled=true
 
 YARN-SITE.XML_yarn.log-aggregation-enable=true
-YARN-SITE.yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
-YARN-SITE.yarn.nodemanager.delete.debug-delay-sec=600
+YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600
+YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600
 
-YARN-SITE.yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
-YARN-SITE.yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
-YARN-SITE.yarn.nodemanager.linux-container-executor.group=hadoop
+# Yarn LinuxContainer requires the /opt/hadoop/etc/hadoop to be owned by root and not modifiable by other users,
+# which prevents start.sh from changing the configurations based on docker-config
+# YARN-SITE.XML_yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor
+# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop
 
 CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000
 CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1
@@ -175,4 +176,4 @@
 JAVA_HOME=/usr/lib/jvm/jre
 JSVC_HOME=/usr/bin
 SLEEP_SECONDS=5
-KERBEROS_ENABLED=true
\ No newline at end of file
+KERBEROS_ENABLED=true
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
new file mode 100755
index 0000000..cc6ebf0
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+export SECURITY_ENABLED=true
+
+start_docker_env
+
+execute_robot_test om kinit.robot
+
+execute_robot_test om createmrenv.robot
+
+# reinitialize the directories to use
+export OZONE_DIR=/opt/ozone
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+execute_robot_test rm kinit-hadoop.robot
+
+execute_robot_test rm mapreduce.robot
+
+stop_docker_env
+
+generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh
index 7883e87..afa5d56 100755
--- a/hadoop-ozone/dist/src/main/compose/test-all.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-all.sh
@@ -29,7 +29,7 @@
 RESULT=0
 IFS=$'\n'
 # shellcheck disable=SC2044
-for test in $(find "$SCRIPT_DIR" -name test.sh); do
+for test in $(find "$SCRIPT_DIR" -name test.sh | sort); do
   echo "Executing test in $(dirname "$test")"
 
   #required to read the .env file from the right location
@@ -41,7 +41,7 @@
       echo "ERROR: Test execution of $(dirname "$test") is FAILED!!!!"
   fi
   RESULT_DIR="$(dirname "$test")/result"
-  cp "$RESULT_DIR"/robot-*.xml "$ALL_RESULT_DIR"
+  cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/
 done
 
 rebot -N "smoketests" -d "$SCRIPT_DIR/result" "$SCRIPT_DIR/result/robot-*.xml"
diff --git a/hadoop-ozone/dist/src/main/compose/test-single.sh b/hadoop-ozone/dist/src/main/compose/test-single.sh
index f1203d3..629a9bc 100755
--- a/hadoop-ozone/dist/src/main/compose/test-single.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-single.sh
@@ -48,6 +48,8 @@
 # shellcheck source=testlib.sh
 source "$COMPOSE_DIR/../testlib.sh"
 
+create_results_dir
+
 execute_robot_test "$1" "$2"
 
 generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 9aa7c48..b20dca8 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -22,11 +22,14 @@
 RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result"
 SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest"
 
-#delete previous results
-rm -rf "$RESULT_DIR"
-mkdir -p "$RESULT_DIR"
-#Should be writeable from the docker containers where user is different.
-chmod ogu+w "$RESULT_DIR"
+## @description create results directory, purging any prior data
+create_results_dir() {
+  #delete previous results
+  rm -rf "$RESULT_DIR"
+  mkdir -p "$RESULT_DIR"
+  #Should be writeable from the docker containers where user is different.
+  chmod ogu+w "$RESULT_DIR"
+}
 
 ## @description print the number of datanodes up
 ## @param the docker-compose file
@@ -74,6 +77,7 @@
       sleep 2
    done
    echo "WARNING! Datanodes are not started successfully. Please check the docker-compose files"
+   return 1
 }
 
 ## @description  Starts a docker-compose based test environment
@@ -81,13 +85,16 @@
 start_docker_env(){
   local -i datanode_count=${1:-3}
 
-  docker-compose -f "$COMPOSE_FILE" down
-  docker-compose -f "$COMPOSE_FILE" up -d --scale datanode="${datanode_count}" \
+  create_results_dir
+
+  docker-compose -f "$COMPOSE_FILE" --no-ansi down
+  docker-compose -f "$COMPOSE_FILE" --no-ansi up -d --scale datanode="${datanode_count}" \
     && wait_for_datanodes "$COMPOSE_FILE" "${datanode_count}" \
     && sleep 10
 
   if [[ $? -gt 0 ]]; then
-    docker-compose -f "$COMPOSE_FILE" down
+    OUTPUT_NAME="$COMPOSE_ENV_NAME"
+    stop_docker_env
     return 1
   fi
 }
@@ -131,9 +138,9 @@
 
 ## @description  Stops a docker-compose based test environment (with saving the logs)
 stop_docker_env(){
-  docker-compose -f "$COMPOSE_FILE" logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log"
+  docker-compose -f "$COMPOSE_FILE" --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log"
   if [ "${KEEP_RUNNING:-false}" = false ]; then
-     docker-compose -f "$COMPOSE_FILE" down
+     docker-compose -f "$COMPOSE_FILE" --no-ansi down
   fi
 }
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot b/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot
new file mode 100644
index 0000000..f4705eb
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoketest Ozone GDPR Feature
+Library             OperatingSystem
+Library             BuiltIn
+Library             String
+Resource            ../commonlib.robot
+Suite Setup         Generate volume
+
+*** Variables ***
+${volume}    generated
+
+*** Keywords ***
+Generate volume
+   ${random} =         Generate Random String  5  [LOWER]
+   Set Suite Variable  ${volume}  ${random}
+
+*** Test Cases ***
+Test GDPR disabled
+  Test GDPR(disabled) without explicit options      ${volume}
+
+Test GDPR --enforcegdpr=true
+  Test GDPR with --enforcegdpr=true                 ${volume}
+
+Test GDPR -g=true
+  Test GDPR with -g=true                            ${volume}
+
+Test GDPR -g=false
+  Test GDPR with -g=false                            ${volume}
+
+*** Keywords ***
+Test GDPR(disabled) without explicit options
+    [arguments]     ${volume}
+                    Execute             ozone sh volume create /${volume} --quota 100TB
+                    Execute             ozone sh bucket create /${volume}/mybucket1
+    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket1") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       null
+                    Execute             ozone sh key put /${volume}/mybucket1/mykey /opt/hadoop/NOTICE.txt
+                    Execute             rm -f NOTICE.txt.1
+    ${result} =     Execute             ozone sh key info /${volume}/mybucket1/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       null
+                    Execute             ozone sh key delete /${volume}/mybucket1/mykey
+
+Test GDPR with --enforcegdpr=true
+    [arguments]     ${volume}
+                    Execute             ozone sh bucket create --enforcegdpr=true /${volume}/mybucket2
+    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket2 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket2") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       true
+                    Execute             ozone sh key put /${volume}/mybucket2/mykey /opt/hadoop/NOTICE.txt
+                    Execute             rm -f NOTICE.txt.1
+    ${result} =     Execute             ozone sh key info /${volume}/mybucket2/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       true
+                    Execute             ozone sh key delete /${volume}/mybucket2/mykey
+
+Test GDPR with -g=true
+    [arguments]     ${volume}
+                    Execute             ozone sh bucket create -g=true /${volume}/mybucket3
+    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket3 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket3") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       true
+                    Execute             ozone sh key put /${volume}/mybucket3/mykey /opt/hadoop/NOTICE.txt
+                    Execute             rm -f NOTICE.txt.1
+    ${result} =     Execute             ozone sh key info /${volume}/mybucket3/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       true
+                    Execute             ozone sh key delete /${volume}/mybucket3/mykey
+
+Test GDPR with -g=false
+    [arguments]     ${volume}
+                    Execute             ozone sh bucket create /${volume}/mybucket4
+    ${result} =     Execute             ozone sh bucket info /${volume}/mybucket4 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket4") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       null
+                    Execute             ozone sh key put /${volume}/mybucket4/mykey /opt/hadoop/NOTICE.txt
+                    Execute             rm -f NOTICE.txt.1
+    ${result} =     Execute             ozone sh key info /${volume}/mybucket4/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled'
+                    Should Be Equal     ${result}       null
+                    Execute             ozone sh key delete /${volume}/mybucket4/mykey
diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot b/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot
new file mode 100644
index 0000000..5d85555
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Kinit test user
+Library             OperatingSystem
+Resource            commonlib.robot
+Test Timeout        2 minute
+
+
+*** Test Cases ***
+Kinit
+   Kinit test user     hadoop     hadoop.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit.robot b/hadoop-ozone/dist/src/main/smoketest/kinit.robot
index 11df516..c9c1b75 100644
--- a/hadoop-ozone/dist/src/main/smoketest/kinit.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/kinit.robot
@@ -20,6 +20,9 @@
 Test Timeout        2 minute
 
 
+*** Variables ***
+${testuser}          testuser
+
 *** Test Cases ***
 Kinit
-   Kinit test user     testuser     testuser.keytab
+   Kinit test user     ${testuser}     ${testuser}.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
index a608677..789ec4f 100644
--- a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
@@ -17,7 +17,7 @@
 Documentation       Execute MR jobs
 Library             OperatingSystem
 Resource            commonlib.robot
-Test Timeout        2 minute
+Test Timeout        4 minute
 
 
 *** Variables ***
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
index 80b2f8f..8287334 100644
--- a/hadoop-ozone/insight/pom.xml
+++ b/hadoop-ozone/insight/pom.xml
@@ -92,9 +92,8 @@
       <version>1.19</version>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
@@ -118,8 +117,8 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
           </excludeFilterFile>
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java
index 95cda41..4c3875c 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java
@@ -31,6 +31,8 @@
 import org.apache.hadoop.ozone.insight.scm.NodeManagerInsight;
 import org.apache.hadoop.ozone.insight.scm.ReplicaManagerInsight;
 import org.apache.hadoop.ozone.insight.scm.ScmProtocolBlockLocationInsight;
+import org.apache.hadoop.ozone.insight.scm.ScmProtocolContainerLocationInsight;
+import org.apache.hadoop.ozone.insight.scm.ScmProtocolSecurityInsight;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 
 import picocli.CommandLine;
@@ -88,7 +90,10 @@
     insights.put("scm.event-queue", new EventQueueInsight());
     insights.put("scm.protocol.block-location",
         new ScmProtocolBlockLocationInsight());
-
+    insights.put("scm.protocol.container-location",
+        new ScmProtocolContainerLocationInsight());
+    insights.put("scm.protocol.security",
+        new ScmProtocolSecurityInsight());
     insights.put("om.key-manager", new KeyManagerInsight());
     insights.put("om.protocol.client", new OmProtocolInsight());
 
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java
index 73f1512..f67f641 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.ozone.insight.Component.Type;
 import org.apache.hadoop.ozone.insight.LoggerSource;
 import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
-import org.apache.hadoop.ozone.protocolPB.ScmBlockLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
 
 /**
  * Insight metric to check the SCM block location protocol behaviour.
@@ -42,9 +42,9 @@
         new LoggerSource(Type.SCM,
             ScmBlockLocationProtocolServerSideTranslatorPB.class,
             defaultLevel(verbose)));
-    new LoggerSource(Type.SCM,
+    loggers.add(new LoggerSource(Type.SCM,
         SCMBlockProtocolServer.class,
-        defaultLevel(verbose));
+        defaultLevel(verbose)));
     return loggers;
   }
 
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java
new file mode 100644
index 0000000..d6db589
--- /dev/null
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.insight.scm;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ozone.insight.BaseInsightPoint;
+import org.apache.hadoop.ozone.insight.Component.Type;
+import org.apache.hadoop.ozone.insight.LoggerSource;
+import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
+
+/**
+ * Insight metric to check the SCM container location protocol behaviour.
+ */
+public class ScmProtocolContainerLocationInsight extends BaseInsightPoint {
+
+  @Override
+  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
+    List<LoggerSource> loggers = new ArrayList<>();
+    loggers.add(
+        new LoggerSource(Type.SCM,
+            StorageContainerLocationProtocolServerSideTranslatorPB.class,
+            defaultLevel(verbose)));
+    loggers.add(new LoggerSource(Type.SCM,
+        StorageContainerLocationProtocolService.class,
+        defaultLevel(verbose)));
+    return loggers;
+  }
+
+  @Override
+  public List<MetricGroupDisplay> getMetrics() {
+    List<MetricGroupDisplay> metrics = new ArrayList<>();
+
+    Map<String, String> filter = new HashMap<>();
+    filter.put("servername", "StorageContainerLocationProtocolService");
+
+    addRpcMetrics(metrics, Type.SCM, filter);
+
+    addProtocolMessageMetrics(metrics, "scm_container_location_protocol",
+        Type.SCM, StorageContainerLocationProtocolProtos.Type.values());
+
+    return metrics;
+  }
+
+  @Override
+  public String getDescription() {
+    return "SCM Container location protocol endpoint";
+  }
+
+}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java
new file mode 100644
index 0000000..289af89
--- /dev/null
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.insight.scm;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
+import org.apache.hadoop.ozone.insight.BaseInsightPoint;
+import org.apache.hadoop.ozone.insight.Component.Type;
+import org.apache.hadoop.ozone.insight.LoggerSource;
+import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
+
+/**
+ * Insight metric to check the SCM datanode protocol behaviour.
+ */
+public class ScmProtocolDatanodeInsight extends BaseInsightPoint {
+
+  @Override
+  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
+    List<LoggerSource> loggers = new ArrayList<>();
+    loggers.add(
+        new LoggerSource(Type.SCM,
+            SCMDatanodeProtocolServer.class,
+            defaultLevel(verbose)));
+    loggers.add(
+        new LoggerSource(Type.SCM,
+            StorageContainerDatanodeProtocolServerSideTranslatorPB.class,
+            defaultLevel(verbose)));
+    return loggers;
+  }
+
+  @Override
+  public List<MetricGroupDisplay> getMetrics() {
+    List<MetricGroupDisplay> metrics = new ArrayList<>();
+
+    Map<String, String> filter = new HashMap<>();
+    filter.put("servername", "StorageContainerDatanodeProtocolService");
+
+    addRpcMetrics(metrics, Type.SCM, filter);
+
+    addProtocolMessageMetrics(metrics, "scm_datanode_protocol",
+        Type.SCM, StorageContainerDatanodeProtocolProtos.Type.values());
+
+    return metrics;
+  }
+
+  @Override
+  public String getDescription() {
+    return "SCM Datanode protocol endpoint";
+  }
+
+}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java
new file mode 100644
index 0000000..734da34
--- /dev/null
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.insight.scm;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
+import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.server.SCMSecurityProtocolServer;
+import org.apache.hadoop.ozone.insight.BaseInsightPoint;
+import org.apache.hadoop.ozone.insight.Component.Type;
+import org.apache.hadoop.ozone.insight.LoggerSource;
+import org.apache.hadoop.ozone.insight.MetricGroupDisplay;
+
+/**
+ * Insight metric to check the SCM security protocol behaviour.
+ */
+public class ScmProtocolSecurityInsight extends BaseInsightPoint {
+
+  @Override
+  public List<LoggerSource> getRelatedLoggers(boolean verbose) {
+    List<LoggerSource> loggers = new ArrayList<>();
+    loggers.add(
+        new LoggerSource(Type.SCM,
+            SCMSecurityProtocolServerSideTranslatorPB.class,
+            defaultLevel(verbose)));
+    loggers.add(new LoggerSource(Type.SCM,
+        SCMSecurityProtocolServer.class,
+        defaultLevel(verbose)));
+    return loggers;
+  }
+
+  @Override
+  public List<MetricGroupDisplay> getMetrics() {
+    List<MetricGroupDisplay> metrics = new ArrayList<>();
+
+    Map<String, String> filter = new HashMap<>();
+    filter.put("servername", "SCMSecurityProtocolService");
+
+    addRpcMetrics(metrics, Type.SCM, filter);
+
+    addProtocolMessageMetrics(metrics, "scm_security_protocol",
+        Type.SCM, SCMSecurityProtocolProtos.Type.values());
+
+    return metrics;
+  }
+
+  @Override
+  public String getDescription() {
+    return "SCM Block location protocol endpoint";
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
new file mode 100644
index 0000000..5643cb6
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.metrics;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.fail;
+
+/**
+ * Class used to test {@link SCMContainerManagerMetrics}.
+ */
+public class TestSCMContainerManagerMetrics {
+
+  private MiniOzoneCluster cluster;
+  private StorageContainerManager scm;
+  private String containerOwner = "OZONE";
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "3000s");
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+    scm = cluster.getStorageContainerManager();
+  }
+
+
+  @After
+  public void teardown() {
+    cluster.shutdown();
+  }
+
+  @Test
+  public void testContainerOpsMetrics() throws IOException {
+    MetricsRecordBuilder metrics;
+    ContainerManager containerManager = scm.getContainerManager();
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    long numSuccessfulCreateContainers = getLongCounter(
+        "NumSuccessfulCreateContainers", metrics);
+
+    ContainerInfo containerInfo = containerManager.allocateContainer(
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.ONE, containerOwner);
+
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
+        metrics), ++numSuccessfulCreateContainers);
+
+    try {
+      containerManager.allocateContainer(
+          HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE, containerOwner);
+      fail("testContainerOpsMetrics failed");
+    } catch (IOException ex) {
+      // Here it should fail, so it should have the old metric value.
+      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+      Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
+          metrics), numSuccessfulCreateContainers);
+      Assert.assertEquals(getLongCounter("NumFailureCreateContainers",
+          metrics), 1);
+    }
+
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    long numSuccessfulDeleteContainers = getLongCounter(
+        "NumSuccessfulDeleteContainers", metrics);
+
+    containerManager.deleteContainer(
+        new ContainerID(containerInfo.getContainerID()));
+
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
+        metrics), numSuccessfulDeleteContainers + 1);
+
+
+    try {
+      // Give random container to delete.
+      containerManager.deleteContainer(
+          new ContainerID(RandomUtils.nextLong(10000, 20000)));
+      fail("testContainerOpsMetrics failed");
+    } catch (IOException ex) {
+      // Here it should fail, so it should have the old metric value.
+      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+      Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
+          metrics), numSuccessfulDeleteContainers + 1);
+      Assert.assertEquals(getLongCounter("NumFailureDeleteContainers",
+          metrics), 1);
+    }
+
+    containerManager.listContainer(
+        new ContainerID(containerInfo.getContainerID()), 1);
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assert.assertEquals(getLongCounter("NumListContainerOps",
+        metrics), 1);
+
+  }
+
+  @Test
+  public void testReportProcessingMetrics() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    String key = "key1";
+
+    MetricsRecordBuilder metrics =
+        getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assert.assertEquals(getLongCounter("NumContainerReportsProcessedSuccessful",
+        metrics), 1);
+
+    // Create key should create container on DN.
+    cluster.getRpcClient().getObjectStore().getClientProxy()
+        .createVolume(volumeName);
+    cluster.getRpcClient().getObjectStore().getClientProxy()
+        .createBucket(volumeName, bucketName);
+    OzoneOutputStream ozoneOutputStream = cluster.getRpcClient()
+        .getObjectStore().getClientProxy().createKey(volumeName, bucketName,
+            key, 0, ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    String data = "file data";
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+
+    GenericTestUtils.waitFor(() -> {
+      final MetricsRecordBuilder scmMetrics =
+          getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+      return getLongCounter("NumICRReportsProcessedSuccessful",
+          scmMetrics) == 1;
+    }, 1000, 500000);
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 00144e4..4b3d5d6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -139,9 +139,15 @@
   @Test
   public void testCreatePipelinesDnExclude() throws IOException {
 
-    // We have 10 DNs in MockNodeManager.
+    // We need 9 Healthy DNs in MockNodeManager.
+    NodeManager mockNodeManager = new MockNodeManager(true, 12);
+    PipelineStateManager stateManagerMock =
+        new PipelineStateManager(new OzoneConfiguration());
+    PipelineProvider providerMock = new MockRatisPipelineProvider(
+        mockNodeManager, stateManagerMock, new OzoneConfiguration());
+
     // Use up first 3 DNs for an open pipeline.
-    List<DatanodeDetails> openPiplineDns = nodeManager.getAllNodes()
+    List<DatanodeDetails> openPiplineDns = mockNodeManager.getAllNodes()
         .subList(0, 3);
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
 
@@ -153,10 +159,10 @@
         .setId(PipelineID.randomId())
         .build();
 
-    stateManager.addPipeline(openPipeline);
+    stateManagerMock.addPipeline(openPipeline);
 
     // Use up next 3 DNs also for an open pipeline.
-    List<DatanodeDetails> moreOpenPiplineDns = nodeManager.getAllNodes()
+    List<DatanodeDetails> moreOpenPiplineDns = mockNodeManager.getAllNodes()
         .subList(3, 6);
     Pipeline anotherOpenPipeline = Pipeline.newBuilder()
         .setType(HddsProtos.ReplicationType.RATIS)
@@ -165,10 +171,10 @@
         .setState(Pipeline.PipelineState.OPEN)
         .setId(PipelineID.randomId())
         .build();
-    stateManager.addPipeline(anotherOpenPipeline);
+    stateManagerMock.addPipeline(anotherOpenPipeline);
 
     // Use up next 3 DNs also for a closed pipeline.
-    List<DatanodeDetails> closedPiplineDns = nodeManager.getAllNodes()
+    List<DatanodeDetails> closedPiplineDns = mockNodeManager.getAllNodes()
         .subList(6, 9);
     Pipeline anotherClosedPipeline = Pipeline.newBuilder()
         .setType(HddsProtos.ReplicationType.RATIS)
@@ -177,9 +183,9 @@
         .setState(Pipeline.PipelineState.CLOSED)
         .setId(PipelineID.randomId())
         .build();
-    stateManager.addPipeline(anotherClosedPipeline);
+    stateManagerMock.addPipeline(anotherClosedPipeline);
 
-    Pipeline pipeline = provider.create(factor);
+    Pipeline pipeline = providerMock.create(factor);
     Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
@@ -193,8 +199,8 @@
         (openPiplineDns.contains(dn) || moreOpenPiplineDns.contains(dn)))
         .count() == 0);
 
-    // Since we have only 10 DNs, at least 1 pipeline node should have been
-    // from the closed pipeline DN list.
+    // Since we have only 9 Healthy DNs, at least 1 pipeline node should have
+    // been from the closed pipeline DN list.
     Assert.assertTrue(pipelineNodes.parallelStream().filter(
         closedPiplineDns::contains).count() > 0);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 7b90815..9ac45b8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -354,6 +354,71 @@
   }
 
   @Test
+  public void testApplyTransactionIdempotencyWithClosedContainer()
+      throws Exception {
+    OzoneOutputStream key =
+        objectStore.getVolume(volumeName).getBucket(bucketName)
+            .createKey("ratis", 1024, ReplicationType.RATIS,
+                ReplicationFactor.ONE, new HashMap<>());
+    // First write and flush creates a container in the datanode
+    key.write("ratis".getBytes());
+    key.flush();
+    key.write("ratis".getBytes());
+    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
+    List<OmKeyLocationInfo> locationInfoList =
+        groupOutputStream.getLocationInfoList();
+    Assert.assertEquals(1, locationInfoList.size());
+    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+    ContainerData containerData =
+        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+            .getContainer().getContainerSet()
+            .getContainer(omKeyLocationInfo.getContainerID())
+            .getContainerData();
+    Assert.assertTrue(containerData instanceof KeyValueContainerData);
+    key.close();
+    ContainerStateMachine stateMachine =
+        (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster);
+    SimpleStateMachineStorage storage =
+        (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
+    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
+    // Since the snapshot threshold is set to 1 and applyTransaction calls have
+    // already happened, we should see snapshots.
+    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+    FileInfo snapshot = storage.findLatestSnapshot().getFile();
+    Assert.assertNotNull(snapshot);
+    long containerID = omKeyLocationInfo.getContainerID();
+    Pipeline pipeline = cluster.getStorageContainerLocationClient()
+        .getContainerWithPipeline(containerID).getPipeline();
+    XceiverClientSpi xceiverClient =
+        xceiverClientManager.acquireClient(pipeline);
+    ContainerProtos.ContainerCommandRequestProto.Builder request =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder();
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
+    request.setCmdType(ContainerProtos.Type.CloseContainer);
+    request.setContainerID(containerID);
+    request.setCloseContainer(
+        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
+    try {
+      xceiverClient.sendCommand(request.build());
+    } catch (IOException e) {
+      Assert.fail("Exception should not be thrown");
+    }
+    Assert.assertTrue(
+        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
+            .getContainer().getContainerSet().getContainer(containerID)
+            .getContainerState()
+            == ContainerProtos.ContainerDataProto.State.CLOSED);
+    Assert.assertTrue(stateMachine.isStateMachineHealthy());
+    try {
+      stateMachine.takeSnapshot();
+    } catch (IOException ioe) {
+      Assert.fail("Exception should not be thrown");
+    }
+    FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
+    Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
+  }
+
+  @Test
   public void testValidateBCSIDOnDnRestart() throws Exception {
     OzoneOutputStream key =
         objectStore.getVolume(volumeName).getBucket(bucketName)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index d91f7393..9189c2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -86,6 +86,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -2667,7 +2668,7 @@
    * @throws Exception
    */
   @Test
-  public void testGDPR() throws Exception {
+  public void testKeyReadWriteForGDPR() throws Exception {
     //Step 1
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
@@ -2733,4 +2734,77 @@
     Assert.assertNotEquals(text, new String(fileContent));
 
   }
+
+  /**
+   * Tests deletedKey for GDPR.
+   * 1. Create GDPR Enabled bucket.
+   * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
+   * 3. Read key and validate the content/metadata is as expected because the
+   * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo
+   * Metadata.
+   * 4. Delete this key in GDPR enabled bucket
+   * 5. Confirm the deleted key metadata in deletedTable does not contain the
+   * GDPR encryption details (flag, secret, algorithm).
+   * @throws Exception
+   */
+  @Test
+  public void testDeletedKeyForGDPR() throws Exception {
+    //Step 1
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    BucketArgs args = BucketArgs.newBuilder()
+        .addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
+    volume.createBucket(bucketName, args);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, bucket.getName());
+    Assert.assertNotNull(bucket.getMetadata());
+    Assert.assertEquals("true",
+        bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
+
+    //Step 2
+    String text = "hello world";
+    Map<String, String> keyMetadata = new HashMap<>();
+    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
+    OzoneOutputStream out = bucket.createKey(keyName,
+        text.getBytes().length, STAND_ALONE, ONE, keyMetadata);
+    out.write(text.getBytes());
+    out.close();
+
+    //Step 3
+    OzoneKeyDetails key = bucket.getKey(keyName);
+
+    Assert.assertEquals(keyName, key.getName());
+    Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
+    Assert.assertEquals("AES",
+        key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
+    Assert.assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null);
+
+    OzoneInputStream is = bucket.readKey(keyName);
+    byte[] fileContent = new byte[text.getBytes().length];
+    is.read(fileContent);
+    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
+        keyName, STAND_ALONE,
+        ONE));
+    Assert.assertEquals(text, new String(fileContent));
+
+    //Step 4
+    bucket.deleteKey(keyName);
+
+    //Step 5
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+    RepeatedOmKeyInfo deletedKeys =
+        omMetadataManager.getDeletedTable().get(objectKey);
+    Map<String, String> deletedKeyMetadata =
+        deletedKeys.getOmKeyInfoList().get(0).getMetadata();
+    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
+    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
+    Assert.assertFalse(
+        deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index 77f0dfc..2716d51 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -119,10 +119,13 @@
     String omNode1Id = "omNode1";
     String omNode2Id = "omNode2";
     String omNodesKeyValue = omNode1Id + "," + omNode2Id;
-    conf.set(OMConfigKeys.OZONE_OM_NODES_KEY, omNodesKeyValue);
+    String serviceID = "service1";
+    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID);
+    conf.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID,
+        omNodesKeyValue);
 
-    String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(null, omNode1Id);
-    String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(null, omNode2Id);
+    String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode1Id);
+    String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode2Id);
 
     conf.set(omNode1RpcAddrKey, "0.0.0.0");
     conf.set(omNode2RpcAddrKey, "122.0.0.122");
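
For reference, a minimal sketch of the HA key layout exercised by this hunk follows. Only the use of OZONE_OM_SERVICE_IDS_KEY and the service-id-suffixed OZONE_OM_NODES_KEY comes from the patch; the OZONE_OM_ADDRESS_KEY constant and the "<serviceId>.<nodeId>" suffix order for the RPC address keys are assumptions based on getOMAddrKeyWithSuffix.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

/** Hedged sketch of a two-node OM HA configuration for a single service id. */
public final class OmHaConfigSketch {

  private OmHaConfigSketch() {
  }

  public static OzoneConfiguration sampleHaConfig() {
    OzoneConfiguration conf = new OzoneConfiguration();
    String serviceId = "service1";

    conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceId);
    // Once service ids are declared, the node list key is suffixed with the service id.
    conf.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceId,
        "omNode1,omNode2");
    // RPC address keys are assumed to be suffixed with "<serviceId>.<nodeId>".
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY + "." + serviceId + ".omNode1",
        "0.0.0.0");
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY + "." + serviceId + ".omNode2",
        "122.0.0.122");
    return conf;
  }
}
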
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
new file mode 100644
index 0000000..5ca2eea
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * Test RocksDB logging for Ozone Manager.
+ */
+public class TestOzoneManagerRocksDBLogging {
+  private MiniOzoneCluster cluster = null;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(60000);
+
+  @Before
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set("hadoop.hdds.db.rocksdb.logging.enabled", "true");
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    cluster =  MiniOzoneCluster.newBuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOmId(omId)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testOMRocksDBLoggingEnabled() throws Exception {
+
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+        .captureLogs(DBStoreBuilder.ROCKS_DB_LOGGER);
+    cluster.restartOzoneManager();
+    GenericTestUtils.waitFor(() -> logCapturer.getOutput()
+            .contains("db_impl.cc"),
+        1000, 10000);
+
+    cluster.getConf().set("hadoop.hdds.db.rocksdb.logging.enabled", "false");
+    cluster.restartOzoneManager();
+    logCapturer.clearOutput();
+    try {
+      GenericTestUtils.waitFor(() -> logCapturer.getOutput()
+              .contains("db_impl.cc"),
+          1000, 10000);
+      Assert.fail();
+    } catch (TimeoutException ex) {
+      Assert.assertTrue(ex.getMessage().contains("Timed out"));
+    }
+  }
+
+}
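
A minimal sketch of the configuration toggled by this test follows; only the property name is taken from the test above, and the comment about where the log lines surface reflects the DBStoreBuilder.ROCKS_DB_LOGGER capture used there.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/** Hedged sketch: toggling RocksDB internal logging as the test above does. */
public final class RocksDbLoggingConfigSketch {

  private RocksDbLoggingConfigSketch() {
  }

  // When enabled, the test above expects RocksDB output (e.g. lines mentioning
  // db_impl.cc) to be captured through DBStoreBuilder.ROCKS_DB_LOGGER.
  public static OzoneConfiguration withRocksDbLogging(boolean enabled) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("hadoop.hdds.db.rocksdb.logging.enabled", String.valueOf(enabled));
    return conf;
  }
}
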
diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml
index fa68398..653209b 100644
--- a/hadoop-ozone/ozone-manager/pom.xml
+++ b/hadoop-ozone/ozone-manager/pom.xml
@@ -57,9 +57,8 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 5b6ac42..bae71bf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
@@ -85,6 +86,7 @@
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -781,9 +783,12 @@
           return;
         }
       }
-      metadataManager.getStore().move(objectKey,
-          metadataManager.getKeyTable(),
-          metadataManager.getDeletedTable());
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          metadataManager.getDeletedTable().get(objectKey);
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo,
+          repeatedOmKeyInfo);
+      metadataManager.getKeyTable().delete(objectKey);
+      metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
     } catch (OMException ex) {
       throw ex;
     } catch (IOException ex) {
@@ -1003,7 +1008,11 @@
         // will not be garbage collected, so move this part to delete table
         // and throw error
         // Move this part to delete table.
-        metadataManager.getDeletedTable().put(partName, keyInfo);
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            metadataManager.getDeletedTable().get(partName);
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            keyInfo, repeatedOmKeyInfo);
+        metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
         throw new OMException("No such Multipart upload is with specified " +
             "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
       } else {
@@ -1031,9 +1040,20 @@
           // Add the new entry in to the list of part keys.
           DBStore store = metadataManager.getStore();
           try (BatchOperation batch = store.initBatchOperation()) {
+            OmKeyInfo partKey = OmKeyInfo.getFromProtobuf(
+                oldPartKeyInfo.getPartKeyInfo());
+
+            RepeatedOmKeyInfo repeatedOmKeyInfo =
+                metadataManager.getDeletedTable()
+                    .get(oldPartKeyInfo.getPartName());
+
+            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+                partKey, repeatedOmKeyInfo);
+
+            metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
             metadataManager.getDeletedTable().putWithBatch(batch,
                 oldPartKeyInfo.getPartName(),
-                OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
+                repeatedOmKeyInfo);
             metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey);
             metadataManager.getMultipartInfoTable().putWithBatch(batch,
                 multipartKey, multipartKeyInfo);
@@ -1252,8 +1272,16 @@
             PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
             OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
                 partKeyInfo.getPartKeyInfo());
+
+            RepeatedOmKeyInfo repeatedOmKeyInfo =
+                metadataManager.getDeletedTable()
+                    .get(partKeyInfo.getPartName());
+
+            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+                currentKeyPartInfo, repeatedOmKeyInfo);
+
             metadataManager.getDeletedTable().putWithBatch(batch,
-                partKeyInfo.getPartName(), currentKeyPartInfo);
+                partKeyInfo.getPartName(), repeatedOmKeyInfo);
           }
           // Finally delete the entry from the multipart info table and open
           // key table
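
The recurring pattern in the hunks above (fetch any existing RepeatedOmKeyInfo stored under the object key, append the key being removed via OmUtils.prepareKeyForDelete, then update the key and deleted tables) can be summarized with the following hedged sketch; locking, batching, and the OMException handling in KeyManagerImpl are deliberately left out, so this is an illustration rather than the patched method itself.

import java.io.IOException;

import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

/** Hedged sketch of the deletedTable read-append-write pattern shown above. */
public final class DeletedTableUpdateSketch {

  private DeletedTableUpdateSketch() {
  }

  // Moves a key into the deleted table while preserving any previously deleted
  // versions stored under the same object key (non-batch variant of the code above).
  static void moveKeyToDeletedTable(OMMetadataManager metadataManager,
      String objectKey, OmKeyInfo keyInfo) throws IOException {
    RepeatedOmKeyInfo repeatedOmKeyInfo =
        metadataManager.getDeletedTable().get(objectKey);
    repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo, repeatedOmKeyInfo);
    metadataManager.getKeyTable().delete(objectKey);
    metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
  }
}
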
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 618b6aa..6c08591 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -44,9 +44,10 @@
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
+import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
 import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
-import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
+import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -57,9 +58,11 @@
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -87,31 +90,31 @@
    * OM DB stores metadata as KV pairs in different column families.
    * <p>
    * OM DB Schema:
-   * |-------------------------------------------------------------------|
-   * |  Column Family     |        VALUE                                 |
-   * |-------------------------------------------------------------------|
-   * | userTable          |     user->VolumeList                         |
-   * |-------------------------------------------------------------------|
-   * | volumeTable        |     /volume->VolumeInfo                      |
-   * |-------------------------------------------------------------------|
-   * | bucketTable        |     /volume/bucket-> BucketInfo              |
-   * |-------------------------------------------------------------------|
-   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo      |
-   * |-------------------------------------------------------------------|
-   * | deletedTable       | /volumeName/bucketName/keyName->KeyInfo      |
-   * |-------------------------------------------------------------------|
-   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo   |
-   * |-------------------------------------------------------------------|
-   * | s3Table            | s3BucketName -> /volumeName/bucketName       |
-   * |-------------------------------------------------------------------|
-   * | s3SecretTable      | s3g_access_key_id -> s3Secret                |
-   * |-------------------------------------------------------------------|
-   * | dTokenTable        | s3g_access_key_id -> s3Secret                |
-   * |-------------------------------------------------------------------|
-   * | prefixInfoTable    | prefix -> PrefixInfo                         |
-   * |-------------------------------------------------------------------|
-   * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...|
-   * |-------------------------------------------------------------------|
+   * |----------------------------------------------------------------------|
+   * |  Column Family     |        VALUE                                    |
+   * |----------------------------------------------------------------------|
+   * | userTable          |     /user->UserVolumeInfo                       |
+   * |----------------------------------------------------------------------|
+   * | volumeTable        |     /volume->VolumeInfo                         |
+   * |----------------------------------------------------------------------|
+   * | bucketTable        |     /volume/bucket-> BucketInfo                 |
+   * |----------------------------------------------------------------------|
+   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
+   * |----------------------------------------------------------------------|
+   * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
+   * |----------------------------------------------------------------------|
+   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
+   * |----------------------------------------------------------------------|
+   * | s3Table            | s3BucketName -> /volumeName/bucketName          |
+   * |----------------------------------------------------------------------|
+   * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
+   * |----------------------------------------------------------------------|
+   * | dTokenTable        | OzoneTokenIdentifier -> renewTime (Long)        |
+   * |----------------------------------------------------------------------|
+   * | prefixInfoTable    | prefix -> PrefixInfo                            |
+   * |----------------------------------------------------------------------|
+   * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
+   * |----------------------------------------------------------------------|
    */
 
   public static final String USER_TABLE = "userTable";
@@ -168,7 +171,7 @@
   }
 
   @Override
-  public Table<String, VolumeList> getUserTable() {
+  public Table<String, UserVolumeInfo> getUserTable() {
     return userTable;
   }
 
@@ -192,7 +195,7 @@
   }
 
   @Override
-  public Table<String, OmKeyInfo> getDeletedTable() {
+  public Table<String, RepeatedOmKeyInfo> getDeletedTable() {
     return deletedTable;
   }
 
@@ -261,9 +264,10 @@
         .addTable(PREFIX_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec())
+        .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec())
         .addCodec(OmBucketInfo.class, new OmBucketInfoCodec())
         .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec())
-        .addCodec(VolumeList.class, new VolumeListCodec())
+        .addCodec(UserVolumeInfo.class, new UserVolumeInfoCodec())
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec());
@@ -276,7 +280,7 @@
    */
   protected void initializeOmTables() throws IOException {
     userTable =
-        this.store.getTable(USER_TABLE, String.class, VolumeList.class);
+        this.store.getTable(USER_TABLE, String.class, UserVolumeInfo.class);
     checkTableStatus(userTable, USER_TABLE);
 
     TableCacheImpl.CacheCleanupPolicy cleanupPolicy =
@@ -296,8 +300,8 @@
     keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class);
     checkTableStatus(keyTable, KEY_TABLE);
 
-    deletedTable =
-        this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class);
+    deletedTable = this.store.getTable(DELETED_TABLE, String.class,
+        RepeatedOmKeyInfo.class);
     checkTableStatus(deletedTable, DELETED_TABLE);
 
     openKeyTable =
@@ -703,7 +707,7 @@
   public List<OmVolumeArgs> listVolumes(String userName,
       String prefix, String startKey, int maxKeys) throws IOException {
     List<OmVolumeArgs> result = Lists.newArrayList();
-    VolumeList volumes;
+    UserVolumeInfo volumes;
     if (StringUtil.isBlank(userName)) {
       throw new OMException("User name is required to list Volumes.",
           ResultCodes.USER_NOT_FOUND);
@@ -744,15 +748,15 @@
     return result;
   }
 
-  private VolumeList getVolumesByUser(String userNameKey)
+  private UserVolumeInfo getVolumesByUser(String userNameKey)
       throws OMException {
     try {
-      VolumeList volumeList = getUserTable().get(userNameKey);
-      if (volumeList == null) {
+      UserVolumeInfo userVolInfo = getUserTable().get(userNameKey);
+      if (userVolInfo == null) {
         // No volume found for this user, return an empty list
-        return VolumeList.newBuilder().build();
+        return UserVolumeInfo.newBuilder().build();
       } else {
-        return volumeList;
+        return userVolInfo;
       }
     } catch (IOException e) {
       throw new OMException("Unable to get volumes info by the given user, "
@@ -765,25 +769,26 @@
   public List<BlockGroup> getPendingDeletionKeys(final int keyCount)
       throws IOException {
     List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
-        getDeletedTable()
-            .iterator()) {
+    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
+             keyIter = getDeletedTable().iterator()) {
       int currentCount = 0;
       while (keyIter.hasNext() && currentCount < keyCount) {
-        KeyValue<String, OmKeyInfo> kv = keyIter.next();
+        KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
         if (kv != null) {
-          OmKeyInfo info = kv.getValue();
+          RepeatedOmKeyInfo infoList = kv.getValue();
           // Get block keys as a list.
-          OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
-          List<BlockID> item = latest.getLocationList().stream()
-              .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
-              .collect(Collectors.toList());
-          BlockGroup keyBlocks = BlockGroup.newBuilder()
-              .setKeyName(kv.getKey())
-              .addAllBlockIDs(item)
-              .build();
-          keyBlocksList.add(keyBlocks);
-          currentCount++;
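+          // A RepeatedOmKeyInfo value may hold multiple deleted OmKeyInfo
+          // entries for the same key name; build one BlockGroup per entry.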
+          for (OmKeyInfo info : infoList.getOmKeyInfoList()) {
+            OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
+            List<BlockID> item = latest.getLocationList().stream()
+                .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
+                .collect(Collectors.toList());
+            BlockGroup keyBlocks = BlockGroup.newBuilder()
+                .setKeyName(kv.getKey())
+                .addAllBlockIDs(item)
+                .build();
+            keyBlocksList.add(keyBlocks);
+            currentCount++;
+          }
         }
       }
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index d2d7256..a6503d7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -74,9 +73,10 @@
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
+import org.apache.hadoop.ozone.om.ha.OMHANodeDetails;
+import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol;
@@ -207,10 +207,6 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD;
@@ -310,12 +306,32 @@
     super(OzoneVersionInfo.OZONE_VERSION_INFO);
     Preconditions.checkNotNull(conf);
     configuration = conf;
+    // Load HA related configurations
+    OMHANodeDetails omhaNodeDetails =
+        OMHANodeDetails.loadOMHAConfig(configuration);
+
+    this.peerNodes = omhaNodeDetails.getPeerNodeDetails();
+    this.omNodeDetails = omhaNodeDetails.getLocalNodeDetails();
+
+    omStorage = new OMStorage(conf);
+    omId = omStorage.getOmId();
+
+    // In the case of a single-node OM service, no OM Node ID is specified;
+    // set it to the value from OM storage.
+    if (this.omNodeDetails.getOMNodeId() == null) {
+      this.omNodeDetails =
+          OMHANodeDetails.getOMNodeDetails(conf, omNodeDetails.getOMServiceId(),
+              omStorage.getOmId(), omNodeDetails.getRpcAddress(),
+              omNodeDetails.getRatisPort());
+    }
+
+    loginOMUserIfSecurityEnabled(conf);
+
     this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
         OZONE_OM_USER_MAX_VOLUME_DEFAULT);
     Preconditions.checkArgument(this.maxUserVolumeCount > 0,
         OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero");
-    omStorage = new OMStorage(conf);
-    omId = omStorage.getOmId();
+
     if (omStorage.getState() != StorageState.INITIALIZED) {
       throw new OMException("OM not initialized.",
           ResultCodes.OM_NOT_INITIALIZED);
@@ -342,8 +358,7 @@
         OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
         OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);
 
-    // Load HA related configurations
-    loadOMHAConfigs(configuration);
+
     InetSocketAddress omNodeRpcAddr = omNodeDetails.getRpcAddress();
     omRpcAddressTxt = new Text(omNodeDetails.getRpcAddressString());
 
@@ -420,7 +435,7 @@
             OzoneManagerProtocolProtos.Type.values());
 
     // Start Om Rpc Server.
-    omRpcServer = getRpcServer(conf);
+    omRpcServer = getRpcServer(configuration);
     omRpcAddress = updateRPCListenAddress(configuration,
         OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
 
@@ -513,195 +528,6 @@
     return grpcBlockTokenEnabled;
   }
 
-  /**
-   * Inspects and loads OM node configurations.
-   *
-   * If {@link OMConfigKeys#OZONE_OM_SERVICE_IDS_KEY} is configured with
-   * multiple ids and/ or if {@link OMConfigKeys#OZONE_OM_NODE_ID_KEY} is not
-   * specifically configured , this method determines the omServiceId
-   * and omNodeId by matching the node's address with the configured
-   * addresses. When a match is found, it sets the omServicId and omNodeId from
-   * the corresponding configuration key. This method also finds the OM peers
-   * nodes belonging to the same OM service.
-   *
-   * @param conf
-   */
-  private void loadOMHAConfigs(Configuration conf) {
-    InetSocketAddress localRpcAddress = null;
-    String localOMServiceId = null;
-    String localOMNodeId = null;
-    int localRatisPort = 0;
-    Collection<String> omServiceIds = conf.getTrimmedStringCollection(
-        OZONE_OM_SERVICE_IDS_KEY);
-
-    String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY);
-    int found = 0;
-    boolean isOMAddressSet = false;
-
-    for (String serviceId : OmUtils.emptyAsSingletonNull(omServiceIds)) {
-      Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
-
-      List<OMNodeDetails> peerNodesList = new ArrayList<>();
-      boolean isPeer = false;
-      for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
-        if (knownOMNodeId != null && !knownOMNodeId.equals(nodeId)) {
-          isPeer = true;
-        } else {
-          isPeer = false;
-        }
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
-            serviceId, nodeId);
-        String rpcAddrStr = OmUtils.getOmRpcAddress(conf, rpcAddrKey);
-        if (rpcAddrStr == null) {
-          continue;
-        }
-
-        // If OM address is set for any node id, we will not fallback to the
-        // default
-        isOMAddressSet = true;
-
-        String ratisPortKey = OmUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY,
-            serviceId, nodeId);
-        int ratisPort = conf.getInt(ratisPortKey, OZONE_OM_RATIS_PORT_DEFAULT);
-
-        InetSocketAddress addr = null;
-        try {
-          addr = NetUtils.createSocketAddr(rpcAddrStr);
-        } catch (Exception e) {
-          LOG.warn("Exception in creating socket address " + addr, e);
-          continue;
-        }
-        if (!addr.isUnresolved()) {
-          if (!isPeer && OmUtils.isAddressLocal(addr)) {
-            localRpcAddress = addr;
-            localOMServiceId = serviceId;
-            localOMNodeId = nodeId;
-            localRatisPort = ratisPort;
-            found++;
-          } else {
-            // This OMNode belongs to same OM service as the current OMNode.
-            // Add it to peerNodes list.
-            String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf,
-                serviceId, nodeId, addr.getHostName());
-            String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf,
-                serviceId, nodeId, addr.getHostName());
-            OMNodeDetails peerNodeInfo = new OMNodeDetails.Builder()
-                .setOMServiceId(serviceId)
-                .setOMNodeId(nodeId)
-                .setRpcAddress(addr)
-                .setRatisPort(ratisPort)
-                .setHttpAddress(httpAddr)
-                .setHttpsAddress(httpsAddr)
-                .build();
-            peerNodesList.add(peerNodeInfo);
-          }
-        }
-      }
-      if (found == 1) {
-        LOG.debug("Found one matching OM address with service ID: {} and node" +
-                " ID: {}", localOMServiceId, localOMNodeId);
-
-        setOMNodeDetails(localOMServiceId, localOMNodeId, localRpcAddress,
-            localRatisPort);
-
-        this.peerNodes = peerNodesList;
-
-        LOG.info("Found matching OM address with OMServiceId: {}, " +
-            "OMNodeId: {}, RPC Address: {} and Ratis port: {}",
-            localOMServiceId, localOMNodeId,
-            NetUtils.getHostPortString(localRpcAddress), localRatisPort);
-        return;
-      } else if (found > 1) {
-        String msg = "Configuration has multiple " + OZONE_OM_ADDRESS_KEY +
-            " addresses that match local node's address. Please configure the" +
-            " system with " + OZONE_OM_SERVICE_IDS_KEY + " and " +
-            OZONE_OM_ADDRESS_KEY;
-        throw new OzoneIllegalArgumentException(msg);
-      }
-    }
-
-    if (!isOMAddressSet) {
-      // No OM address is set. Fallback to default
-      InetSocketAddress omAddress = OmUtils.getOmAddress(conf);
-      int ratisPort = conf.getInt(OZONE_OM_RATIS_PORT_KEY,
-          OZONE_OM_RATIS_PORT_DEFAULT);
-
-      LOG.info("Configuration either no {} set. Falling back to the default " +
-          "OM address {}", OZONE_OM_ADDRESS_KEY, omAddress);
-
-      setOMNodeDetails(null, null, omAddress, ratisPort);
-
-    } else {
-      String msg = "Configuration has no " + OZONE_OM_ADDRESS_KEY + " " +
-          "address that matches local node's address. Please configure the " +
-          "system with " + OZONE_OM_ADDRESS_KEY;
-      LOG.info(msg);
-      throw new OzoneIllegalArgumentException(msg);
-    }
-  }
-
-  /**
-   * Builds and sets OMNodeDetails object.
-   */
-  private void setOMNodeDetails(String serviceId, String nodeId,
-      InetSocketAddress rpcAddress, int ratisPort) {
-
-    if (serviceId == null) {
-      // If no serviceId is set, take the default serviceID om-service
-      serviceId = OzoneConsts.OM_SERVICE_ID_DEFAULT;
-      LOG.info("OM Service ID is not set. Setting it to the default ID: {}",
-          serviceId);
-    }
-    if (nodeId == null) {
-      // If no nodeId is set, take the omId from omStorage as the nodeID
-      nodeId = omId;
-      LOG.info("OM Node ID is not set. Setting it to the OmStorage's " +
-          "OmID: {}", nodeId);
-    }
-
-    this.omNodeDetails = new OMNodeDetails.Builder()
-        .setOMServiceId(serviceId)
-        .setOMNodeId(nodeId)
-        .setRpcAddress(rpcAddress)
-        .setRatisPort(ratisPort)
-        .build();
-
-    // Set this nodes OZONE_OM_ADDRESS_KEY to the discovered address.
-    configuration.set(OZONE_OM_ADDRESS_KEY,
-        NetUtils.getHostPortString(rpcAddress));
-
-    // Get and set Http(s) address of local node. If base config keys are
-    // not set, check for keys suffixed with OM serivce ID and node ID.
-    setOMNodeSpecificConfigs(serviceId, nodeId);
-  }
-
-  /**
-   * Check if any of the following configuration keys have been set using OM
-   * Node ID suffixed to the key. If yes, then set the base key with the
-   * configured valued.
-   *    1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY}
-   *    2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY}
-   *    3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY}
-   *    4. {@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY}
-   */
-  private void setOMNodeSpecificConfigs(String omServiceId, String omNodeId) {
-    String[] confKeys = new String[] {
-        OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY,
-        OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY,
-        OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY,
-        OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY};
-
-    for (String confKey : confKeys) {
-      String confValue = OmUtils.getConfSuffixedWithOMNodeId(
-          configuration, confKey, omServiceId, omNodeId);
-      if (confValue != null) {
-        LOG.info("Setting configuration key {} with value of key {}: {}",
-            confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue);
-        configuration.set(confKey, confValue);
-      }
-    }
-  }
-
   private KeyProviderCryptoExtension createKeyProviderExt(
       OzoneConfiguration conf) throws IOException {
     KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
@@ -801,7 +627,7 @@
 
     return new OzoneDelegationTokenSecretManager(conf, tokenMaxLifetime,
         tokenRenewInterval, tokenRemoverScanInterval, omRpcAddressTxt,
-        s3SecretManager);
+        s3SecretManager, certClient);
   }
 
   private OzoneBlockTokenSecretManager createBlockTokenSecretManager(
@@ -1024,7 +850,6 @@
    */
   public static OzoneManager createOm(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
-    loginOMUserIfSecurityEnabled(conf);
     return new OzoneManager(conf);
   }
 
@@ -1053,6 +878,7 @@
   @VisibleForTesting
   public static boolean omInit(OzoneConfiguration conf) throws IOException,
       AuthenticationException {
+    OMHANodeDetails.loadOMHAConfig(conf);
     loginOMUserIfSecurityEnabled(conf);
     OMStorage omStorage = new OMStorage(conf);
     StorageState state = omStorage.getState();
@@ -1361,7 +1187,7 @@
       return omRpcServer;
     }
 
-    InetSocketAddress omNodeRpcAddr = OmUtils.getOmAddress(configuration);
+    InetSocketAddress omNodeRpcAddr = OmUtils.getOmAddress(conf);
 
     final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY,
         OZONE_OM_HANDLER_COUNT_DEFAULT);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index 675895d..4ea8529 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -28,8 +28,10 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
@@ -73,11 +75,11 @@
   }
 
   // Helpers to add and delete volume from user list
-  private VolumeList addVolumeToOwnerList(String volume, String owner)
+  private UserVolumeInfo addVolumeToOwnerList(String volume, String owner)
       throws IOException {
     // Get the volume list
     String dbUserKey = metadataManager.getUserKey(owner);
-    VolumeList volumeList = metadataManager.getUserTable().get(dbUserKey);
+    UserVolumeInfo volumeList = metadataManager.getUserTable().get(dbUserKey);
     List<String> prevVolList = new ArrayList<>();
     if (volumeList != null) {
       prevVolList.addAll(volumeList.getVolumeNamesList());
@@ -92,16 +94,16 @@
 
     // Add the new volume to the list
     prevVolList.add(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
+    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
         .addAllVolumeNames(prevVolList).build();
 
     return newVolList;
   }
 
-  private VolumeList delVolumeFromOwnerList(String volume, String owner)
+  private UserVolumeInfo delVolumeFromOwnerList(String volume, String owner)
       throws IOException {
     // Get the volume list
-    VolumeList volumeList = metadataManager.getUserTable().get(owner);
+    UserVolumeInfo volumeList = metadataManager.getUserTable().get(owner);
     List<String> prevVolList = new ArrayList<>();
     if (volumeList != null) {
       prevVolList.addAll(volumeList.getVolumeNamesList());
@@ -112,7 +114,7 @@
 
     // Remove the volume from the list
     prevVolList.remove(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
+    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
         .addAllVolumeNames(prevVolList).build();
     return newVolList;
   }
@@ -144,7 +146,7 @@
         throw new OMException(ResultCodes.VOLUME_ALREADY_EXISTS);
       }
 
-      VolumeList volumeList = addVolumeToOwnerList(omVolumeArgs.getVolume(),
+      UserVolumeInfo volumeList = addVolumeToOwnerList(omVolumeArgs.getVolume(),
           omVolumeArgs.getOwnerName());
 
       // Set creation time
@@ -173,7 +175,7 @@
   }
 
   private void createVolumeCommitToDB(OmVolumeArgs omVolumeArgs,
-      VolumeList volumeList, String dbVolumeKey, String dbUserKey)
+      UserVolumeInfo volumeList, String dbVolumeKey, String dbUserKey)
       throws IOException {
     try (BatchOperation batch = metadataManager.getStore()
         .initBatchOperation()) {
@@ -222,11 +224,12 @@
 
       acquiredUsersLock = metadataManager.getLock().acquireMultiUserLock(owner,
           originalOwner);
-      VolumeList oldOwnerVolumeList = delVolumeFromOwnerList(volume,
+      UserVolumeInfo oldOwnerVolumeList = delVolumeFromOwnerList(volume,
           originalOwner);
 
       String newOwner =  metadataManager.getUserKey(owner);
-      VolumeList newOwnerVolumeList = addVolumeToOwnerList(volume, newOwner);
+      UserVolumeInfo newOwnerVolumeList = addVolumeToOwnerList(volume,
+          newOwner);
 
       volumeArgs.setOwnerName(owner);
       setOwnerCommitToDB(oldOwnerVolumeList, newOwnerVolumeList,
@@ -246,8 +249,8 @@
   }
 
 
-  private void setOwnerCommitToDB(VolumeList oldOwnerVolumeList,
-      VolumeList newOwnerVolumeList, OmVolumeArgs newOwnerVolumeArgs,
+  private void setOwnerCommitToDB(UserVolumeInfo oldOwnerVolumeList,
+      UserVolumeInfo newOwnerVolumeList, OmVolumeArgs newOwnerVolumeArgs,
       String oldOwner) throws IOException {
     try (BatchOperation batch = metadataManager.getStore()
         .initBatchOperation()) {
@@ -370,7 +373,7 @@
       Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
       // delete the volume from the owner list
       // as well as delete the volume entry
-      VolumeList newVolumeList = delVolumeFromOwnerList(volume,
+      UserVolumeInfo newVolumeList = delVolumeFromOwnerList(volume,
           volumeArgs.getOwnerName());
 
 
@@ -390,7 +393,7 @@
   }
 
 
-  private void deleteVolumeCommitToDB(VolumeList newVolumeList,
+  private void deleteVolumeCommitToDB(UserVolumeInfo newVolumeList,
       String volume, String owner) throws IOException {
     try (BatchOperation batch = metadataManager.getStore()
         .initBatchOperation()) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
new file mode 100644
index 0000000..8d9e709
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
@@ -0,0 +1,306 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
+
+/**
+ * Class which maintains peer information and its own OM node information.
+ */
+public class OMHANodeDetails {
+
+  private static final String[] genericConfigKeys = new String[] {
+      OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY,
+      OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY,
+      OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY,
+      OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY,
+      OMConfigKeys.OZONE_OM_DB_DIRS,
+      OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+  };
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMHANodeDetails.class);
+  private final OMNodeDetails localNodeDetails;
+  private final List<OMNodeDetails> peerNodeDetails;
+
+  public OMHANodeDetails(OMNodeDetails localNodeDetails,
+      List<OMNodeDetails> peerNodeDetails) {
+    this.localNodeDetails = localNodeDetails;
+    this.peerNodeDetails = peerNodeDetails;
+  }
+
+  public OMNodeDetails getLocalNodeDetails() {
+    return localNodeDetails;
+  }
+
+  public List<OMNodeDetails> getPeerNodeDetails() {
+    return peerNodeDetails;
+  }
+
+  /**
+   * Inspects and loads OM node configurations.
+   *
+   * If {@link OMConfigKeys#OZONE_OM_SERVICE_IDS_KEY} is configured with
+   * multiple ids and/or if {@link OMConfigKeys#OZONE_OM_NODE_ID_KEY} is not
+   * specifically configured, this method determines the omServiceId
+   * and omNodeId by matching the node's address with the configured
+   * addresses. When a match is found, it sets the omServiceId and omNodeId
+   * from the corresponding configuration key. This method also finds the OM
+   * peer nodes belonging to the same OM service.
+   *
+   * @param conf - Ozone configuration.
+   */
+  public static OMHANodeDetails loadOMHAConfig(OzoneConfiguration conf) {
+    InetSocketAddress localRpcAddress = null;
+    String localOMServiceId = null;
+    String localOMNodeId = null;
+    int localRatisPort = 0;
+    Collection<String> omServiceIds = conf.getTrimmedStringCollection(
+        OZONE_OM_SERVICE_IDS_KEY);
+
+    String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY);
+    int found = 0;
+    boolean isOMAddressSet = false;
+
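+    // If no OM service IDs are configured, this loop body never runs and the
+    // non-HA fallback below is used instead.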
+    for (String serviceId : omServiceIds) {
+      Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
+
+      if (omNodeIds.size() == 0) {
+        String msg = "Configuration does not have any value set for " +
+            OZONE_OM_NODES_KEY + " for service ID " + serviceId + ". List of " +
+            "OM Node ID's should be specified for the service ID";
+        throw new OzoneIllegalArgumentException(msg);
+      }
+
+      List<OMNodeDetails> peerNodesList = new ArrayList<>();
+      boolean isPeer;
+      for (String nodeId : omNodeIds) {
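+        // If a node ID is explicitly configured for this process, every
+        // other node ID under the same service is treated as a peer.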
+        if (knownOMNodeId != null && !knownOMNodeId.equals(nodeId)) {
+          isPeer = true;
+        } else {
+          isPeer = false;
+        }
+        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+            serviceId, nodeId);
+        String rpcAddrStr = OmUtils.getOmRpcAddress(conf, rpcAddrKey);
+        if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
+          String msg = "Configuration does not have any value set for " +
+              rpcAddrKey + ". OM Rpc Address should be set for all node " +
+              "IDs for a service ID.";
+          throw new OzoneIllegalArgumentException(msg);
+        }
+
+        // If OM address is set for any node id, we will not fallback to the
+        // default
+        isOMAddressSet = true;
+
+        String ratisPortKey = OmUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY,
+            serviceId, nodeId);
+        int ratisPort = conf.getInt(ratisPortKey, OZONE_OM_RATIS_PORT_DEFAULT);
+
+        InetSocketAddress addr = null;
+        try {
+          addr = NetUtils.createSocketAddr(rpcAddrStr);
+        } catch (Exception e) {
+          LOG.warn("Exception in creating socket address " + addr, e);
+          continue;
+        }
+        if (!addr.isUnresolved()) {
+          if (!isPeer && OmUtils.isAddressLocal(addr)) {
+            localRpcAddress = addr;
+            localOMServiceId = serviceId;
+            localOMNodeId = nodeId;
+            localRatisPort = ratisPort;
+            found++;
+          } else {
+            // This OMNode belongs to same OM service as the current OMNode.
+            // Add it to peerNodes list.
+            peerNodesList.add(getHAOMNodeDetails(conf, serviceId,
+                nodeId, addr, ratisPort));
+          }
+        }
+      }
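+      // Exactly one configured address resolved to the local host, so this
+      // node's service ID and node ID are now known.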
+      if (found == 1) {
+        LOG.debug("Found one matching OM address with service ID: {} and node" +
+            " ID: {}", localOMServiceId, localOMNodeId);
+
+        LOG.info("Found matching OM address with OMServiceId: {}, " +
+                "OMNodeId: {}, RPC Address: {} and Ratis port: {}",
+            localOMServiceId, localOMNodeId,
+            NetUtils.getHostPortString(localRpcAddress), localRatisPort);
+
+        setOMNodeSpecificConfigs(conf, localOMServiceId, localOMNodeId);
+        return new OMHANodeDetails(getHAOMNodeDetails(conf, localOMServiceId,
+            localOMNodeId, localRpcAddress, localRatisPort), peerNodesList);
+
+      } else if (found > 1) {
+        String msg = "Configuration has multiple " + OZONE_OM_ADDRESS_KEY +
+            " addresses that match local node's address. Please configure the" +
+            " system with " + OZONE_OM_SERVICE_IDS_KEY + " and " +
+            OZONE_OM_ADDRESS_KEY;
+        throw new OzoneIllegalArgumentException(msg);
+      }
+    }
+
+    if (!isOMAddressSet) {
+      // No OM address is set. Fallback to default
+      InetSocketAddress omAddress = OmUtils.getOmAddress(conf);
+      int ratisPort = conf.getInt(OZONE_OM_RATIS_PORT_KEY,
+          OZONE_OM_RATIS_PORT_DEFAULT);
+
+      LOG.info("Configuration either no {} set. Falling back to the default " +
+          "OM address {}", OZONE_OM_ADDRESS_KEY, omAddress);
+
+      return new OMHANodeDetails(getOMNodeDetails(conf, null,
+          null, omAddress, ratisPort), new ArrayList<>());
+
+    } else {
+      String msg = "Configuration has no " + OZONE_OM_ADDRESS_KEY + " " +
+          "address that matches local node's address. Please configure the " +
+          "system with " + OZONE_OM_ADDRESS_KEY;
+      LOG.info(msg);
+      throw new OzoneIllegalArgumentException(msg);
+    }
+  }
+
+  /**
+   * Create Local OM Node Details.
+   * @param conf - Ozone configuration.
+   * @param serviceId - Service ID this OM belongs to.
+   * @param nodeId - Node ID of this OM.
+   * @param rpcAddress - Rpc Address of the OM.
+   * @param ratisPort - Ratis port of the OM.
+   * @return OMNodeDetails
+   */
+  public static OMNodeDetails getOMNodeDetails(OzoneConfiguration conf,
+      String serviceId, String nodeId, InetSocketAddress rpcAddress,
+      int ratisPort) {
+
+    if (serviceId == null) {
+      // If no serviceId is set, take the default serviceID om-service
+      serviceId = OzoneConsts.OM_SERVICE_ID_DEFAULT;
+      LOG.info("OM Service ID is not set. Setting it to the default ID: {}",
+          serviceId);
+    }
+
+    // Pass null for the serviceID and nodeID here; this path is only used
+    // for a non-HA, single node OM cluster.
+    String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf,
+        null, null, rpcAddress.getHostName());
+    String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf,
+        null, null, rpcAddress.getHostName());
+
+    return new OMNodeDetails.Builder()
+        .setOMServiceId(serviceId)
+        .setOMNodeId(nodeId)
+        .setRpcAddress(rpcAddress)
+        .setRatisPort(ratisPort)
+        .setHttpAddress(httpAddr)
+        .setHttpsAddress(httpsAddr)
+        .build();
+
+  }
+
+  /**
+   * Create OM Node Details for an OM node that is part of an HA OM service.
+   * @param conf - Ozone configuration.
+   * @param serviceId - Service ID this OM belongs to.
+   * @param nodeId - Node ID of this OM.
+   * @param rpcAddress - Rpc Address of the OM.
+   * @param ratisPort - Ratis port of the OM.
+   * @return OMNodeDetails
+   */
+  public static OMNodeDetails getHAOMNodeDetails(OzoneConfiguration conf,
+      String serviceId, String nodeId, InetSocketAddress rpcAddress,
+      int ratisPort) {
+    Preconditions.checkNotNull(serviceId);
+    Preconditions.checkNotNull(nodeId);
+
+    String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf,
+        serviceId, nodeId, rpcAddress.getHostName());
+    String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf,
+        serviceId, nodeId, rpcAddress.getHostName());
+
+    return new OMNodeDetails.Builder()
+        .setOMServiceId(serviceId)
+        .setOMNodeId(nodeId)
+        .setRpcAddress(rpcAddress)
+        .setRatisPort(ratisPort)
+        .setHttpAddress(httpAddr)
+        .setHttpsAddress(httpsAddr)
+        .build();
+
+  }
+
+  /**
+   * Check if any of the following configuration keys have been set using OM
+   * Node ID suffixed to the key. If yes, then set the base key with the
+   * configured value.
+   *    1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY}
+   *    2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY}
+   *    3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY}
+   *    4. {@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY}
+   *    5. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE}
+   *    6. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY}
+   *    7. {@link OMConfigKeys#OZONE_OM_KERBEROS_KEYTAB_FILE_KEY}
+   *    8. {@link OMConfigKeys#OZONE_OM_KERBEROS_PRINCIPAL_KEY}
+   *    9. {@link OMConfigKeys#OZONE_OM_DB_DIRS}
+   *    10. {@link OMConfigKeys#OZONE_OM_ADDRESS_KEY}
+   */
+  private static void setOMNodeSpecificConfigs(
+      OzoneConfiguration ozoneConfiguration, String omServiceId,
+      String omNodeId) {
+
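+    // Copy any node-suffixed configuration value over its generic key so the
+    // rest of the OM code can read the generic key directly.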
+    for (String confKey : genericConfigKeys) {
+      String confValue = OmUtils.getConfSuffixedWithOMNodeId(
+          ozoneConfiguration, confKey, omServiceId, omNodeId);
+      if (confValue != null) {
+        LOG.info("Setting configuration key {} with value of key {}: {}",
+            confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue);
+        ozoneConfiguration.set(confKey, confValue);
+      }
+    }
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
similarity index 98%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java
rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
index fc8c818..7d69b93 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMNodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
@@ -15,7 +15,7 @@
  * the License.
  */
 
-package org.apache.hadoop.ozone.om;
+package org.apache.hadoop.ozone.om.ha;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.http.HttpConfig;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
new file mode 100644
index 0000000..3c40c88
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.ha;
+
+/**
+ * This package contains classes related to OM HA.
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index c341d30..69a7ae9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMNodeDetails;
+import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 33e5fef..eb366ad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -139,13 +139,12 @@
       // TODO: Revisit if we need it later.
 
       omClientResponse = new OMKeyDeleteResponse(omKeyInfo,
-          deleteKeyArgs.getModificationTime(),
           omResponse.setDeleteKeyResponse(
               DeleteKeyResponse.newBuilder()).build());
 
     } catch (IOException ex) {
       exception = ex;
-      omClientResponse = new OMKeyDeleteResponse(null, 0,
+      omClientResponse = new OMKeyDeleteResponse(null,
           createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java
index f28f11b..7a7091d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java
@@ -57,8 +57,7 @@
     .S3CreateBucketResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .S3CreateVolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -176,10 +175,11 @@
           OmVolumeArgs omVolumeArgs = createOmVolumeArgs(volumeName, userName,
               s3CreateBucketRequest.getS3CreateVolumeInfo()
                   .getCreationTime());
-          VolumeList volumeList = omMetadataManager.getUserTable().get(
+          UserVolumeInfo volumeList = omMetadataManager.getUserTable().get(
               omMetadataManager.getUserKey(userName));
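+          // The Ratis transaction log index is also used as the update ID on
+          // the owner's volume list.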
           volumeList = addVolumeToOwnerList(volumeList,
-              volumeName, userName, ozoneManager.getMaxUserVolumeCount());
+              volumeName, userName, ozoneManager.getMaxUserVolumeCount(),
+              transactionLogIndex);
           createVolume(omMetadataManager, omVolumeArgs, volumeList, volumeKey,
               omMetadataManager.getUserKey(userName), transactionLogIndex);
           volumeCreated = true;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 940ba7d..f176879 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -133,14 +133,13 @@
       }
 
       omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          keyArgs.getModificationTime(), multipartKeyInfo,
+          multipartKeyInfo,
           omResponse.setAbortMultiPartUploadResponse(
               MultipartUploadAbortResponse.newBuilder()).build());
     } catch (IOException ex) {
       exception = ex;
       omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          keyArgs.getModificationTime(), multipartKeyInfo,
-          createErrorOMResponse(omResponse, exception));
+          multipartKeyInfo, createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
         omClientResponse.setFlushFuture(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index f9e1338..0992fe0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -188,13 +188,13 @@
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
       omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-        openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
+        openKey, omKeyInfo, multipartKeyInfo,
           oldPartKeyInfo, omResponse.build());
 
     } catch (IOException ex) {
       exception = ex;
       omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-          openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
+          openKey, omKeyInfo, multipartKeyInfo,
           oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
index 22dc43f..c06069c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
@@ -47,8 +47,7 @@
     .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.util.Time;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
@@ -116,6 +115,11 @@
     Collection<String> ozAdmins = ozoneManager.getOzoneAdmins();
     try {
       omVolumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      // When a volume is created, both the object ID and the update ID are
+      // set to the same Ratis transaction ID. The object ID never changes,
+      // while the update ID is set to the transaction ID each time the
+      // object is updated.
+      omVolumeArgs.setUpdateID(transactionLogIndex);
+      omVolumeArgs.setObjectID(transactionLogIndex);
       auditMap = omVolumeArgs.toAuditMap();
 
       // check Acl
@@ -128,7 +132,7 @@
         }
       }
 
-      VolumeList volumeList = null;
+      UserVolumeInfo volumeList = null;
 
       // acquire lock.
       acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
@@ -146,7 +150,7 @@
         String dbUserKey = omMetadataManager.getUserKey(owner);
         volumeList = omMetadataManager.getUserTable().get(dbUserKey);
         volumeList = addVolumeToOwnerList(volumeList, volume, owner,
-            ozoneManager.getMaxUserVolumeCount());
+            ozoneManager.getMaxUserVolumeCount(), transactionLogIndex);
         createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey,
             dbUserKey, transactionLogIndex);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
index 9a1c422..485536f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
@@ -95,7 +95,7 @@
       }
 
       OmVolumeArgs omVolumeArgs = null;
-      OzoneManagerProtocolProtos.VolumeList newVolumeList = null;
+      OzoneManagerProtocolProtos.UserVolumeInfo newVolumeList = null;
 
       acquiredVolumeLock = omMetadataManager.getLock().acquireLock(VOLUME_LOCK,
           volume);
@@ -115,7 +115,8 @@
 
       // delete the volume from the owner list
       // as well as delete the volume entry
-      newVolumeList = delVolumeFromOwnerList(newVolumeList, volume, owner);
+      newVolumeList = delVolumeFromOwnerList(newVolumeList, volume, owner,
+          transactionLogIndex);
 
       omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey),
           new CacheValue<>(Optional.of(newVolumeList), transactionLogIndex));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
index dc44375..7c38c41 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+    .UserVolumeInfo;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 
@@ -48,12 +48,13 @@
    * acquiring user lock.
    * @param volumeList - current volume list owned by user.
    * @param volume - volume which needs to deleted from the volume list.
-   * @param owner
-   * @return VolumeList - updated volume list for the user.
+   * @param owner - Name of the Owner.
+   * @param txID - The transaction ID that is updating this value.
+   * @return UserVolumeInfo - updated UserVolumeInfo.
    * @throws IOException
    */
-  protected VolumeList delVolumeFromOwnerList(VolumeList volumeList,
-      String volume, String owner) throws IOException {
+  protected UserVolumeInfo delVolumeFromOwnerList(UserVolumeInfo volumeList,
+      String volume, String owner, long txID) throws IOException {
 
     List<String> prevVolList = new ArrayList<>();
 
@@ -67,8 +68,11 @@
 
     // Remove the volume from the list
     prevVolList.remove(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
-        .addAllVolumeNames(prevVolList).build();
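+    // Preserve the existing object ID and record this transaction as the
+    // latest update.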
+    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
+        .addAllVolumeNames(prevVolList)
+        .setObjectID(volumeList.getObjectID())
+        .setUpdateID(txID)
+        .build();
     return newVolList;
   }
 
@@ -84,8 +88,9 @@
    * @throws OMException - if user has volumes greater than
    * maxUserVolumeCount, an exception is thrown.
    */
-  protected VolumeList addVolumeToOwnerList(VolumeList volumeList,
-      String volume, String owner, long maxUserVolumeCount) throws IOException {
+  protected UserVolumeInfo addVolumeToOwnerList(UserVolumeInfo volumeList,
+      String volume, String owner, long maxUserVolumeCount, long txID)
+      throws IOException {
 
     // Check the volume count
     if (volumeList != null &&
@@ -95,13 +100,18 @@
     }
 
     List<String> prevVolList = new ArrayList<>();
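+    // If the user entry already exists, keep its original object ID;
+    // otherwise this transaction creates the entry, so txID becomes the
+    // object ID.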
+    long objectID = txID;
     if (volumeList != null) {
       prevVolList.addAll(volumeList.getVolumeNamesList());
+      objectID = volumeList.getObjectID();
     }
 
     // Add the new volume to the list
     prevVolList.add(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
+    UserVolumeInfo newVolList = UserVolumeInfo.newBuilder()
+        .setObjectID(objectID)
+        .setUpdateID(txID)
         .addAllVolumeNames(prevVolList).build();
 
     return newVolList;
@@ -119,7 +129,7 @@
    * @throws IOException
    */
   protected void createVolume(final OMMetadataManager omMetadataManager,
-      OmVolumeArgs omVolumeArgs, VolumeList volumeList, String dbVolumeKey,
+      OmVolumeArgs omVolumeArgs, UserVolumeInfo volumeList, String dbVolumeKey,
       String dbUserKey, long transactionLogIndex) {
     // Update cache: Update user and volume cache.
     omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey),
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
index 3fdb152..129b2f9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
@@ -117,8 +117,8 @@
 
       String dbVolumeKey = omMetadataManager.getVolumeKey(volume);
 
-      OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList = null;
-      OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList = null;
+      OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null;
+      OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null;
       OmVolumeArgs omVolumeArgs = null;
 
 
@@ -144,14 +144,16 @@
           omMetadataManager.getUserTable().get(oldOwner);
 
       oldOwnerVolumeList = delVolumeFromOwnerList(
-          oldOwnerVolumeList, volume, oldOwner);
+          oldOwnerVolumeList, volume, oldOwner, transactionLogIndex);
 
       newOwnerVolumeList = omMetadataManager.getUserTable().get(newOwner);
       newOwnerVolumeList = addVolumeToOwnerList(
-          newOwnerVolumeList, volume, newOwner, maxUserVolumeCount);
+          newOwnerVolumeList, volume, newOwner,
+          maxUserVolumeCount, transactionLogIndex);
 
       // Set owner with new owner name.
       omVolumeArgs.setOwnerName(newOwner);
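+      // Record this transaction as the latest update to the volume args.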
+      omVolumeArgs.setUpdateID(transactionLogIndex);
 
       // Update cache.
       omMetadataManager.getUserTable().addCacheEntry(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
index 9b24910..3f800d3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java
@@ -28,6 +28,9 @@
     .OMResponse;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
+
 /**
  * Response for CreateBucket request.
  */
@@ -35,8 +38,8 @@
 
   private final OmBucketInfo omBucketInfo;
 
-  public OMBucketCreateResponse(OmBucketInfo omBucketInfo,
-      OMResponse omResponse) {
+  public OMBucketCreateResponse(@Nullable OmBucketInfo omBucketInfo,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omBucketInfo = omBucketInfo;
   }
@@ -56,6 +59,7 @@
     }
   }
 
+  @Nullable
   public OmBucketInfo getOmBucketInfo() {
     return omBucketInfo;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
index 5dd6cdf..0e0b398 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java
@@ -25,6 +25,8 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nonnull;
+
 /**
  * Response for DeleteBucket request.
  */
@@ -35,7 +37,7 @@
 
   public OMBucketDeleteResponse(
       String volumeName, String bucketName,
-      OzoneManagerProtocolProtos.OMResponse omResponse) {
+      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
     super(omResponse);
     this.volumeName = volumeName;
     this.bucketName = bucketName;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
index d5e88c6..f9ce204 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java
@@ -27,14 +27,17 @@
     .OMResponse;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
+
 /**
  * Response for SetBucketProperty request.
  */
 public class OMBucketSetPropertyResponse extends OMClientResponse {
   private OmBucketInfo omBucketInfo;
 
-  public OMBucketSetPropertyResponse(OmBucketInfo omBucketInfo,
-      OMResponse omResponse) {
+  public OMBucketSetPropertyResponse(@Nullable OmBucketInfo omBucketInfo,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omBucketInfo = omBucketInfo;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
index 474fda3..2690dda 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java
@@ -29,6 +29,7 @@
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 import java.io.IOException;
 
 /**
@@ -41,7 +42,7 @@
   private OmKeyInfo dirKeyInfo;
 
   public OMDirectoryCreateResponse(@Nullable OmKeyInfo dirKeyInfo,
-      OMResponse omResponse) {
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.dirKeyInfo = dirKeyInfo;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
index 0354b84..8da7313 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.om.response.file;
 
 import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
@@ -33,7 +34,7 @@
 public class OMFileCreateResponse extends OMKeyCreateResponse {
 
   public OMFileCreateResponse(@Nullable OmKeyInfo omKeyInfo,
-      long openKeySessionID, OMResponse omResponse) {
+      long openKeySessionID, @Nonnull OMResponse omResponse) {
     super(omKeyInfo, openKeySessionID, omResponse);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index b92c55a..c35fa6c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -27,6 +27,8 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
 import java.io.IOException;
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for AllocateBlock request.
@@ -36,8 +38,8 @@
   private final OmKeyInfo omKeyInfo;
   private final long clientID;
 
-  public OMAllocateBlockResponse(OmKeyInfo omKeyInfo,
-      long clientID, OMResponse omResponse) {
+  public OMAllocateBlockResponse(@Nullable OmKeyInfo omKeyInfo,
+      long clientID, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omKeyInfo = omKeyInfo;
     this.clientID = clientID;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index 1c4ff31..0eb97f3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -25,6 +25,8 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
 import java.io.IOException;
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for CommitKey request.
@@ -34,8 +36,9 @@
   private OmKeyInfo omKeyInfo;
   private long openKeySessionID;
 
-  public OMKeyCommitResponse(OmKeyInfo omKeyInfo, long openKeySessionID,
-      OzoneManagerProtocolProtos.OMResponse omResponse) {
+  public OMKeyCommitResponse(@Nullable OmKeyInfo omKeyInfo,
+      long openKeySessionID,
+      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
     super(omResponse);
     this.omKeyInfo = omKeyInfo;
     this.openKeySessionID = openKeySessionID;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 2ff40d4..fde646c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -20,6 +20,7 @@
 
 import java.io.IOException;
 import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -38,7 +39,7 @@
   private long openKeySessionID;
 
   public OMKeyCreateResponse(@Nullable OmKeyInfo omKeyInfo,
-      long openKeySessionID, OMResponse omResponse) {
+      long openKeySessionID, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omKeyInfo = omKeyInfo;
     this.openKeySessionID = openKeySessionID;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index a3dfb28..96aedd1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -29,19 +30,19 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
 import java.io.IOException;
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for DeleteKey request.
  */
 public class OMKeyDeleteResponse extends OMClientResponse {
   private OmKeyInfo omKeyInfo;
-  private long deleteTimestamp;
 
-  public OMKeyDeleteResponse(OmKeyInfo omKeyInfo, long deletionTime,
-      OMResponse omResponse) {
+  public OMKeyDeleteResponse(@Nullable OmKeyInfo omKeyInfo,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omKeyInfo = omKeyInfo;
-    this.deleteTimestamp = deletionTime;
   }
 
   @Override
@@ -60,12 +61,19 @@
       if (!isKeyEmpty(omKeyInfo)) {
         // If a deleted key is put in the table where a key with the same
         // name already exists, then the old deleted key information would be
-        // lost. To differentiate between keys with same name in
-        // deletedTable, we add the timestamp to the key name.
-        String deleteKeyName = OmUtils.getDeletedKeyName(
-            ozoneKey, deleteTimestamp);
+        // lost. To avoid this, first check whether a key with the same name
+        // already exists. The deletedTable in OM metadata stores
+        // <KeyName, RepeatedOmKeyInfo>, where RepeatedOmKeyInfo holds the
+        // list of OmKeyInfo entries tied to one key name. If no
+        // RepeatedOmKeyInfo exists yet for this key name, a new instance is
+        // created; otherwise the key is appended to the existing list and
+        // the updated instance is stored back in deletedTable.
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable().get(ozoneKey);
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            omKeyInfo, repeatedOmKeyInfo);
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            deleteKeyName, omKeyInfo);
+            ozoneKey, repeatedOmKeyInfo);
       }
     }
   }
@@ -73,10 +81,14 @@
   /**
    * Check if the key is empty or not. Key will be empty if it does not have
    * blocks.
+   *
   * @param keyInfo key info to check, may be null.
   * @return true if the key is null or has no blocks, false otherwise.
    */
-  private boolean isKeyEmpty(OmKeyInfo keyInfo) {
+  private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) {
+    if (keyInfo == null) {
+      return true;
+    }
     for (OmKeyLocationInfoGroup keyLocationList : keyInfo
         .getKeyLocationVersions()) {
       if (keyLocationList.getLocationList().size() != 0) {
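
The hunk above replaces the old timestamp-suffixed deleted-key names with a single deletedTable row per key name whose value is a RepeatedOmKeyInfo list. The following condensed sketch shows the read-merge-write pattern shared by the response classes in this patch; the helper name moveToDeletedTable is hypothetical, everything else uses only calls visible in the diff, and the Ozone OM types are assumed to be on the classpath.

    // Sketch of the deletedTable update pattern introduced here.
    static void moveToDeletedTable(OMMetadataManager omMetadataManager,
        BatchOperation batchOperation, String ozoneKey, OmKeyInfo omKeyInfo)
        throws IOException {
      // Read any list already stored for this key name; null on first delete.
      RepeatedOmKeyInfo repeatedOmKeyInfo =
          omMetadataManager.getDeletedTable().get(ozoneKey);
      // prepareKeyForDelete() creates a new RepeatedOmKeyInfo when the
      // existing value is null, otherwise appends omKeyInfo to the list.
      repeatedOmKeyInfo =
          OmUtils.prepareKeyForDelete(omKeyInfo, repeatedOmKeyInfo);
      // The row key stays stable; repeated deletes of the same name
      // accumulate inside the RepeatedOmKeyInfo value.
      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
          ozoneKey, repeatedOmKeyInfo);
    }
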
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
index 16d6cfb..513b94d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
@@ -27,6 +27,7 @@
 
 import java.io.IOException;
 import java.util.List;
+import javax.annotation.Nonnull;
 
 /**
  * Response for {@link OMKeyPurgeRequest} request.
@@ -35,7 +36,8 @@
 
   private List<String> purgeKeyList;
 
-  public OMKeyPurgeResponse(List<String> keyList, OMResponse omResponse) {
+  public OMKeyPurgeResponse(List<String> keyList,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.purgeKeyList = keyList;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 1e7831b..0e9ae17 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -27,6 +27,8 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
 import java.io.IOException;
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for RenameKey request.
@@ -37,8 +39,8 @@
   private final String toKeyName;
   private final String fromKeyName;
 
-  public OMKeyRenameResponse(OmKeyInfo renameKeyInfo, String toKeyName,
-      String fromKeyName, OMResponse omResponse) {
+  public OMKeyRenameResponse(@Nullable OmKeyInfo renameKeyInfo,
+      String toKeyName, String fromKeyName, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.renameKeyInfo = renameKeyInfo;
     this.toKeyName = toKeyName;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index 26df300..a9a4024 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -33,6 +34,8 @@
 import java.io.IOException;
 import java.util.Map;
 import java.util.TreeMap;
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for Multipart Abort Request.
@@ -40,16 +43,13 @@
 public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
   private String multipartKey;
-  private long timeStamp;
   private OmMultipartKeyInfo omMultipartKeyInfo;
 
   public S3MultipartUploadAbortResponse(String multipartKey,
-      long timeStamp,
-      OmMultipartKeyInfo omMultipartKeyInfo,
-      OMResponse omResponse) {
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.multipartKey = multipartKey;
-    this.timeStamp = timeStamp;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
   }
 
@@ -73,9 +73,16 @@
         PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
         OmKeyInfo currentKeyPartInfo =
             OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
+
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            currentKeyPartInfo, repeatedOmKeyInfo);
+
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            OmUtils.getDeletedKeyName(partKeyInfo.getPartName(), timeStamp),
-            currentKeyPartInfo);
+            partKeyInfo.getPartName(),
+            repeatedOmKeyInfo);
       }
 
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index e010c19..fef3698 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -34,6 +35,9 @@
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .Status.OK;
 
+import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
+
 /**
  * Response for S3MultipartUploadCommitPart request.
  */
@@ -41,21 +45,19 @@
 
   private String multipartKey;
   private String openKey;
-  private long deleteTimeStamp;
   private OmKeyInfo deletePartKeyInfo;
   private OmMultipartKeyInfo omMultipartKeyInfo;
   private OzoneManagerProtocolProtos.PartKeyInfo oldMultipartKeyInfo;
 
 
   public S3MultipartUploadCommitPartResponse(String multipartKey,
-      String openKey, long deleteTimeStamp,
-      OmKeyInfo deletePartKeyInfo, OmMultipartKeyInfo omMultipartKeyInfo,
-      OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
-      OMResponse omResponse) {
+      String openKey, @Nullable OmKeyInfo deletePartKeyInfo,
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.multipartKey = multipartKey;
     this.openKey = openKey;
-    this.deleteTimeStamp = deleteTimeStamp;
     this.deletePartKeyInfo = deletePartKeyInfo;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
     this.oldMultipartKeyInfo = oldPartKeyInfo;
@@ -65,19 +67,26 @@
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-
     if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
      // This means that by the time we try to commit the part, someone has
      // aborted this multipart upload. So, delete this part information.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          omMetadataManager.getDeletedTable().get(openKey);
+
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          deletePartKeyInfo, repeatedOmKeyInfo);
+
+
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          OmUtils.getDeletedKeyName(openKey, deleteTimeStamp),
-          deletePartKeyInfo);
+          openKey,
+          repeatedOmKeyInfo);
     }
 
     if (getOMResponse().getStatus() == OK) {
 
       // If we have old part info:
      // Need to do 4 steps:
+      //   0. Strip GDPR related metadata from multipart info
       //   1. add old part to delete table
       //   2. Commit multipart info which has information about this new part.
       //   3. delete this new part entry from open key table.
@@ -85,12 +94,20 @@
       // This means for this multipart upload part upload, we have an old
       // part information, so delete it.
       if (oldMultipartKeyInfo != null) {
-        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            OmUtils.getDeletedKeyName(oldMultipartKeyInfo.getPartName(),
-                deleteTimeStamp),
-            OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
-      }
+        OmKeyInfo partKey =
+            OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo());
 
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable()
+                .get(oldMultipartKeyInfo.getPartName());
+
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKey,
+            repeatedOmKeyInfo);
+
+        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+            oldMultipartKeyInfo.getPartName(),
+            repeatedOmKeyInfo);
+      }
 
       omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
           multipartKey, omMultipartKeyInfo);
@@ -99,8 +116,6 @@
       //  safely delete part key info from open key table.
       omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
           openKey);
-
-
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index 0c34053..b0cc8b5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
 import javax.annotation.Nullable;
+import javax.annotation.Nonnull;
 
 /**
  * Response for Multipart Upload Complete request.
@@ -39,7 +40,7 @@
 
 
   public S3MultipartUploadCompleteResponse(@Nullable String multipartKey,
-      @Nullable OmKeyInfo omKeyInfo, OMResponse omResponse) {
+      @Nullable OmKeyInfo omKeyInfo, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.multipartKey = multipartKey;
     this.omKeyInfo = omKeyInfo;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
index 81f763a..2b797d9b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nonnull;
 import java.io.IOException;
 
 /**
@@ -35,7 +36,7 @@
   private OmVolumeArgs omVolumeArgs;
 
   public OMVolumeAclOpResponse(OmVolumeArgs omVolumeArgs,
-      OMResponse omResponse) {
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omVolumeArgs = omVolumeArgs;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
index 0c0492f..1bd3e4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java
@@ -27,24 +27,25 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
 
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nonnull;
+
 /**
  * Response for CreateVolume request.
  */
 public class OMVolumeCreateResponse extends OMClientResponse {
 
-  private VolumeList volumeList;
+  private UserVolumeInfo userVolumeInfo;
   private OmVolumeArgs omVolumeArgs;
 
   public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs,
-      VolumeList volumeList, OMResponse omResponse) {
+      UserVolumeInfo userVolumeInfo, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omVolumeArgs = omVolumeArgs;
-    this.volumeList = volumeList;
+    this.userVolumeInfo = userVolumeInfo;
   }
   @Override
   public void addToDBBatch(OMMetadataManager omMetadataManager,
@@ -61,7 +62,7 @@
       omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
           dbVolumeKey, omVolumeArgs);
       omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey,
-          volumeList);
+          userVolumeInfo);
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
index c04b700..6718ce5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
@@ -26,19 +26,21 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+    .UserVolumeInfo;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nonnull;
+
 /**
  * Response for DeleteVolume request.
  */
 public class OMVolumeDeleteResponse extends OMClientResponse {
   private String volume;
   private String owner;
-  private VolumeList updatedVolumeList;
+  private UserVolumeInfo updatedVolumeList;
 
   public OMVolumeDeleteResponse(String volume, String owner,
-      VolumeList updatedVolumeList, OMResponse omResponse) {
+      UserVolumeInfo updatedVolumeList, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.volume = volume;
     this.owner = owner;
@@ -53,7 +55,7 @@
     // not called in failure scenario in OM code.
     if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
       String dbUserKey = omMetadataManager.getUserKey(owner);
-      VolumeList volumeList = updatedVolumeList;
+      UserVolumeInfo volumeList = updatedVolumeList;
       if (updatedVolumeList.getVolumeNamesList().size() == 0) {
         omMetadataManager.getUserTable().deleteWithBatch(batchOperation,
             dbUserKey);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
index 44410b7..8e02702 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
@@ -26,24 +26,26 @@
 
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
+    .UserVolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 
+import javax.annotation.Nonnull;
+
 /**
  * Response for set owner request.
  */
 public class OMVolumeSetOwnerResponse extends OMClientResponse {
 
   private String oldOwner;
-  private VolumeList oldOwnerVolumeList;
-  private VolumeList newOwnerVolumeList;
+  private UserVolumeInfo oldOwnerVolumeList;
+  private UserVolumeInfo newOwnerVolumeList;
   private OmVolumeArgs newOwnerVolumeArgs;
 
   public OMVolumeSetOwnerResponse(String oldOwner,
-      VolumeList oldOwnerVolumeList, VolumeList newOwnerVolumeList,
-      OmVolumeArgs newOwnerVolumeArgs, OMResponse omResponse) {
+      UserVolumeInfo oldOwnerVolumeList, UserVolumeInfo newOwnerVolumeList,
+      OmVolumeArgs newOwnerVolumeArgs, @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.oldOwner = oldOwner;
     this.oldOwnerVolumeList = oldOwnerVolumeList;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
index 4adc641..13e05fa 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java
@@ -28,6 +28,8 @@
 
 import java.io.IOException;
 
+import javax.annotation.Nonnull;
+
 /**
  * Response for set quota request.
  */
@@ -35,7 +37,7 @@
   private OmVolumeArgs omVolumeArgs;
 
   public OMVolumeSetQuotaResponse(OmVolumeArgs omVolumeArgs,
-      OMResponse omResponse) {
+      @Nonnull OMResponse omResponse) {
     super(omResponse);
     this.omVolumeArgs = omVolumeArgs;
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
index d77536a..5bca52d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.ozone.om.OMNodeDetails;
+import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
 import org.apache.http.Header;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 6f8e9df..d4c029b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -17,7 +17,8 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
+
+import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
@@ -33,7 +34,6 @@
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
-import io.opentracing.Scope;
 import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.util.ExitUtils;
 import org.slf4j.Logger;
@@ -58,8 +58,9 @@
   private final boolean isRatisEnabled;
   private final OzoneManager ozoneManager;
   private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
-  private final ProtocolMessageMetrics protocolMessageMetrics;
   private final AtomicLong transactionIndex = new AtomicLong(0L);
+  private final OzoneProtocolMessageDispatcher<OMRequest, OMResponse>
+      dispatcher;
 
   /**
    * Constructs an instance of the server handler.
@@ -75,7 +76,6 @@
     handler = new OzoneManagerRequestHandler(impl);
     this.omRatisServer = ratisServer;
     this.isRatisEnabled = enableRatis;
-    this.protocolMessageMetrics = metrics;
     this.ozoneManagerDoubleBuffer =
         new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), (i) -> {
           // Do nothing.
@@ -83,6 +83,9 @@
           // As we wait until the double buffer flushes DB to disk.
         }, isRatisEnabled);
 
+    dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol",
+        metrics, LOG);
+
   }
 
   /**
@@ -93,35 +96,9 @@
   @Override
   public OMResponse submitRequest(RpcController controller,
       OMRequest request) throws ServiceException {
-    Scope scope = TracingUtil
-        .importAndCreateScope(request.getCmdType().name(),
-            request.getTraceID());
-    try {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(
-            "OzoneManagerProtocol {} request is received: <json>{}</json>",
-            request.getCmdType().toString(),
-            request.toString().replaceAll("\n", "\\\\n"));
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug("OzoneManagerProtocol {} request is received",
-            request.getCmdType().toString());
-      }
-      protocolMessageMetrics.increment(request.getCmdType());
 
-      OMResponse omResponse = processRequest(request);
-
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(
-            "OzoneManagerProtocol {} request is processed. Response: "
-                + "<json>{}</json>",
-            request.getCmdType().toString(),
-            omResponse.toString().replaceAll("\n", "\\\\n"));
-      }
-      return omResponse;
-
-    } finally {
-      scope.close();
-    }
+    return dispatcher.processRequest(request, this::processRequest,
+        request.getCmdType(), request.getTraceID());
   }
 
   private OMResponse processRequest(OMRequest request) throws
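
The tracing, metrics and trace-level logging removed above now live in the shared OzoneProtocolMessageDispatcher, so the translator only supplies the handler. Below is a hedged sketch of the resulting flow, using only names that appear in this hunk; metrics and LOG stand for the translator's existing metrics object and logger.

    // Sketch of the delegation introduced in this hunk.
    private final OzoneProtocolMessageDispatcher<OMRequest, OMResponse>
        dispatcher = new OzoneProtocolMessageDispatcher<>(
            "OzoneProtocol", metrics, LOG);

    @Override
    public OMResponse submitRequest(RpcController controller,
        OMRequest request) throws ServiceException {
      // processRequest(...) is wrapped with the metrics, trace-context and
      // request/response logging that the removed code used to do inline.
      return dispatcher.processRequest(request, this::processRequest,
          request.getCmdType(), request.getTraceID());
    }
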
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 118a8a4..999eede 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -35,8 +35,7 @@
   private static final Logger LOG = LoggerFactory.getLogger(Shell.class);
 
   public static final String OZONE_URI_DESCRIPTION = "Ozone URI could start "
-      + "with o3:// or http(s):// or without prefix. REST protocol will "
-      + "be used for http(s), RPC otherwise. URI may contain the host and port "
+      + "with o3:// or without prefix. URI may contain the host and port "
       + "of the OM server. Both are optional. "
       + "If they are not specified it will be identified from "
       + "the config files.";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index 468668c..441f1c1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -84,8 +84,9 @@
   public TemporaryFolder folder = new TemporaryFolder();
 
   @Before
-  public void setup() throws IOException  {
-    ozoneManager = Mockito.mock(OzoneManager.class);
+  public void setup() throws IOException {
+    ozoneManager = Mockito.mock(OzoneManager.class,
+        Mockito.withSettings().stubOnly());
     omMetrics = OMMetrics.create();
     OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
@@ -125,7 +126,7 @@
     testDoubleBuffer(1, 10);
     testDoubleBuffer(10, 100);
     testDoubleBuffer(100, 100);
-    testDoubleBuffer(1000, 100);
+    testDoubleBuffer(1000, 500);
   }
 
   /**
@@ -373,14 +374,15 @@
       setup();
       for (int i = 0; i < iterations; i++) {
         Daemon d1 = new Daemon(() ->
-            doTransactions(RandomStringUtils.randomAlphabetic(5), bucketCount));
+            doTransactions(RandomStringUtils.randomAlphabetic(5),
+                bucketCount));
         d1.start();
       }
 
       // We are doing +1 for volume transaction.
       long expectedTransactions = (bucketCount + 1) * iterations;
       GenericTestUtils.waitFor(() -> lastAppliedIndex == expectedTransactions,
-          100, 120000);
+          100, 500000);
 
       Assert.assertEquals(expectedTransactions,
           doubleBuffer.getFlushedTransactionCount()
@@ -428,15 +430,6 @@
     for (int i=0; i< bucketCount; i++) {
       createBucket(volumeName, UUID.randomUUID().toString(),
           trxId.incrementAndGet());
-      // For every 100 buckets creation adding 100ms delay
-
-      if (i % 100 == 0) {
-        try {
-          Thread.sleep(100);
-        } catch (Exception ex) {
-
-        }
-      }
     }
   }
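
A note on the mock change in this test's setup(): Mockito's stubOnly() setting creates a mock that answers stubbed calls but keeps no invocation history, so verify() cannot be used on it; the memory saved matters for the high-volume runs this test exercises. In isolation the setting looks like the sketch below (OzoneManager is the class mocked in the test).

    // A stub-only mock keeps no invocation history, trading verify() support
    // for a flat memory footprint in high-volume tests.
    OzoneManager ozoneManager = Mockito.mock(OzoneManager.class,
        Mockito.withSettings().stubOnly());
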
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
index 68649eb..c04fba2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
@@ -32,7 +32,7 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.OMNodeDetails;
+import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index cd2b665..88848f8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -265,11 +266,15 @@
    */
   public static void addUserToDB(String volumeName, String ownerName,
       OMMetadataManager omMetadataManager) throws Exception {
-    OzoneManagerProtocolProtos.VolumeList volumeList =
-        OzoneManagerProtocolProtos.VolumeList.newBuilder()
-            .addVolumeNames(volumeName).build();
+    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo =
+        OzoneManagerProtocolProtos.UserVolumeInfo
+            .newBuilder()
+            .addVolumeNames(volumeName)
+            .setObjectID(1)
+            .setUpdateID(1)
+            .build();
     omMetadataManager.getUserTable().put(
-        omMetadataManager.getUserKey(ownerName), volumeList);
+        omMetadataManager.getUserKey(ownerName), userVolumeInfo);
   }
 
   /**
@@ -370,10 +375,16 @@
 
     // Delete key from KeyTable and put in DeletedKeyTable
     omMetadataManager.getKeyTable().delete(ozoneKey);
-    String deletedKeyName = OmUtils.getDeletedKeyName(ozoneKey, Time.now());
-    omMetadataManager.getDeletedTable().put(deletedKeyName, omKeyInfo);
 
-    return deletedKeyName;
+    RepeatedOmKeyInfo repeatedOmKeyInfo =
+        omMetadataManager.getDeletedTable().get(ozoneKey);
+
+    repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo,
+        repeatedOmKeyInfo);
+
+    omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
+
+    return ozoneKey;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java
new file mode 100644
index 0000000..0bdab7d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for OM request.
+ */
+package org.apache.hadoop.ozone.om.request;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index 61e12f8..b685711 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -20,6 +20,7 @@
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -73,6 +74,11 @@
       OMClientResponse omClientResponse =
           omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
               ozoneManagerDoubleBufferHelper);
+      Assert.assertTrue(omClientResponse instanceof OMVolumeCreateResponse);
+      OMVolumeCreateResponse response =
+          (OMVolumeCreateResponse) omClientResponse;
+      Assert.assertEquals(1, response.getOmVolumeArgs().getObjectID());
+      Assert.assertEquals(1, response.getOmVolumeArgs().getUpdateID());
     } catch (IllegalArgumentException ex){
       GenericTestUtils.assertExceptionContains("should be greater than zero",
           ex);
@@ -106,7 +112,7 @@
     omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest);
 
     OMClientResponse omClientResponse =
-        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1,
+        omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2,
             ozoneManagerDoubleBufferHelper);
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
@@ -124,6 +130,8 @@
         omMetadataManager.getVolumeTable().get(volumeKey);
    // As request is valid, volume table should have an entry.
     Assert.assertNotNull(omVolumeArgs);
+    Assert.assertEquals(2, omVolumeArgs.getObjectID());
+    Assert.assertEquals(2, omVolumeArgs.getUpdateID());
 
     // Check data from table and request.
     Assert.assertEquals(volumeInfo.getVolume(), omVolumeArgs.getVolume());
@@ -132,10 +140,10 @@
     Assert.assertEquals(volumeInfo.getCreationTime(),
         omVolumeArgs.getCreationTime());
 
-    OzoneManagerProtocolProtos.VolumeList volumeList = omMetadataManager
+    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo = omMetadataManager
         .getUserTable().get(ownerKey);
-    Assert.assertNotNull(volumeList);
-    Assert.assertEquals(volumeName, volumeList.getVolumeNames(0));
+    Assert.assertNotNull(userVolumeInfo);
+    Assert.assertEquals(volumeName, userVolumeInfo.getVolumeNames(0));
 
     // Create another volume for the user.
     originalRequest = createVolumeRequest("vol1", adminName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
index d67ac08..af38ba0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java
@@ -90,14 +90,14 @@
     Assert.assertEquals(newOwner, fromDBOwner);
 
 
-    OzoneManagerProtocolProtos.VolumeList newOwnerVolumeList =
+    OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList =
         omMetadataManager.getUserTable().get(newOwnerKey);
 
     Assert.assertNotNull(newOwnerVolumeList);
     Assert.assertEquals(volumeName,
         newOwnerVolumeList.getVolumeNamesList().get(0));
 
-    OzoneManagerProtocolProtos.VolumeList oldOwnerVolumeList =
+    OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList =
         omMetadataManager.getUserTable().get(
             omMetadataManager.getUserKey(ownerKey));
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java
index 4cafe7a..5e41d2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java
@@ -56,8 +56,10 @@
                     .getDefaultInstance())
             .build();
 
-    OzoneManagerProtocolProtos.VolumeList volumeList =
-        OzoneManagerProtocolProtos.VolumeList.newBuilder()
+    OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo =
+        OzoneManagerProtocolProtos.UserVolumeInfo.newBuilder()
+            .setObjectID(1)
+            .setUpdateID(1)
             .addVolumeNames(volumeName).build();
 
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
@@ -65,7 +67,7 @@
         .setVolume(volumeName).setCreationTime(Time.now()).build();
 
     OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
+        new OMVolumeCreateResponse(omVolumeArgs, userVolumeInfo, omResponse);
 
 
     OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index da96e0c..ba2b738 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -21,9 +21,7 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -52,15 +50,11 @@
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    long deletionTime = Time.now();
-
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
-    String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
-        ozoneKey, deletionTime);
 
     TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
         clientID, replicationType, replicationFactor, omMetadataManager);
@@ -76,7 +70,7 @@
     // As default key entry does not have any blocks, it should not be in
     // deletedKeyTable.
     Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-        deletedOzoneKeyName));
+        ozoneKey));
   }
 
   @Test
@@ -117,13 +111,9 @@
             .setStatus(OzoneManagerProtocolProtos.Status.OK)
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
-    long deletionTime = Time.now();
 
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
-
-    String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
-        ozoneKey, deletionTime);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -135,7 +125,7 @@
 
    // Key has blocks, so it should be in deletedKeyTable.
     Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-        deletedOzoneKeyName));
+        ozoneKey));
   }
 
 
@@ -152,7 +142,7 @@
             .build();
 
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, Time.now(), omResponse);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java
new file mode 100644
index 0000000..fd48e14
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for OM Response.
+ */
+package org.apache.hadoop.ozone.om.response;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 634ffaf..09b028b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -113,8 +113,7 @@
         .setAbortMultiPartUploadResponse(
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
-    return new S3MultipartUploadAbortResponse(multipartKey, timeStamp,
-            omMultipartKeyInfo,
+    return new S3MultipartUploadAbortResponse(multipartKey, omMultipartKeyInfo,
             omResponse);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index b6707ed..60aacd5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -20,10 +20,10 @@
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -124,24 +124,25 @@
     Assert.assertTrue(omMetadataManager.countRowsInTable(
         omMetadataManager.getDeletedTable()) == 2);
 
-    String part1DeletedKeyName = OmUtils.getDeletedKeyName(
-        omMultipartKeyInfo.getPartKeyInfo(1).getPartName(),
-        timeStamp);
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
 
-    String part2DeletedKeyName = OmUtils.getDeletedKeyName(
-        omMultipartKeyInfo.getPartKeyInfo(2).getPartName(),
-        timeStamp);
+    String part2DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
 
     Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
         part1DeletedKeyName));
     Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
         part2DeletedKeyName));
 
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
     Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
-        omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
+        ro.getOmKeyInfoList().get(0));
 
+    ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
     Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()),
-        omMetadataManager.getDeletedTable().get(part2DeletedKeyName));
+        ro.getOmKeyInfoList().get(0));
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
index f395e01..b69d8b7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateVolumeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .UserVolumeInfo;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.junit.Assert;
@@ -68,7 +68,8 @@
 
     String volumeName = UUID.randomUUID().toString();
     String userName = "user1";
-    VolumeList volumeList = VolumeList.newBuilder()
+    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(1).setUpdateID(1)
         .addVolumeNames(volumeName).build();
 
     OMResponse omResponse = OMResponse.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java
index 67bdf15..5d6b481 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateVolumeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .UserVolumeInfo;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.junit.Assert;
@@ -68,7 +68,9 @@
 
     String volumeName = UUID.randomUUID().toString();
     String userName = "user1";
-    VolumeList volumeList = VolumeList.newBuilder()
+    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(1)
+        .setUpdateID(1)
         .addVolumeNames(volumeName).build();
 
     OMResponse omResponse = OMResponse.newBuilder()
@@ -85,7 +87,8 @@
         new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
 
    // As we are deleting, the updated volume list should be empty.
-    VolumeList updatedVolumeList = VolumeList.newBuilder().build();
+    UserVolumeInfo updatedVolumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(1).setUpdateID(1).build();
     OMVolumeDeleteResponse omVolumeDeleteResponse =
         new OMVolumeDeleteResponse(volumeName, userName, updatedVolumeList,
             omResponse);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
index a04df9c..0951c06 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateVolumeResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .VolumeList;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .UserVolumeInfo;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -69,7 +69,9 @@
 
     String volumeName = UUID.randomUUID().toString();
     String oldOwner = "user1";
-    VolumeList volumeList = VolumeList.newBuilder()
+    UserVolumeInfo volumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(1)
+        .setUpdateID(1)
         .addVolumeNames(volumeName).build();
 
     OMResponse omResponse = OMResponse.newBuilder()
@@ -88,9 +90,14 @@
 
 
     String newOwner = "user2";
-    VolumeList newOwnerVolumeList = VolumeList.newBuilder()
+    UserVolumeInfo newOwnerVolumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(1)
+        .setUpdateID(1)
         .addVolumeNames(volumeName).build();
-    VolumeList oldOwnerVolumeList = VolumeList.newBuilder().build();
+    UserVolumeInfo oldOwnerVolumeList = UserVolumeInfo.newBuilder()
+        .setObjectID(2)
+        .setUpdateID(2)
+        .build();
     OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(newOwner).setAdminName(newOwner)
         .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime())
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
new file mode 100644
index 0000000..98788cd
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Test Volume functions.
+ */
+package org.apache.hadoop.ozone.om.response.volume;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
index f05a1e8..874252d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
@@ -169,8 +169,15 @@
     validateHash(token.getPassword(), token.getIdentifier());
   }
 
-  @Test
-  public void testRenewTokenSuccess() throws Exception {
+  private void restartSecretManager() throws IOException {
+    secretManager.stop();
+    secretManager = null;
+    secretManager = createSecretManager(conf, tokenMaxLifetime,
+        expiryTime, tokenRemoverScanInterval);
+  }
+
+  private void testRenewTokenSuccessHelper(boolean restartSecretManager)
+      throws Exception {
     secretManager = createSecretManager(conf, tokenMaxLifetime,
         expiryTime, tokenRemoverScanInterval);
     secretManager.start(certificateClient);
@@ -178,10 +185,25 @@
         TEST_USER,
         TEST_USER);
     Thread.sleep(10 * 5);
+
+    if (restartSecretManager) {
+      restartSecretManager();
+    }
+
     long renewalTime = secretManager.renewToken(token, TEST_USER.toString());
     Assert.assertTrue(renewalTime > 0);
   }
 
+  @Test
+  public void testReloadAndRenewToken() throws Exception {
+    testRenewTokenSuccessHelper(true);
+  }
+
+  @Test
+  public void testRenewTokenSuccess() throws Exception {
+    testRenewTokenSuccessHelper(false);
+  }
+
   /**
    * Tests failure for mismatch in renewer.
    */
@@ -375,6 +397,7 @@
       createSecretManager(OzoneConfiguration config, long tokenMaxLife,
       long expiry, long tokenRemoverScanTime) throws IOException {
     return new OzoneDelegationTokenSecretManager(config, tokenMaxLife,
-        expiry, tokenRemoverScanTime, serviceRpcAdd, s3SecretManager);
+        expiry, tokenRemoverScanTime, serviceRpcAdd, s3SecretManager,
+        certificateClient);
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-lib-current/pom.xml
index 9d77a78..5953acb 100644
--- a/hadoop-ozone/ozonefs-lib-current/pom.xml
+++ b/hadoop-ozone/ozonefs-lib-current/pom.xml
@@ -58,8 +58,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <skip>true</skip>
         </configuration>
diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
index fedd26b..c248308 100644
--- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml
+++ b/hadoop-ozone/ozonefs-lib-legacy/pom.xml
@@ -120,8 +120,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <skip>true</skip>
         </configuration>
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 32e4a63..4f85070 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -132,9 +132,12 @@
       <artifactId>hadoop-ozone-common</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
 
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index b2d143e..825e65c 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -28,7 +28,7 @@
   <properties>
     <hdds.version>0.5.0-SNAPSHOT</hdds.version>
     <ozone.version>0.5.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.4.0</ratis.version>
+    <ratis.version>0.5.0-201fc85-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Crater Lake</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>
@@ -291,20 +291,12 @@
             <exclude>**/dependency-reduced-pom.xml</exclude>
             <exclude>**/node_modules/**</exclude>
             <exclude>**/yarn.lock</exclude>
-            <exclude>**/recon-web/build/**</exclude>
+            <exclude>**/ozone-recon-web/build/**</exclude>
             <exclude>src/main/license/**</exclude>
           </excludes>
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <version>3.0.4</version>
-        <configuration>
-          <excludeFilterFile combine.self="override"/>
-        </configuration>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-dependency-plugin</artifactId>
         <executions>
@@ -351,7 +343,7 @@
         <configuration>
           <excludes>
             <exclude>**/node_modules/*</exclude>
-            <exclude>**/recon-web/**</exclude>
+            <exclude>**/ozone-recon-web/**</exclude>
           </excludes>
         </configuration>
         <executions>
diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml
index 4eed468..55318a5 100644
--- a/hadoop-ozone/recon/pom.xml
+++ b/hadoop-ozone/recon/pom.xml
@@ -77,8 +77,8 @@
         </executions>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
         </configuration>
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 95e6f9b..ecd47f2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -175,4 +175,39 @@
     }
   }
 
+  /**
+   * Load the last known DB in Recon.
+   * @param reconDbDir Recon DB directory to scan.
+   * @param fileNamePrefix prefix of the DB file names to consider.
+   * @return the most recent matching DB file, or null if none exists.
+   */
+  public File getLastKnownDB(File reconDbDir, String fileNamePrefix) {
+    String lastKnownSnapshotFileName = null;
+    long lastKnownSnapshotTs = Long.MIN_VALUE;
+    if (reconDbDir != null) {
+      File[] snapshotFiles = reconDbDir.listFiles((dir, name) ->
+          name.startsWith(fileNamePrefix));
+      if (snapshotFiles != null) {
+        for (File snapshotFile : snapshotFiles) {
+          String fileName = snapshotFile.getName();
+          try {
+            String[] fileNameSplits = fileName.split("_");
+            if (fileNameSplits.length <= 1) {
+              continue;
+            }
+            long snapshotTimestamp = Long.parseLong(fileNameSplits[1]);
+            if (lastKnownSnapshotTs < snapshotTimestamp) {
+              lastKnownSnapshotTs = snapshotTimestamp;
+              lastKnownSnapshotFileName = fileName;
+            }
+          } catch (NumberFormatException nfEx) {
+            LOG.warn("Unknown file found in Recon DB dir : {}", fileName);
+          }
+        }
+      }
+    }
+    return lastKnownSnapshotFileName == null ? null :
+        new File(reconDbDir.getPath(), lastKnownSnapshotFileName);
+  }
+
 }
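The getLastKnownDB helper added above scans the Recon DB directory for files named <prefix>_<timestamp> and returns the one with the largest timestamp, skipping anything that does not parse. A minimal, self-contained sketch of that selection rule, using only the JDK and hypothetical file and class names (not the Recon API), would look like this:

    import java.io.File;

    public final class LastKnownDbSketch {

      /** Pick the file named prefix_timestamp with the largest timestamp, or null. */
      static File pickLatest(File dir, String prefix) {
        File[] candidates = dir.listFiles((d, name) -> name.startsWith(prefix));
        File latest = null;
        long latestTs = Long.MIN_VALUE;
        if (candidates != null) {
          for (File f : candidates) {
            String[] parts = f.getName().split("_");
            if (parts.length <= 1) {
              continue;                      // no "_<timestamp>" suffix, skip it
            }
            try {
              long ts = Long.parseLong(parts[1]);
              if (ts > latestTs) {           // keep the newest candidate seen so far
                latestTs = ts;
                latest = f;
              }
            } catch (NumberFormatException ignored) {
              // unrelated file in the directory, skip it
            }
          }
        }
        return latest;
      }

      public static void main(String[] args) {
        // Hypothetical directory and prefix, only for illustration.
        File reconDbDir = new File("/tmp/recon-db");
        File latest = pickLatest(reconDbDir, "om.snapshot.db");
        System.out.println(latest == null ? "no snapshot found" : latest.getName());
      }
    }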
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index e554b25..3d55c99 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.ozone.recon.recovery;
 
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
+
 import java.io.File;
 import java.io.IOException;
 
@@ -28,6 +31,7 @@
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -42,17 +46,28 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(ReconOmMetadataManagerImpl.class);
 
-  @Inject
   private OzoneConfiguration ozoneConfiguration;
+  private ReconUtils reconUtils;
 
   @Inject
-  public ReconOmMetadataManagerImpl(OzoneConfiguration configuration) {
+  public ReconOmMetadataManagerImpl(OzoneConfiguration configuration,
+                                    ReconUtils reconUtils) {
+    this.reconUtils = reconUtils;
     this.ozoneConfiguration = configuration;
   }
 
   @Override
   public void start(OzoneConfiguration configuration) throws IOException {
     LOG.info("Starting ReconOMMetadataManagerImpl");
+    File reconDbDir =
+        reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR);
+    File lastKnownOMSnapshot =
+        reconUtils.getLastKnownDB(reconDbDir, RECON_OM_SNAPSHOT_DB);
+    if (lastKnownOMSnapshot != null) {
+      LOG.info("Last known snapshot for OM : {}",
+          lastKnownOMSnapshot.getAbsolutePath());
+      initializeNewRdbStore(lastKnownOMSnapshot);
+    }
   }
 
   /**
@@ -69,7 +84,7 @@
       addOMTablesAndCodecs(dbStoreBuilder);
       DBStore newStore = dbStoreBuilder.build();
       setStore(newStore);
-      LOG.info("Created new OM DB snapshot at {}.",
+      LOG.info("Created OM DB snapshot at {}.",
           dbFile.getAbsolutePath());
     } catch (IOException ioEx) {
       LOG.error("Unable to initialize Recon OM DB snapshot store.",
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java
index 4e32e1a..85edb7e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java
@@ -81,14 +81,7 @@
   public ContainerDBServiceProviderImpl(DBStore dbStore,
                                         Configuration sqlConfiguration) {
     globalStatsDao = new GlobalStatsDao(sqlConfiguration);
-    try {
-      this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE,
-          ContainerKeyPrefix.class, Integer.class);
-      this.containerKeyCountTable = dbStore.getTable(CONTAINER_KEY_COUNT_TABLE,
-          Long.class, Long.class);
-    } catch (IOException e) {
-      LOG.error("Unable to create Container Key tables." + e);
-    }
+    initializeTables(dbStore);
   }
 
   /**
@@ -107,8 +100,9 @@
     File oldDBLocation = containerDbStore.getDbLocation();
     containerDbStore = ReconContainerDBProvider
         .getNewDBStore(configuration, reconUtils);
-    containerKeyTable = containerDbStore.getTable(CONTAINER_KEY_TABLE,
-        ContainerKeyPrefix.class, Integer.class);
+    LOG.info("Creating new Recon Container DB at {}",
+        containerDbStore.getDbLocation().getAbsolutePath());
+    initializeTables(containerDbStore);
 
     if (oldDBLocation.exists()) {
       LOG.info("Cleaning up old Recon Container DB at {}.",
@@ -128,6 +122,20 @@
   }
 
   /**
+   * Initialize the container DB tables.
+   * @param dbStore DB store to initialize the tables from.
+   */
+  private void initializeTables(DBStore dbStore) {
+    try {
+      this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE,
+          ContainerKeyPrefix.class, Integer.class);
+      this.containerKeyCountTable = dbStore.getTable(CONTAINER_KEY_COUNT_TABLE,
+          Long.class, Long.class);
+    } catch (IOException e) {
+      LOG.error("Unable to create Container Key tables." + e);
+    }
+  }
+  /**
    * Concatenate the containerID and Key Prefix using a delimiter and store the
    * count into the container DB store.
    *
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index a5e5e4df5..789b301 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -178,6 +178,11 @@
 
   @Override
   public void start() {
+    try {
+      omMetadataManager.start(configuration);
+    } catch (IOException ioEx) {
+      LOG.error("Error staring Recon OM Metadata Manager.", ioEx);
+    }
     long initialDelay = configuration.getTimeDuration(
         RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY,
         RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT,
@@ -235,20 +240,24 @@
    * @throws IOException
    */
   @VisibleForTesting
-  void updateReconOmDBWithNewSnapshot() throws IOException {
+  boolean updateReconOmDBWithNewSnapshot() throws IOException {
     // Obtain the current DB snapshot from OM and
     // update the in house OM metadata managed DB instance.
     DBCheckpoint dbSnapshot = getOzoneManagerDBSnapshot();
     if (dbSnapshot != null && dbSnapshot.getCheckpointLocation() != null) {
+      LOG.info("Got new checkpoint from OM : " +
+          dbSnapshot.getCheckpointLocation());
       try {
         omMetadataManager.updateOmDB(dbSnapshot.getCheckpointLocation()
             .toFile());
+        return true;
       } catch (IOException e) {
         LOG.error("Unable to refresh Recon OM DB Snapshot. ", e);
       }
     } else {
       LOG.error("Null snapshot location got from OM.");
     }
+    return false;
   }
 
   /**
@@ -287,6 +296,7 @@
    */
   @VisibleForTesting
   void syncDataFromOM() {
+    LOG.info("Syncing data from Ozone Manager.");
     long currentSequenceNumber = getCurrentOMDBSequenceNumber();
     boolean fullSnapshot = false;
 
@@ -296,6 +306,7 @@
       OMDBUpdatesHandler omdbUpdatesHandler =
           new OMDBUpdatesHandler(omMetadataManager);
       try {
+        LOG.info("Obtaining delta updates from Ozone Manager");
         // Get updates from OM and apply to local Recon OM DB.
         getAndApplyDeltaUpdatesFromOM(currentSequenceNumber,
             omdbUpdatesHandler);
@@ -315,16 +326,20 @@
 
     if (fullSnapshot) {
       try {
+        LOG.info("Obtaining full snapshot from Ozone Manager");
         // Update local Recon OM DB to new snapshot.
-        updateReconOmDBWithNewSnapshot();
+        boolean success = updateReconOmDBWithNewSnapshot();
         // Update timestamp of successful delta updates query.
-        ReconTaskStatus reconTaskStatusRecord =
-            new ReconTaskStatus(
-                OmSnapshotTaskName.OM_DB_FULL_SNAPSHOT.name(),
-                System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
-        reconTaskStatusDao.update(reconTaskStatusRecord);
-        // Reinitialize tasks that are listening.
-        reconTaskController.reInitializeTasks(omMetadataManager);
+        if (success) {
+          ReconTaskStatus reconTaskStatusRecord =
+              new ReconTaskStatus(
+                  OmSnapshotTaskName.OM_DB_FULL_SNAPSHOT.name(),
+                  System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
+          reconTaskStatusDao.update(reconTaskStatusRecord);
+          // Reinitialize tasks that are listening.
+          LOG.info("Calling reprocess on Recon tasks.");
+          reconTaskController.reInitializeTasks(omMetadataManager);
+        }
       } catch (IOException | InterruptedException e) {
         LOG.error("Unable to update Recon's OM DB with new snapshot ", e);
       }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java
index f2de129..9c3e987 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java
@@ -23,6 +23,7 @@
 import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
 
+import java.io.File;
 import java.nio.file.Path;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -49,15 +50,29 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(ReconContainerDBProvider.class);
 
-  @Inject
   private OzoneConfiguration configuration;
+  private ReconUtils reconUtils;
 
   @Inject
-  private ReconUtils reconUtils;
+  public ReconContainerDBProvider(OzoneConfiguration configuration,
+                                  ReconUtils reconUtils) {
+    this.configuration = configuration;
+    this.reconUtils = reconUtils;
+  }
 
   @Override
   public DBStore get() {
-    DBStore dbStore = getNewDBStore(configuration, reconUtils);
+    DBStore dbStore;
+    File reconDbDir =
+        reconUtils.getReconDbDir(configuration, OZONE_RECON_DB_DIR);
+    File lastKnownOMSnapshot =
+        reconUtils.getLastKnownDB(reconDbDir, RECON_CONTAINER_DB);
+    if (lastKnownOMSnapshot != null) {
+      dbStore = getDBStore(configuration, reconUtils,
+          lastKnownOMSnapshot.getName());
+    } else {
+      dbStore = getNewDBStore(configuration, reconUtils);
+    }
     if (dbStore == null) {
       throw new ProvisionException("Unable to provide instance of DBStore " +
           "store.");
@@ -65,10 +80,9 @@
     return dbStore;
   }
 
-  public static DBStore getNewDBStore(OzoneConfiguration configuration,
-                                      ReconUtils reconUtils) {
+  private static DBStore getDBStore(OzoneConfiguration configuration,
+                            ReconUtils reconUtils, String dbName) {
     DBStore dbStore = null;
-    String dbName = RECON_CONTAINER_DB + "_" + System.currentTimeMillis();
     try {
       Path metaDir = reconUtils.getReconDbDir(
           configuration, OZONE_RECON_DB_DIR).toPath();
@@ -86,4 +100,10 @@
     }
     return dbStore;
   }
+
+  static DBStore getNewDBStore(OzoneConfiguration configuration,
+                               ReconUtils reconUtils) {
+    String dbName = RECON_CONTAINER_DB + "_" + System.currentTimeMillis();
+    return getDBStore(configuration, reconUtils, dbName);
+  }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
index 649ec02..ff3765a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java
@@ -120,7 +120,7 @@
         .getAbsolutePath());
 
     ReconOMMetadataManager reconOMMetaMgr =
-        new ReconOmMetadataManagerImpl(configuration);
+        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
     reconOMMetaMgr.start(configuration);
 
     reconOMMetaMgr.updateOmDB(
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index ad04837..6d19dac 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -133,4 +133,35 @@
     assertEquals("File 1 Contents", contents);
   }
 
+  @Test
+  public void testGetLastKnownDB() throws IOException {
+    File newDir = folder.newFolder();
+
+    File file1 = Paths.get(newDir.getAbsolutePath(), "valid_1")
+        .toFile();
+    String str = "File1 Contents";
+    BufferedWriter writer = new BufferedWriter(new FileWriter(
+        file1.getAbsolutePath()));
+    writer.write(str);
+    writer.close();
+
+    File file2 = Paths.get(newDir.getAbsolutePath(), "valid_2")
+        .toFile();
+    str = "File2 Contents";
+    writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath()));
+    writer.write(str);
+    writer.close();
+
+
+    File file3 = Paths.get(newDir.getAbsolutePath(), "invalid_3")
+        .toFile();
+    str = "File3 Contents";
+    writer = new BufferedWriter(new FileWriter(file3.getAbsolutePath()));
+    writer.write(str);
+    writer.close();
+
+    ReconUtils reconUtils = new ReconUtils();
+    File latestValidFile = reconUtils.getLastKnownDB(newDir, "valid");
+    assertTrue(latestValidFile.getName().equals("valid_2"));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
index 64fb8d8..a9e6aea 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
@@ -22,7 +22,9 @@
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
 
 import java.io.File;
+import java.io.IOException;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -31,6 +33,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -45,8 +48,94 @@
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
 
   @Test
+  public void testStart() throws Exception {
+
+    OMMetadataManager omMetadataManager = getOMMetadataManager();
+
+    //Take checkpoint of the above OM DB.
+    DBCheckpoint checkpoint = omMetadataManager.getStore()
+        .getCheckpoint(true);
+    File snapshotFile = new File(
+        checkpoint.getCheckpointLocation().getParent() + "/" +
+            "om.snapshot.db_" + System.currentTimeMillis());
+    checkpoint.getCheckpointLocation().toFile().renameTo(snapshotFile);
+
+    //Create new Recon OM Metadata manager instance.
+    File reconOmDbDir = temporaryFolder.newFolder();
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
+        .getAbsolutePath());
+    FileUtils.copyDirectory(snapshotFile.getParentFile(), reconOmDbDir);
+
+    ReconOMMetadataManager reconOMMetadataManager =
+        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
+    reconOMMetadataManager.start(configuration);
+
+    Assert.assertNotNull(reconOMMetadataManager.getBucketTable());
+    Assert.assertNotNull(reconOMMetadataManager.getVolumeTable()
+        .get("/sampleVol"));
+    Assert.assertNotNull(reconOMMetadataManager.getBucketTable()
+        .get("/sampleVol/bucketOne"));
+    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_one"));
+    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_two"));
+  }
+
+  @Test
   public void testUpdateOmDB() throws Exception {
 
+    OMMetadataManager omMetadataManager = getOMMetadataManager();
+    //Make sure OM Metadata reflects the keys that were inserted.
+    Assert.assertNotNull(omMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_one"));
+    Assert.assertNotNull(omMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_two"));
+
+    //Take checkpoint of OM DB.
+    DBCheckpoint checkpoint = omMetadataManager.getStore()
+        .getCheckpoint(true);
+    Assert.assertNotNull(checkpoint.getCheckpointLocation());
+
+    //Create new Recon OM Metadata manager instance.
+    File reconOmDbDir = temporaryFolder.newFolder();
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
+        .getAbsolutePath());
+    ReconOMMetadataManager reconOMMetadataManager =
+        new ReconOmMetadataManagerImpl(configuration, new ReconUtils());
+    reconOMMetadataManager.start(configuration);
+
+    //Before accepting a snapshot, the metadata should have null tables.
+    Assert.assertNull(reconOMMetadataManager.getBucketTable());
+
+    //Update Recon OM DB with the OM DB checkpoint location.
+    reconOMMetadataManager.updateOmDB(
+        checkpoint.getCheckpointLocation().toFile());
+
+    //Now, the tables should have been initialized.
+    Assert.assertNotNull(reconOMMetadataManager.getBucketTable());
+
+    // Check volume and bucket entries.
+    Assert.assertNotNull(reconOMMetadataManager.getVolumeTable()
+        .get("/sampleVol"));
+    Assert.assertNotNull(reconOMMetadataManager.getBucketTable()
+        .get("/sampleVol/bucketOne"));
+
+    //Verify Keys inserted in OM DB are available in Recon OM DB.
+    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_one"));
+    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
+        .get("/sampleVol/bucketOne/key_two"));
+
+  }
+
+  /**
+   * Get a test OM metadata manager instance populated with sample data.
+   * @return OMMetadataManager instance.
+   * @throws IOException if the test DB cannot be created.
+   */
+  private OMMetadataManager getOMMetadataManager() throws IOException {
     //Create a new OM Metadata Manager instance + DB.
     File omDbDir = temporaryFolder.newFolder();
     OzoneConfiguration omConfiguration = new OzoneConfiguration();
@@ -93,48 +182,6 @@
             .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE)
             .build());
 
-    //Make sure OM Metadata reflects the keys that were inserted.
-    Assert.assertNotNull(omMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNotNull(omMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-
-    //Take checkpoint of OM DB.
-    DBCheckpoint checkpoint = omMetadataManager.getStore()
-        .getCheckpoint(true);
-    Assert.assertNotNull(checkpoint.getCheckpointLocation());
-
-    //Create new Recon OM Metadata manager instance.
-    File reconOmDbDir = temporaryFolder.newFolder();
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir
-        .getAbsolutePath());
-    ReconOMMetadataManager reconOMMetadataManager =
-        new ReconOmMetadataManagerImpl(configuration);
-    reconOMMetadataManager.start(configuration);
-
-    //Before accepting a snapshot, the metadata should have null tables.
-    Assert.assertNull(reconOMMetadataManager.getBucketTable());
-
-    //Update Recon OM DB with the OM DB checkpoint location.
-    reconOMMetadataManager.updateOmDB(
-        checkpoint.getCheckpointLocation().toFile());
-
-    //Now, the tables should have been initialized.
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable());
-
-    // Check volume and bucket entries.
-    Assert.assertNotNull(reconOMMetadataManager.getVolumeTable()
-        .get(volumeKey));
-    Assert.assertNotNull(reconOMMetadataManager.getBucketTable()
-        .get(bucketKey));
-
-    //Verify Keys inserted in OM DB are available in Recon OM DB.
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_one"));
-    Assert.assertNotNull(reconOMMetadataManager.getKeyTable()
-        .get("/sampleVol/bucketOne/key_two"));
-
+    return omMetadataManager;
   }
-
 }
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
index f38cbc6..a2eb7f4 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
@@ -114,7 +114,7 @@
     Assert.assertNull(reconOMMetadataManager.getKeyTable()
         .get("/sampleVol/bucketOne/key_two"));
 
-    ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot();
+    assertTrue(ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot());
 
     assertNotNull(reconOMMetadataManager.getKeyTable()
         .get("/sampleVol/bucketOne/key_one"));
@@ -241,10 +241,10 @@
         .reInitializeTasks(omMetadataManager);
 
     OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
-        new OzoneManagerServiceProviderImpl(configuration, omMetadataManager,
+        new MockOzoneServiceProvider(configuration, omMetadataManager,
             reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol);
 
-    //Should trigger full snapshot request.
+    // Should trigger full snapshot request.
     ozoneManagerServiceProvider.syncDataFromOM();
 
     ArgumentCaptor<ReconTaskStatus> captor =
@@ -313,5 +313,26 @@
         .DBUpdatesRequest.class))).thenReturn(dbUpdatesWrapper);
     return ozoneManagerProtocolMock;
   }
+}
 
-}
\ No newline at end of file
+/**
+ * Mock OzoneManagerServiceProviderImpl which overrides
+ * updateReconOmDBWithNewSnapshot.
+ */
+class MockOzoneServiceProvider extends OzoneManagerServiceProviderImpl {
+
+  MockOzoneServiceProvider(OzoneConfiguration configuration,
+                           ReconOMMetadataManager omMetadataManager,
+                           ReconTaskController reconTaskController,
+                           ReconUtils reconUtils,
+                           OzoneManagerProtocol ozoneManagerClient)
+      throws IOException {
+    super(configuration, omMetadataManager, reconTaskController, reconUtils,
+        ozoneManagerClient);
+  }
+
+  @Override
+  public boolean updateReconOmDBWithNewSnapshot() {
+    return true;
+  }
+}
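MockOzoneServiceProvider above stubs out the snapshot download by subclassing the provider and overriding the @VisibleForTesting method. The same test pattern, reduced to a self-contained sketch with hypothetical class names:

    public final class OverrideForTestSketch {

      /** Class under test with an expensive step we want to stub out in tests. */
      static class SnapshotService {
        boolean fetchSnapshotFromOm() {
          // Imagine an RPC to the Ozone Manager here.
          return false;
        }

        boolean sync() {
          return fetchSnapshotFromOm();
        }
      }

      /** Test double: same type, but the expensive step always "succeeds". */
      static class MockSnapshotService extends SnapshotService {
        @Override
        boolean fetchSnapshotFromOm() {
          return true;
        }
      }

      public static void main(String[] args) {
        System.out.println(new MockSnapshotService().sync()); // true, no RPC made
      }
    }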
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java
index cb4aa7d..ad1feeb 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java
@@ -20,8 +20,6 @@
 
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -36,7 +34,6 @@
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
-import com.google.inject.ProvisionException;
 import com.google.inject.Singleton;
 
 /**
@@ -68,20 +65,10 @@
 
   @Test
   public void testGet() throws Exception {
-
     ReconContainerDBProvider reconContainerDBProvider = injector.getInstance(
         ReconContainerDBProvider.class);
     DBStore dbStore = reconContainerDBProvider.get();
     assertNotNull(dbStore);
-
-    ReconContainerDBProvider reconContainerDBProviderNew = new
-        ReconContainerDBProvider();
-    try {
-      reconContainerDBProviderNew.get();
-      fail();
-    } catch (Exception e) {
-      assertTrue(e instanceof ProvisionException);
-    }
   }
 
 }
\ No newline at end of file
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index cbbba1f..32c9587 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -210,9 +210,8 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
   </dependencies>
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
index d9afaf1..abaca03 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
@@ -62,6 +62,9 @@
   @Inject
   private Text omService;
 
+  @Inject
+  private String omServiceID;
+
 
   @Produces
   public OzoneClient createClient() throws IOException {
@@ -105,7 +108,13 @@
     } catch (Exception e) {
       LOG.error("Error: ", e);
     }
-    return OzoneClientFactory.getClient(ozoneConfiguration);
+
+    if (omServiceID == null) {
+      return OzoneClientFactory.getClient(ozoneConfiguration);
+    } else {
+      // In the HA case, we need to pass the OM service ID.
+      return OzoneClientFactory.getRpcClient(omServiceID, ozoneConfiguration);
+    }
   }
 
   @VisibleForTesting
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java
index f4342f6..b98426c 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java
@@ -20,33 +20,75 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
 import org.apache.hadoop.security.SecurityUtil;
 
 import javax.annotation.PostConstruct;
 import javax.enterprise.context.ApplicationScoped;
 import javax.enterprise.inject.Produces;
 import javax.inject.Inject;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
+
 /**
  * This class creates the OM service .
  */
 @ApplicationScoped
 public class OzoneServiceProvider {
 
-  private Text omServiceAdd;
+  private Text omServiceAddr;
+
+  private String omserviceID;
 
   @Inject
   private OzoneConfiguration conf;
 
   @PostConstruct
   public void init() {
-    omServiceAdd = SecurityUtil.buildTokenService(OmUtils.
-        getOmAddressForClients(conf));
+    Collection<String> serviceIdList =
+        conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
+    if (serviceIdList.size() == 0) {
+      // Non-HA cluster
+      omServiceAddr = SecurityUtil.buildTokenService(OmUtils.
+          getOmAddressForClients(conf));
+    } else {
+      // HA cluster.
+      // For now, if multiple service IDs are configured, we throw an
+      // exception, because S3Gateway would not know which OM service to
+      // talk to. In the future, if OM federation is supported, we can
+      // resolve this by having another property like
+      // ozone.om.internal.service.id.
+      // TODO: Revisit this later.
+      if (serviceIdList.size() > 1) {
+        throw new IllegalArgumentException("Multiple serviceIds are " +
+            "configured. " + Arrays.toString(serviceIdList.toArray()));
+      } else {
+        String serviceId = serviceIdList.iterator().next();
+        Collection<String> omNodeIds = OmUtils.getOMNodeIds(conf, serviceId);
+        if (omNodeIds.size() == 0) {
+          throw new IllegalArgumentException(OZONE_OM_NODES_KEY
+              + "." + serviceId + " is not defined");
+        }
+        omServiceAddr = new Text(OzoneS3Util.buildServiceNameForToken(conf,
+            serviceId, omNodeIds));
+        omserviceID = serviceId;
+      }
+    }
   }
 
 
   @Produces
   public Text getService() {
-    return omServiceAdd;
+    return omServiceAddr;
+  }
+
+  @Produces
+  public String getOmServiceID() {
+    return omserviceID;
   }
 
 }
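The new init() logic branches on ozone.om.service.ids: with no service IDs it keeps the existing single-OM token service, with exactly one it builds an HA service name, and with more than one it rejects the configuration. A stripped-down sketch of just that selection, using hypothetical class and method names (the real code goes through OmUtils and SecurityUtil):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;

    public final class ServiceIdSelectionSketch {

      /** Returns the single configured service id, or null for a non-HA cluster. */
      static String chooseServiceId(Collection<String> configuredServiceIds) {
        if (configuredServiceIds.isEmpty()) {
          return null;                        // non-HA: use the single OM address
        }
        if (configuredServiceIds.size() > 1) {
          throw new IllegalArgumentException(
              "Multiple serviceIds are configured: " + configuredServiceIds);
        }
        return configuredServiceIds.iterator().next();
      }

      public static void main(String[] args) {
        System.out.println(chooseServiceId(Collections.<String>emptyList())); // null
        System.out.println(chooseServiceId(Arrays.asList("omService")));      // omService
      }
    }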
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
index 129ea2d..ce7d4f2 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
@@ -19,8 +19,17 @@
 package org.apache.hadoop.ozone.s3.util;
 
 import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.security.SecurityUtil;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
 import java.util.Objects;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+
 /**
  * Ozone util for S3 related operations.
  */
@@ -33,4 +42,39 @@
     Objects.requireNonNull(userName);
     return DigestUtils.md5Hex(userName);
   }
+
+  /**
+   * Generate the service name for a token.
+   * @param configuration ozone configuration.
+   * @param serviceId - ozone manager service ID.
+   * @param omNodeIds - list of node ids for the given OM service.
+   * @return service name.
+   */
+  public static String buildServiceNameForToken(
+      @Nonnull OzoneConfiguration configuration, @Nonnull String serviceId,
+      @Nonnull Collection<String> omNodeIds) {
+    StringBuilder rpcAddress = new StringBuilder();
+
+    int nodesLength = omNodeIds.size();
+    int counter = 0;
+    for (String nodeId : omNodeIds) {
+      counter++;
+      String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+          serviceId, nodeId);
+      String rpcAddrStr = OmUtils.getOmRpcAddress(configuration, rpcAddrKey);
+      if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
+        throw new IllegalArgumentException("Could not find rpcAddress for " +
+            OZONE_OM_ADDRESS_KEY + "." + serviceId + "." + nodeId);
+      }
+
+      if (counter != nodesLength) {
+        rpcAddress.append(SecurityUtil.buildTokenService(
+            NetUtils.createSocketAddr(rpcAddrStr)) + ",");
+      } else {
+        rpcAddress.append(SecurityUtil.buildTokenService(
+            NetUtils.createSocketAddr(rpcAddrStr)));
+      }
+    }
+    return rpcAddress.toString();
+  }
 }
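buildServiceNameForToken above resolves each OM node's RPC address for the given service id, fails fast if any address is missing, and joins the resulting token services with commas. The shape of the result can be illustrated with a plain string-joining sketch (hypothetical class name and addresses; the real method goes through SecurityUtil and NetUtils):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.StringJoiner;

    public final class TokenServiceNameSketch {

      /** Join node id -> host:port addresses into a comma-separated service name. */
      static String buildServiceName(Map<String, String> rpcAddrByNodeId) {
        StringJoiner joined = new StringJoiner(",");
        for (Map.Entry<String, String> entry : rpcAddrByNodeId.entrySet()) {
          String addr = entry.getValue();
          if (addr == null || addr.isEmpty()) {
            throw new IllegalArgumentException(
                "Could not find rpcAddress for node " + entry.getKey());
          }
          joined.add(addr);
        }
        return joined.toString();
      }

      public static void main(String[] args) {
        Map<String, String> addrs = new LinkedHashMap<>();
        addrs.put("om1", "om1:9862");
        addrs.put("om2", "om2:9862");
        addrs.put("om3", "om3:9862");
        System.out.println(buildServiceName(addrs)); // om1:9862,om2:9862,om3:9862
      }
    }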
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
new file mode 100644
index 0000000..8892a97
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.util;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP;
+import static org.junit.Assert.fail;
+
+/**
+ * Class used to test OzoneS3Util.
+ */
+public class TestOzoneS3Util {
+
+
+  private OzoneConfiguration configuration;
+  private String serviceID = "omService";
+
+  @Before
+  public void setConf() {
+    configuration = new OzoneConfiguration();
+
+    String nodeIDs = "om1,om2,om3";
+    configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID);
+    configuration.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID,
+        nodeIDs);
+    configuration.setBoolean(HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
+  }
+
+  @Test
+  public void testBuildServiceNameForToken() {
+
+    Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
+        serviceID);
+
+    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+        serviceID, "om1"), "om1:9862");
+    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+        serviceID, "om2"), "om2:9862");
+    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+        serviceID, "om3"), "om3:9862");
+
+    String expectedOmServiceAddress = buildServiceAddress(nodeIDList);
+
+    SecurityUtil.setConfiguration(configuration);
+    String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration,
+        serviceID, nodeIDList);
+
+    Assert.assertEquals(expectedOmServiceAddress, omserviceAddr);
+  }
+
+
+  @Test
+  public void testBuildServiceNameForTokenIncorrectConfig() {
+
+    Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
+        serviceID);
+
+    // Don't set the om3 node RPC address. Since one of the OM addresses is
+    // missing, buildServiceNameForToken will fail.
+    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+        serviceID, "om1"), "om1:9862");
+    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+        serviceID, "om2"), "om2:9862");
+
+
+    SecurityUtil.setConfiguration(configuration);
+
+    try {
+      OzoneS3Util.buildServiceNameForToken(configuration,
+          serviceID, nodeIDList);
+      fail("testBuildServiceNameForTokenIncorrectConfig failed");
+    } catch (IllegalArgumentException ex) {
+      GenericTestUtils.assertExceptionContains("Could not find rpcAddress " +
+          "for", ex);
+    }
+
+
+  }
+
+  /**
+   * Build the expected service name from the list of node ids.
+   * @param nodeIDList list of OM node IDs.
+   * @return service name for token.
+   */
+  private String buildServiceAddress(Collection<String> nodeIDList) {
+    StringBuilder omServiceAddrBuilder = new StringBuilder();
+    int nodesLength = nodeIDList.size();
+    int counter = 0;
+    for (String nodeID : nodeIDList) {
+      counter++;
+      String addr = configuration.get(OmUtils.addKeySuffixes(
+          OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, nodeID));
+
+      if (counter != nodesLength) {
+        omServiceAddrBuilder.append(addr + ",");
+      } else {
+        omServiceAddrBuilder.append(addr);
+      }
+    }
+
+    return omServiceAddrBuilder.toString();
+  }
+
+}
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index b4a6bc2..d1ee9d5 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -101,9 +101,8 @@
       <version>1.11.615</version>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
@@ -133,8 +132,8 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml
           </excludeFilterFile>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index 0303479..f9b5e03 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -267,8 +267,9 @@
       } catch (OMException ex) {
         if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
           volume.createBucket(bucketName);
+        } else {
+          throw ex;
         }
-        throw ex;
       }
     }
   }
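The BaseFreonGenerator change above fixes the exception handling so that only results other than BUCKET_NOT_FOUND are rethrown; the recoverable case now ends with the bucket being created instead of the exception propagating. The corrected pattern, reduced to a self-contained sketch with a hypothetical exception type standing in for the OM result code:

    public final class EnsureBucketSketch {

      /** Hypothetical "bucket not found" condition, standing in for the OM result code. */
      static class BucketNotFoundException extends RuntimeException {
      }

      interface Volume {
        void getBucket(String name);   // throws BucketNotFoundException if missing
        void createBucket(String name);
      }

      static void ensureBucketExists(Volume volume, String bucketName) {
        try {
          volume.getBucket(bucketName);
        } catch (BucketNotFoundException ex) {
          // Only the "not found" case is recoverable: create the bucket.
          // Any other failure would propagate, matching the new else branch.
          volume.createBucket(bucketName);
        }
      }

      public static void main(String[] args) {
        Volume volume = new Volume() {
          private boolean exists = false;

          @Override
          public void getBucket(String name) {
            if (!exists) {
              throw new BucketNotFoundException();
            }
          }

          @Override
          public void createBucket(String name) {
            exists = true;
            System.out.println("created bucket " + name);
          }
        };
        ensureBucketExists(volume, "bucket1");
      }
    }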
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 7407f8b5..cc31619 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -34,7 +34,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.util.Tool;
@@ -360,7 +360,7 @@
                           byte[] value) throws IOException, SQLException {
     switch (type) {
     case USER:
-      VolumeList volumeList = VolumeList.parseFrom(value);
+      UserVolumeInfo volumeList = UserVolumeInfo.parseFrom(value);
       for (String volumeName : volumeList.getVolumeNamesList()) {
         String insertVolumeList =
             String.format(INSERT_VOLUME_LIST, keyName, volumeName);
diff --git a/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 0000000..0368002
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.fs.ozone.OzoneFileSystem
diff --git a/hadoop-ozone/upgrade/pom.xml b/hadoop-ozone/upgrade/pom.xml
index 5721102..0a4bd7f 100644
--- a/hadoop-ozone/upgrade/pom.xml
+++ b/hadoop-ozone/upgrade/pom.xml
@@ -34,9 +34,8 @@
       <artifactId>hadoop-hdds-common</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.google.code.findbugs</groupId>
-      <artifactId>findbugs</artifactId>
-      <version>3.0.1</version>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e610b6a..28110ca 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -69,8 +69,8 @@
 
     <!-- jackson versions -->
     <jackson.version>1.9.13</jackson.version>
-    <jackson2.version>2.9.9</jackson2.version>
-    <jackson2.databind.version>2.9.9.3</jackson2.databind.version>
+    <jackson2.version>2.9.10</jackson2.version>
+    <jackson2.databind.version>2.9.10</jackson2.databind.version>
 
     <!-- httpcomponents versions -->
     <httpclient.version>4.5.6</httpclient.version>
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index a790369..5f1a101 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -196,6 +196,7 @@
       <item name="Azure Data Lake Storage"
             href="hadoop-azure-datalake/index.html"/>
       <item name="OpenStack Swift" href="hadoop-openstack/index.html"/>
+      <item name="Tencent COS" href="hadoop-cos/index.html"/>
     </menu>
 
     <menu name="Auth" inherit="top">
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 345ac90..014a494 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -352,26 +352,30 @@
 
   /**
    * List of custom Signers. The signer class will be loaded, and the signer
-   * name will be associated with this signer class in the S3 SDK. e.g. Single
-   * CustomSigner -> 'CustomSigner:org.apache...CustomSignerClass Multiple
-   * CustomSigners -> 'CSigner1:CustomSignerClass1,CSigner2:CustomerSignerClass2
+   * name will be associated with this signer class in the S3 SDK.
+   * Examples
+   * CustomSigner {@literal ->} 'CustomSigner:org.apache...CustomSignerClass'
+   * CustomSigners {@literal ->} 'CSigner1:CSigner1Class,CSigner2:CSigner2Class'
+   * Initializer {@literal ->} 'CSigner1:CSigner1Class:CSigner1InitializerClass'
+   * With Existing {@literal ->} 'AWS4Signer,CSigner1,CSigner2:CSigner2Class'
    */
   public static final String CUSTOM_SIGNERS = "fs.s3a.custom.signers";
 
   /**
    * There's 3 parameters that can be used to specify a non-default signing
-   * algorithm. fs.s3a.signing-algorithm - This property has existed for the
-   * longest time. If specified, without either of the other 2 properties being
-   * specified, this signing algorithm will be used for S3 and DDB (S3Guard).
-   * The other 2 properties override this value for S3 or DDB.
+   * algorithm.<br>
+   * fs.s3a.signing-algorithm - This property has existed for the longest time.
+   * If specified, without either of the other 2 properties being specified,
+   * this signing algorithm will be used for S3 and DDB (S3Guard). <br>
+   * The other 2 properties override this value for S3 or DDB. <br>
    * fs.s3a.s3.signing-algorithm - Allows overriding the S3 Signing algorithm.
    * This does not affect DDB. Specifying this property without specifying
    * fs.s3a.signing-algorithm will only update the signing algorithm for S3
-   * requests, and the default will be used for DDB fs.s3a.ddb.signing-algorithm
-   * - Allows overriding the DDB Signing algorithm. This does not affect S3.
-   * Specifying this property without specifying fs.s3a.signing-algorithm will
-   * only update the signing algorithm for DDB requests, and the default will be
-   * used for S3
+   * requests, and the default will be used for DDB.<br>
+   * fs.s3a.ddb.signing-algorithm - Allows overriding the DDB Signing algorithm.
+   * This does not affect S3. Specifying this property without specifying
+   * fs.s3a.signing-algorithm will only update the signing algorithm for
+   * DDB requests, and the default will be used for S3.
    */
   public static final String SIGNING_ALGORITHM = "fs.s3a.signing-algorithm";
 
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
index a59ffa9..bbb9faa 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
@@ -31,6 +31,7 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.util.DurationInfo;
 
 /**
  * Class to provide lambda expression invocation of AWS operations.
@@ -105,7 +106,7 @@
   @Retries.OnceTranslated
   public static <T> T once(String action, String path, Operation<T> operation)
       throws IOException {
-    try {
+    try (DurationInfo ignored = new DurationInfo(LOG, false, "%s", action)) {
       return operation.execute();
     } catch (AmazonClientException e) {
       throw S3AUtils.translateException(action, path, e);
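
The change above wraps the operation in a `DurationInfo`, so the elapsed time of the named action is logged when the try-with-resources block closes. Below is a minimal sketch of the same pattern, assuming the four-argument constructor used in this patch (logger, log-at-info flag, format string, arguments); `doWork()` is a hypothetical stand-in for the wrapped AWS call:

```java
import org.apache.hadoop.util.DurationInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DurationInfoSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(DurationInfoSketch.class);

  static String timedOperation() {
    // "false" selects debug-level logging, mirroring the Invoker change;
    // the duration is logged when the block exits, even if doWork() throws.
    try (DurationInfo ignored =
             new DurationInfo(LOG, false, "%s", "sample operation")) {
      return doWork();
    }
  }

  // Hypothetical stand-in for the wrapped AWS operation.
  private static String doWork() {
    return "done";
  }
}
```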
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index bdffed4..a60f9af 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -497,17 +497,19 @@
    * @param capability string to query the stream support for.
    * @return true if the capability is supported by this instance.
    */
+  @SuppressWarnings("deprecation")
   @Override
   public boolean hasCapability(String capability) {
     switch (capability.toLowerCase(Locale.ENGLISH)) {
 
       // does the output stream have delayed visibility
     case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT:
+    case CommitConstants.STREAM_CAPABILITY_MAGIC_OUTPUT_OLD:
       return !putTracker.outputImmediatelyVisible();
 
       // The flush/sync options are absolutely not supported
-    case "hflush":
-    case "hsync":
+    case StreamCapabilities.HFLUSH:
+    case StreamCapabilities.HSYNC:
       return false;
 
     default:
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 0747be2..9431884 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -36,7 +36,6 @@
 import java.util.Date;
 import java.util.EnumSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -91,9 +90,13 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Globber;
+import org.apache.hadoop.fs.s3a.auth.SignerManager;
+import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider;
 import org.apache.hadoop.fs.s3a.impl.ChangeDetectionPolicy;
 import org.apache.hadoop.fs.s3a.impl.ContextAccessors;
 import org.apache.hadoop.fs.s3a.impl.CopyOutcome;
@@ -107,6 +110,7 @@
 import org.apache.hadoop.fs.s3a.s3guard.BulkOperationState;
 import org.apache.hadoop.fs.s3a.select.InternalSelectConstants;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.DurationInfo;
 import org.apache.hadoop.util.LambdaUtils;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -152,6 +156,7 @@
 import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
 
 import static org.apache.hadoop.fs.impl.AbstractFSBuilderImpl.rejectUnknownMandatoryKeys;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.Invoker.*;
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
@@ -180,7 +185,7 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class S3AFileSystem extends FileSystem implements StreamCapabilities,
-    AWSPolicyProvider {
+    AWSPolicyProvider, DelegationTokenProvider {
   /**
    * Default blocksize as used in blocksize and FS status queries.
    */
@@ -360,8 +365,8 @@
       }
       useListV1 = (listVersion == 1);
 
-      signerManager = new SignerManager();
-      signerManager.initCustomSigners(conf);
+      signerManager = new SignerManager(bucket, this, conf, owner);
+      signerManager.initCustomSigners();
 
       // creates the AWS client, including overriding auth chain if
       // the FS came with a DT
@@ -1333,6 +1338,11 @@
     return renameOperation.execute();
   }
 
+  @Override public Token<? extends TokenIdentifier> getFsDelegationToken()
+      throws IOException {
+    return getDelegationToken(null);
+  }
+
   /**
    * The callbacks made by the rename and delete operations.
    * This separation allows the operation to be factored out and
@@ -2471,7 +2481,7 @@
    * @param newDir the current working directory.
    */
   public void setWorkingDirectory(Path newDir) {
-    workingDir = newDir;
+    workingDir = makeQualified(newDir);
   }
 
   /**
@@ -3668,19 +3678,27 @@
    */
   @Override
   public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    entryPoint(INVOCATION_GLOB_STATUS);
-    return super.globStatus(pathPattern);
+    return globStatus(pathPattern, ACCEPT_ALL);
   }
 
   /**
-   * Override superclass so as to add statistic collection.
+   * Override superclass so as to disable symlink resolution and so avoid
+   * some calls to the FS which may have problems when the store is
+   * inconsistent.
    * {@inheritDoc}
    */
   @Override
-  public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
+  public FileStatus[] globStatus(
+      final Path pathPattern,
+      final PathFilter filter)
       throws IOException {
     entryPoint(INVOCATION_GLOB_STATUS);
-    return super.globStatus(pathPattern, filter);
+    return Globber.createGlobber(this)
+        .withPathPattern(pathPattern)
+        .withPathFiltern(filter)
+        .withResolveSymlinks(false)
+        .build()
+        .glob();
   }
 
   /**
@@ -4084,17 +4102,15 @@
     return instrumentation.newCommitterStatistics();
   }
 
-  /**
-   * Return the capabilities of this filesystem instance.
-   * @param capability string to query the stream support for.
-   * @return whether the FS instance has the capability.
-   */
+  @SuppressWarnings("deprecation")
   @Override
-  public boolean hasCapability(String capability) {
-
-    switch (capability.toLowerCase(Locale.ENGLISH)) {
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    final Path p = makeQualified(path);
+    switch (validatePathCapabilityArgs(p, capability)) {
 
     case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER:
+    case CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER_OLD:
       // capability depends on FS configuration
       return isMagicCommitEnabled();
 
@@ -4102,7 +4118,31 @@
       // select is only supported if enabled
       return selectBinding.isEnabled();
 
+    case CommonPathCapabilities.FS_CHECKSUMS:
+      // capability depends on FS configuration
+      return getConf().getBoolean(ETAG_CHECKSUM_ENABLED,
+          ETAG_CHECKSUM_ENABLED_DEFAULT);
+
     default:
+      return super.hasPathCapability(p, capability);
+    }
+  }
+
+  /**
+   * Return the capabilities of this filesystem instance.
+   *
+   * This has been supplanted by {@link #hasPathCapability(Path, String)}.
+   * @param capability string to query the stream support for.
+   * @return whether the FS instance has the capability.
+   */
+  @Deprecated
+  @Override
+  public boolean hasCapability(String capability) {
+    try {
+      return hasPathCapability(workingDir, capability);
+    } catch (IOException ex) {
+      // should never happen, so log and downgrade.
+      LOG.debug("Ignoring exception on hasCapability({}})", capability, ex);
       return false;
     }
   }
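
With this change, `hasCapability` becomes a thin wrapper over `hasPathCapability`, and callers such as `S3GuardTool` further down are switched to the path-based probe. A short sketch of the new call, assuming an already-initialized `S3AFileSystem` and some path within it:

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.commit.CommitConstants;

public final class CapabilityProbe {

  private CapabilityProbe() {
  }

  /**
   * Probe a store for magic committer support through the new API.
   * @param fs an initialized S3A filesystem (assumed to exist; not created here)
   * @param path a path within that filesystem
   * @return true if the store reports the capability
   */
  static boolean supportsMagicCommitter(S3AFileSystem fs, Path path)
      throws IOException {
    return fs.hasPathCapability(path,
        CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER);
  }
}
```

The old string-based probe remains only as a deprecated compatibility shim over the path-based one.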
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java
deleted file mode 100644
index 5ca1482..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SignerManager.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.s3a;
-
-import com.amazonaws.auth.Signer;
-import com.amazonaws.auth.SignerFactory;
-import java.io.Closeable;
-import java.io.IOException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-
-import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
-
-/**
- * Class to handle custom signers.
- */
-public class SignerManager implements Closeable {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(SignerManager.class);
-
-
-  public SignerManager() {
-  }
-
-  /**
-   * Initialize custom signers and register them with the AWS SDK.
-   *
-   * @param conf Hadoop configuration
-   */
-  public void initCustomSigners(Configuration conf) {
-    String[] customSigners = conf.getTrimmedStrings(CUSTOM_SIGNERS);
-    if (customSigners == null || customSigners.length == 0) {
-      // No custom signers specified, nothing to do.
-      LOG.debug("No custom signers specified");
-      return;
-    }
-
-    for (String customSigner : customSigners) {
-      String[] parts = customSigner.split(":");
-      if (parts.length != 2) {
-        String message =
-            "Invalid format (Expected name:SignerClass) for CustomSigner: ["
-                + customSigner
-                + "]";
-        LOG.error(message);
-        throw new IllegalArgumentException(message);
-      }
-      maybeRegisterSigner(parts[0], parts[1], conf);
-    }
-  }
-
-  /*
-   * Make sure the signer class is registered once with the AWS SDK
-   */
-  private static void maybeRegisterSigner(String signerName,
-      String signerClassName, Configuration conf) {
-    try {
-      SignerFactory.getSignerByTypeAndService(signerName, null);
-    } catch (IllegalArgumentException e) {
-      // Signer is not registered with the AWS SDK.
-      // Load the class and register the signer.
-      Class<? extends Signer> clazz = null;
-      try {
-        clazz = (Class<? extends Signer>) conf.getClassByName(signerClassName);
-      } catch (ClassNotFoundException cnfe) {
-        throw new RuntimeException(String
-            .format("Signer class [%s] not found for signer [%s]",
-                signerClassName, signerName), cnfe);
-      }
-      LOG.debug("Registering Custom Signer - [{}->{}]", signerName,
-          clazz.getName());
-      synchronized (SignerManager.class) {
-        SignerFactory.registerSigner(signerName, clazz);
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AwsSignerInitializer.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AwsSignerInitializer.java
new file mode 100644
index 0000000..f02a3ed
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AwsSignerInitializer.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * Interface which can be implemented to allow initialization of any custom
+ * signers which may be used by the {@link S3AFileSystem}.
+ */
+public interface AwsSignerInitializer {
+
+  /**
+   * Register a store instance.
+   *
+   * @param bucketName the bucket name
+   * @param storeConf the store configuration
+   * @param dtProvider delegation token provider for the store
+   * @param storeUgi ugi under which the store is operating
+   */
+  void registerStore(String bucketName, Configuration storeConf,
+      DelegationTokenProvider dtProvider, UserGroupInformation storeUgi);
+
+  /**
+   * Unregister a store instance.
+   *
+   * @param bucketName the bucket name
+   * @param storeConf the store configuration
+   * @param dtProvider delegation token provider for the store
+   * @param storeUgi ugi under which the store is operating
+   */
+  void unregisterStore(String bucketName, Configuration storeConf,
+      DelegationTokenProvider dtProvider, UserGroupInformation storeUgi);
+}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/SignerManager.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/SignerManager.java
new file mode 100644
index 0000000..cda769a
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/SignerManager.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.amazonaws.auth.Signer;
+import com.amazonaws.auth.SignerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
+
+import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
+
+/**
+ * Class to handle custom signers.
+ */
+public class SignerManager implements Closeable {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(SignerManager.class);
+
+  private final List<AwsSignerInitializer> initializers = new LinkedList<>();
+
+  private final String bucketName;
+  private final DelegationTokenProvider delegationTokenProvider;
+  private final Configuration ownerConf;
+  private final UserGroupInformation ownerUgi;
+
+  public SignerManager(String bucketName,
+      DelegationTokenProvider delegationTokenProvider, Configuration ownerConf,
+      UserGroupInformation ownerUgi) {
+    this.bucketName = bucketName;
+    this.delegationTokenProvider = delegationTokenProvider;
+    this.ownerConf = ownerConf;
+    this.ownerUgi = ownerUgi;
+  }
+
+  /**
+   * Initialize custom signers and register them with the AWS SDK.
+   *
+   */
+  public void initCustomSigners() {
+    String[] customSigners = ownerConf.getTrimmedStrings(CUSTOM_SIGNERS);
+    if (customSigners == null || customSigners.length == 0) {
+      // No custom signers specified, nothing to do.
+      LOG.debug("No custom signers specified");
+      return;
+    }
+
+    for (String customSigner : customSigners) {
+      String[] parts = customSigner.split(":");
+      if (!(parts.length == 1 || parts.length == 2 || parts.length == 3)) {
+        String message = "Invalid format (Expected name, name:SignerClass,"
+            + " name:SignerClass:SignerInitializerClass)"
+            + " for CustomSigner: [" + customSigner + "]";
+        LOG.error(message);
+        throw new IllegalArgumentException(message);
+      }
+      if (parts.length == 1) {
+        // Nothing to do. Trying to use a pre-defined Signer
+      } else {
+        // Register any custom Signer
+        maybeRegisterSigner(parts[0], parts[1], ownerConf);
+
+        // If an initializer is specified, take care of instantiating it and
+        // setting it up
+        if (parts.length == 3) {
+          Class<? extends AwsSignerInitializer> clazz = null;
+          try {
+            clazz = (Class<? extends AwsSignerInitializer>) ownerConf
+                .getClassByName(parts[2]);
+          } catch (ClassNotFoundException e) {
+            throw new RuntimeException(String.format(
+                "SignerInitializer class" + " [%s] not found for signer [%s]",
+                parts[2], parts[0]), e);
+          }
+          LOG.debug("Creating signer initializer: [{}] for signer: [{}]",
+              parts[2], parts[0]);
+          AwsSignerInitializer signerInitializer = ReflectionUtils
+              .newInstance(clazz, null);
+          initializers.add(signerInitializer);
+          signerInitializer
+              .registerStore(bucketName, ownerConf, delegationTokenProvider,
+                  ownerUgi);
+        }
+      }
+    }
+  }
+
+  /*
+   * Make sure the signer class is registered once with the AWS SDK
+   */
+  private static void maybeRegisterSigner(String signerName,
+      String signerClassName, Configuration conf) {
+    try {
+      SignerFactory.getSignerByTypeAndService(signerName, null);
+    } catch (IllegalArgumentException e) {
+      // Signer is not registered with the AWS SDK.
+      // Load the class and register the signer.
+      Class<? extends Signer> clazz = null;
+      try {
+        clazz = (Class<? extends Signer>) conf.getClassByName(signerClassName);
+      } catch (ClassNotFoundException cnfe) {
+        throw new RuntimeException(String
+            .format("Signer class [%s] not found for signer [%s]",
+                signerClassName, signerName), cnfe);
+      }
+      LOG.debug("Registering Custom Signer - [{}->{}]", signerName,
+          clazz.getName());
+      synchronized (SignerManager.class) {
+        SignerFactory.registerSigner(signerName, clazz);
+      }
+    }
+  }
+
+  @Override public void close() throws IOException {
+    LOG.debug("Unregistering fs from {} initializers", initializers.size());
+    for (AwsSignerInitializer initializer : initializers) {
+      initializer
+          .unregisterStore(bucketName, ownerConf, delegationTokenProvider,
+              ownerUgi);
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/DelegationTokenProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/DelegationTokenProvider.java
new file mode 100644
index 0000000..56bd9aa
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/DelegationTokenProvider.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth.delegation;
+
+import java.io.IOException;
+
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+/**
+ * Interface for S3A Delegation Token access.
+ */
+public interface DelegationTokenProvider {
+  Token<? extends TokenIdentifier> getFsDelegationToken() throws IOException;
+}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
index 877433b..c9b0337 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
@@ -78,14 +78,32 @@
    * Value: {@value}.
    */
   public static final String STREAM_CAPABILITY_MAGIC_OUTPUT
+      = "fs.s3a.capability.magic.output.stream";
+
+  /**
+   * Flag to indicate that a store supports magic committers.
+   * returned in {@code PathCapabilities}
+   * Value: {@value}.
+   */
+  public static final String STORE_CAPABILITY_MAGIC_COMMITTER
+      = "fs.s3a.capability.magic.committer";
+
+  /**
+   * Flag to indicate whether a stream is a magic output stream;
+   * returned in {@code StreamCapabilities}
+   * Value: {@value}.
+   */
+  @Deprecated
+  public static final String STREAM_CAPABILITY_MAGIC_OUTPUT_OLD
       = "s3a:magic.output.stream";
 
   /**
    * Flag to indicate that a store supports magic committers.
-   * returned in {@code StreamCapabilities}
+   * returned in {@code PathCapabilities}
    * Value: {@value}.
    */
-  public static final String STORE_CAPABILITY_MAGIC_COMMITTER
+  @Deprecated
+  public static final String STORE_CAPABILITY_MAGIC_COMMITTER_OLD
       = "s3a:magic.committer";
 
   /**
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 9cb1efe..bd834e0 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -1227,7 +1227,8 @@
       } else {
         println(out, "Filesystem %s is not using S3Guard", fsUri);
       }
-      boolean magic = fs.hasCapability(
+      boolean magic = fs.hasPathCapability(
+          new Path(s3Path),
           CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER);
       println(out, "The \"magic\" committer %s supported",
           magic ? "is" : "is not");
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
index d74411d..0e2bf91 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectConstants.java
@@ -50,7 +50,7 @@
    * Does the FS Support S3 Select?
    * Value: {@value}.
    */
-  public static final String S3_SELECT_CAPABILITY = "s3a:fs.s3a.select.sql";
+  public static final String S3_SELECT_CAPABILITY = "fs.s3a.capability.select.sql";
 
   /**
    * Flag: is S3 select enabled?
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
index 61409f8..4b362c6 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/select/SelectTool.java
@@ -234,7 +234,7 @@
     }
     setFilesystem((S3AFileSystem) fs);
 
-    if (!getFilesystem().hasCapability(S3_SELECT_CAPABILITY)) {
+    if (!getFilesystem().hasPathCapability(path, S3_SELECT_CAPABILITY)) {
       // capability disabled
       throw new ExitUtil.ExitException(EXIT_SERVICE_UNAVAILABLE,
           SELECT_IS_DISABLED + " for " + file);
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index ea55f90..ca584df 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1879,3 +1879,61 @@
 hadoop distcp -update -skipcrccheck -numListstatusThreads 40 /user/alice/datasets s3a://alice-backup/datasets
 ```
 
+### <a name="customsigners"></a> Advanced - Custom Signers
+
+AWS uses request signing to authenticate requests. In general, there should
+be no need to override the signers, and the defaults work out of the box.
+If, however, this is required, this section describes how to configure
+custom signers. There are two broad categories of configuration: one for
+registering a custom signer and another for specifying which signer to use.
+
+#### Registering Custom Signers
+```xml
+<property>
+  <name>fs.s3a.custom.signers</name>
+  <value>comma separated list of signers</value>
+  <!-- Example
+  <value>AWS4SignerType,CS1:CS1ClassName,CS2:CS2ClassName:CS2InitClass</value>
+  -->
+</property>
+```
+Acceptable values for each custom signer:
+
+`SignerName` - use this form when one of the default signers is being used
+(e.g. `AWS4SignerType`, `QueryStringSignerType`, `AWSS3V4SignerType`).
+If no custom signers are being used, this value does not need to be set.
+
+`SignerName:SignerClassName` - register a new signer with the specified name,
+and the class for this signer.
+The Signer Class must implement `com.amazonaws.auth.Signer`.
+
+`SignerName:SignerClassName:SignerInitializerClassName` - similar to the above,
+except it also allows a custom SignerInitializer class
+(`org.apache.hadoop.fs.s3a.auth.AwsSignerInitializer`) to be specified.
+
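+For illustration only, a minimal custom signer might look like the sketch
+below. The class name is hypothetical, and it simply delegates to the SDK's
+`AWSS3V4Signer` after setting the service and region:
+
+```java
+import com.amazonaws.SignableRequest;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.Signer;
+import com.amazonaws.services.s3.internal.AWSS3V4Signer;
+
+public class MyCustomSigner implements Signer {
+
+  @Override
+  public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+    // Custom logic (auditing, alternative credentials, ...) would go here.
+    AWSS3V4Signer delegate = new AWSS3V4Signer();
+    delegate.setServiceName("s3");
+    delegate.setRegionName("us-east-1"); // example region, adjust as needed
+    delegate.sign(request, credentials);
+  }
+}
+```
+
+Such a class would then be registered, e.g., as
+`CustomS3Signer:org.example.MyCustomSigner` in `fs.s3a.custom.signers`.
+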
+#### Usage of the Signers
+Signers can be set at a per-service level (S3, DynamoDB, etc.) or as a common
+signer for all services.
+
+```xml
+<property>
+  <name>fs.s3a.s3.signing-algorithm</name>
+  <value>${S3SignerName}</value>
+  <description>Specify the signer for S3</description>
+</property>
+
+<property>
+  <name>fs.s3a.ddb.signing-algorithm</name>
+  <value>${DdbSignerName}</value>
+  <description>Specify the signer for DDB</description>
+</property>
+
+<property>
+  <name>fs.s3a.signing-algorithm</name>
+  <value>${SignerName}</value>
+</property>
+```
+
+For a specific service, the service-specific signer is looked up first.
+If that is not specified, the common signer is looked up. If neither is
+specified, the SDK default settings are used.
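+
+For example (hypothetical values): with `fs.s3a.s3.signing-algorithm` set to
+`CustomS3Signer` and only the common `fs.s3a.signing-algorithm` set to
+`AWS4SignerType`, S3 requests are signed with `CustomS3Signer`, while DDB
+requests fall back to the common `AWS4SignerType`.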
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java
new file mode 100644
index 0000000..bd6bf2f
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test what the LocatedFileStatusFetcher can do.
+ * This is related to HADOOP-16458.
+ * There are basic tests in ITestS3AFSMainOperations; this
+ * suite is for creating better corner cases.
+ */
+public class ITestLocatedFileStatusFetcher extends AbstractS3ATestBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestLocatedFileStatusFetcher.class);
+
+  @Test
+  public void testGlobScan() throws Throwable {
+
+  }
+}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java
new file mode 100644
index 0000000..511aa0f
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.junit.Ignore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSMainOperationsBaseTest;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.createTestPath;
+
+/**
+ * S3A Test suite for the FSMainOperationsBaseTest tests.
+ */
+public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest {
+
+
+  public ITestS3AFSMainOperations() {
+    super(createTestPath(
+        new Path("/ITestS3AFSMainOperations")).toUri().toString());
+  }
+
+  @Override
+  protected FileSystem createFileSystem() throws Exception {
+    S3AContract contract = new S3AContract(new Configuration());
+    contract.init();
+    return contract.getTestFileSystem();
+  }
+
+  @Override
+  @Ignore("Permissions not supported")
+  public void testListStatusThrowsExceptionForUnreadableDir() {
+  }
+
+  @Override
+  @Ignore("Permissions not supported")
+  public void testGlobStatusThrowsExceptionForUnreadableDir() {
+  }
+
+  @Override
+  @Ignore("local FS path setup broken")
+  public void testCopyToLocalWithUseRawLocalFileSystemOption()
+      throws Exception {
+  }
+
+}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
index fc8d872..8f7f1be 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
@@ -28,12 +28,15 @@
 import org.junit.Assume;
 import org.junit.Test;
 
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.store.EtagChecksum;
 import org.apache.hadoop.test.LambdaTestUtils;
 
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksPathCapabilities;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
 
@@ -142,6 +145,8 @@
     Path file1 = touchFile("file1");
     EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
     LOG.info("Checksum for {}: {}", file1, checksum1);
+    assertHasPathCapabilities(fs, file1,
+        CommonPathCapabilities.FS_CHECKSUMS);
     assertNotNull("Null file 1 checksum", checksum1);
     assertNotEquals("file 1 checksum", 0, checksum1.getLength());
     assertEquals("checksums", checksum1,
@@ -159,6 +164,8 @@
     final S3AFileSystem fs = getFileSystem();
     Path file1 = touchFile("file1");
     EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
+    assertLacksPathCapabilities(fs, file1,
+        CommonPathCapabilities.FS_CHECKSUMS);
     assertNull("Checksums are being generated", checksum1);
   }
 
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index b974385..1889c05 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -1236,9 +1236,12 @@
    * Skip a test if the FS isn't marked as supporting magic commits.
    * @param fs filesystem
    */
-  public static void assumeMagicCommitEnabled(S3AFileSystem fs) {
+  public static void assumeMagicCommitEnabled(S3AFileSystem fs)
+      throws IOException {
     assume("Magic commit option disabled on " + fs,
-        fs.hasCapability(CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER));
+        fs.hasPathCapability(
+            fs.getWorkingDirectory(),
+            CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER));
   }
 
   /**
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java
deleted file mode 100644
index ac759d0..0000000
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestSignerManager.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.util.concurrent.TimeUnit;
-
-import com.amazonaws.SignableRequest;
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.Signer;
-import com.amazonaws.auth.SignerFactory;
-import org.assertj.core.api.Assertions;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.LambdaTestUtils;
-
-import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
-
-/**
- * Tests for the SignerManager.
- */
-public class TestSignerManager {
-
-  @Rule
-  public Timeout testTimeout = new Timeout(
-      10_000L, TimeUnit.MILLISECONDS
-  );
-
-  @Test
-  public void testCustomSignerFailureIfNotRegistered() throws Exception {
-    LambdaTestUtils.intercept(Exception.class,
-        () -> SignerFactory.createSigner("testsignerUnregistered", null));
-    // Expecting generic Exception.class to handle future implementation
-    // changes.
-    // For now, this is an NPE
-  }
-
-  @Test
-  public void testCustomSignerInitialization() {
-    Configuration config = new Configuration();
-    SignerForTest1.reset();
-    SignerForTest2.reset();
-    config.set(CUSTOM_SIGNERS, "testsigner1:" + SignerForTest1.class.getName());
-    SignerManager signerManager = new SignerManager();
-    signerManager.initCustomSigners(config);
-    Signer s1 = SignerFactory.createSigner("testsigner1", null);
-    s1.sign(null, null);
-    Assertions.assertThat(SignerForTest1.initialized)
-        .as(SignerForTest1.class.getName() + " not initialized")
-        .isEqualTo(true);
-  }
-
-  @Test
-  public void testMultipleCustomSignerInitialization() {
-    Configuration config = new Configuration();
-    SignerForTest1.reset();
-    SignerForTest2.reset();
-    config.set(CUSTOM_SIGNERS,
-        "testsigner1:" + SignerForTest1.class.getName() + "," + "testsigner2:"
-            + SignerForTest2.class.getName());
-    SignerManager signerManager = new SignerManager();
-    signerManager.initCustomSigners(config);
-    Signer s1 = SignerFactory.createSigner("testsigner1", null);
-    s1.sign(null, null);
-    Assertions.assertThat(SignerForTest1.initialized)
-        .as(SignerForTest1.class.getName() + " not initialized")
-        .isEqualTo(true);
-
-    Signer s2 = SignerFactory.createSigner("testsigner2", null);
-    s2.sign(null, null);
-    Assertions.assertThat(SignerForTest2.initialized)
-        .as(SignerForTest2.class.getName() + " not initialized")
-        .isEqualTo(true);
-  }
-
-  /**
-   * SignerForTest1.
-   */
-  @Private
-  public static class SignerForTest1 implements Signer {
-
-    private static boolean initialized = false;
-
-    @Override
-    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
-      initialized = true;
-    }
-
-    public static void reset() {
-      initialized = false;
-    }
-  }
-
-  /**
-   * SignerForTest2.
-   */
-  @Private
-  public static class SignerForTest2 implements Signer {
-
-    private static boolean initialized = false;
-
-    @Override
-    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
-      initialized = true;
-    }
-
-    public static void reset() {
-      initialized = false;
-    }
-  }
-}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java
new file mode 100644
index 0000000..651cdad
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import com.amazonaws.SignableRequest;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.Signer;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.amazonaws.services.s3.internal.AWSS3V4Signer;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
+import org.apache.hadoop.fs.s3a.auth.ITestCustomSigner.CustomSignerInitializer.StoreValue;
+import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
+import static org.apache.hadoop.fs.s3a.Constants.SIGNING_ALGORITHM_S3;
+
+/**
+ * Tests for custom Signers and SignerInitializers.
+ */
+public class ITestCustomSigner extends AbstractS3ATestBase {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(ITestCustomSigner.class);
+
+  private static final String TEST_ID_KEY = "TEST_ID_KEY";
+  private static final String TEST_REGION_KEY = "TEST_REGION_KEY";
+
+  private String regionName;
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    regionName = determineRegion(getFileSystem().getBucket());
+    LOG.info("Determined region name to be [{}] for bucket [{}]", regionName,
+        getFileSystem().getBucket());
+  }
+
+  @Test
+  public void testCustomSignerAndInitializer()
+      throws IOException, InterruptedException {
+
+    UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("user1");
+    FileSystem fs1 = runMkDirAndVerify(ugi1, "/customsignerpath1", "id1");
+
+    UserGroupInformation ugi2 = UserGroupInformation.createRemoteUser("user2");
+    FileSystem fs2 = runMkDirAndVerify(ugi2, "/customsignerpath2", "id2");
+
+    Assertions.assertThat(CustomSignerInitializer.knownStores.size())
+        .as("Num registered stores mismatch").isEqualTo(2);
+    fs1.close();
+    Assertions.assertThat(CustomSignerInitializer.knownStores.size())
+        .as("Num registered stores mismatch").isEqualTo(1);
+    fs2.close();
+    Assertions.assertThat(CustomSignerInitializer.knownStores.size())
+        .as("Num registered stores mismatch").isEqualTo(0);
+  }
+
+  private FileSystem runMkDirAndVerify(UserGroupInformation ugi,
+      String pathString, String identifier)
+      throws IOException, InterruptedException {
+    Configuration conf = createTestConfig(identifier);
+    Path path = new Path(pathString);
+    path = path.makeQualified(getFileSystem().getUri(),
+        getFileSystem().getWorkingDirectory());
+
+    Path finalPath = path;
+    return ugi.doAs((PrivilegedExceptionAction<FileSystem>) () -> {
+      int invocationCount = CustomSigner.invocationCount;
+      FileSystem fs = finalPath.getFileSystem(conf);
+      fs.mkdirs(finalPath);
+      Assertions.assertThat(CustomSigner.invocationCount)
+          .as("Invocation count lower than expected")
+          .isGreaterThan(invocationCount);
+
+      Assertions.assertThat(CustomSigner.lastStoreValue)
+          .as("Store value should not be null").isNotNull();
+      Assertions.assertThat(CustomSigner.lastStoreValue.conf)
+          .as("Configuration should not be null").isNotNull();
+      Assertions.assertThat(CustomSigner.lastStoreValue.conf.get(TEST_ID_KEY))
+          .as("Configuration TEST_KEY mismatch").isEqualTo(identifier);
+
+      return fs;
+    });
+  }
+
+  private Configuration createTestConfig(String identifier) {
+    Configuration conf = createConfiguration();
+
+    conf.set(CUSTOM_SIGNERS,
+        "CustomS3Signer:" + CustomSigner.class.getName() + ":"
+            + CustomSignerInitializer.class.getName());
+    conf.set(SIGNING_ALGORITHM_S3, "CustomS3Signer");
+
+    conf.set(TEST_ID_KEY, identifier);
+    conf.set(TEST_REGION_KEY, regionName);
+
+    return conf;
+  }
+
+  private String determineRegion(String bucketName) throws IOException {
+    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withCredentials(
+        new SimpleAWSCredentialsProvider(null, createConfiguration()))
+        .withForceGlobalBucketAccessEnabled(true).withRegion("us-east-1")
+        .build();
+    String region = s3.getBucketLocation(bucketName);
+    //  See: https://forums.aws.amazon.com/thread.jspa?messageID=796829&tstart=0
+    if (region.equals("US")) {
+      region = "us-east-1";
+    }
+    return region;
+  }
+
+  @Private
+  public static final class CustomSigner implements Signer {
+
+    private static int invocationCount = 0;
+    private static StoreValue lastStoreValue;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      invocationCount++;
+      String host = request.getEndpoint().getHost();
+      String bucketName = host.split("\\.")[0];
+      try {
+        lastStoreValue = CustomSignerInitializer
+            .getStoreValue(bucketName, UserGroupInformation.getCurrentUser());
+      } catch (IOException e) {
+        throw new RuntimeException("Failed to get current Ugi", e);
+      }
+      AWSS3V4Signer realSigner = new AWSS3V4Signer();
+      realSigner.setServiceName("s3");
+      realSigner.setRegionName(lastStoreValue.conf.get(TEST_REGION_KEY));
+      realSigner.sign(request, credentials);
+    }
+  }
+
+  @Private
+  public static final class CustomSignerInitializer
+      implements AwsSignerInitializer {
+
+    private static final Map<StoreKey, StoreValue> knownStores = new HashMap<>();
+
+    @Override
+    public void registerStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      StoreKey storeKey = new StoreKey(bucketName, storeUgi);
+      StoreValue storeValue = new StoreValue(storeConf, dtProvider);
+      knownStores.put(storeKey, storeValue);
+    }
+
+    @Override
+    public void unregisterStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      StoreKey storeKey = new StoreKey(bucketName, storeUgi);
+      knownStores.remove(storeKey);
+    }
+
+    public static StoreValue getStoreValue(String bucketName,
+        UserGroupInformation ugi) {
+      StoreKey storeKey = new StoreKey(bucketName, ugi);
+      return knownStores.get(storeKey);
+    }
+
+    private static class StoreKey {
+      private final String bucketName;
+      private final UserGroupInformation ugi;
+
+      public StoreKey(String bucketName, UserGroupInformation ugi) {
+        this.bucketName = bucketName;
+        this.ugi = ugi;
+      }
+
+      @Override
+      public boolean equals(Object o) {
+        if (this == o) {
+          return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+          return false;
+        }
+        StoreKey storeKey = (StoreKey) o;
+        return Objects.equals(bucketName, storeKey.bucketName) && Objects
+            .equals(ugi, storeKey.ugi);
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(bucketName, ugi);
+      }
+    }
+
+    static class StoreValue {
+      private final Configuration conf;
+      private final DelegationTokenProvider dtProvider;
+
+      public StoreValue(Configuration conf,
+          DelegationTokenProvider dtProvider) {
+        this.conf = conf;
+        this.dtProvider = dtProvider;
+      }
+    }
+  }
+}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
new file mode 100644
index 0000000..a741cd6
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
@@ -0,0 +1,707 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.Callable;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.apache.hadoop.fs.s3a.S3AUtils;
+import org.apache.hadoop.fs.s3a.Statistic;
+import org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.MetadataStore;
+import org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore;
+import org.apache.hadoop.mapred.LocatedFileStatusFetcher;
+import org.apache.hadoop.mapreduce.lib.input.InvalidInputException;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.fs.s3a.Constants.ASSUMED_ROLE_ARN;
+import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE;
+import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
+import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
+import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig;
+import static org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.hadoop.test.GenericTestUtils.failif;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+/**
+ * This test creates a client with no read access to the underlying
+ * filesystem and then tries to perform various read operations on it.
+ * S3Guard in non-auth mode always goes to the FS, so we parameterize the
+ * test for S3Guard + Auth to see how failures move around.
+ * <ol>
+ *   <li>Tests only run if an assumed role is provided.</li>
+ *   <li>And the s3guard tests use the local metastore if
+ *   there was not one already.</li>
+ * </ol>
+ * The tests are all bundled into one big test case.
+ * From a purist unit test perspective, this is utterly wrong as it goes
+ * against the
+ * <i>"Each test case tests exactly one thing"</i>
+ * philosophy of JUnit.
+ * <p>
+ * However, it significantly reduces setup costs on the parameterized test runs,
+ * as it means that the filesystems and directories only need to be
+ * created and destroyed once per parameterized suite, rather than
+ * once per individual test.
+ * <p>
+ * All the test probes have informative messages so when a test failure
+ * does occur, its cause should be discoverable. Its main weaknesses are
+ * therefore:
+ * <ol>
+ *   <li>A failure of an assertion blocks all subsequent assertions from
+ *   being checked.</li>
+ *   <li>Maintenance is potentially harder.</li>
+ * </ol>
+ * To simplify maintenance, the operations tested are broken up into
+ * their own methods, with fields used to share the restricted role and
+ * created paths.
+ */
+@SuppressWarnings("ThrowableNotThrown")
+@RunWith(Parameterized.class)
+public class ITestRestrictedReadAccess extends AbstractS3ATestBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestRestrictedReadAccess.class);
+
+  /** Filter to select everything. */
+  private static final PathFilter EVERYTHING = t -> true;
+
+  /** Filter to select .txt files. */
+  private static final PathFilter TEXT_FILE =
+      path -> path.toUri().toString().endsWith(".txt");
+
+  /** The same path filter used in FileInputFormat. */
+  private static final PathFilter HIDDEN_FILE_FILTER =
+      (p) -> {
+        String n = p.getName();
+        return !n.startsWith("_") && !n.startsWith(".");
+      };
+
+  /**
+   * Text found in LocatedFileStatusFetcher exception when the glob
+   * returned "null".
+   */
+  private static final String DOES_NOT_EXIST = "does not exist";
+
+  /**
+   * Text found in LocatedFileStatusFetcher exception when
+   * the glob returned an empty list.
+   */
+  private static final String MATCHES_0_FILES = "matches 0 files";
+
+  /**
+   * Text used in files.
+   */
+  public static final byte[] HELLO = "hello".getBytes(Charset.forName("UTF-8"));
+
+  /**
+   * Wildcard scan to find *.txt in the no-read directory.
+   * When a scan/glob is done with S3Guard in auth mode, the scan will
+   * succeed but the file open will fail for any non-empty file.
+   * In non-auth mode, the read restrictions will fail the actual scan.
+   */
+  private Path noReadWildcard;
+
+  /**
+   * Parameterization.
+   */
+  @Parameterized.Parameters(name = "{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {"raw", false, false},
+        {"nonauth", true, false},
+        {"auth", true, true}
+    });
+  }
+
+  private final String name;
+
+  private final boolean s3guard;
+
+  private final boolean authMode;
+
+  private Path basePath;
+
+  private Path noReadDir;
+
+  private Path emptyDir;
+
+  private Path emptyFile;
+
+  private Path subDir;
+
+  private Path subdirFile;
+
+  private Path subDir2;
+
+  private Path subdir2File1;
+
+  private Path subdir2File2;
+
+  private Configuration roleConfig;
+
+  /**
+   * A read-only FS; if non-null it is closed in teardown.
+   */
+  private S3AFileSystem readonlyFS;
+
+  /**
+   * Test suite setup.
+   * @param name name for logs/paths.
+   * @param s3guard is S3Guard enabled?
+   * @param authMode is S3Guard in auth mode?
+   */
+  public ITestRestrictedReadAccess(
+      final String name,
+      final boolean s3guard,
+      final boolean authMode) {
+    this.name = name;
+    this.s3guard = s3guard;
+    this.authMode = authMode;
+  }
+
+  @Override
+  public Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+    String bucketName = getTestBucketName(conf);
+    removeBucketOverrides(bucketName, conf,
+        S3_METADATA_STORE_IMPL,
+        METADATASTORE_AUTHORITATIVE);
+    conf.setClass(Constants.S3_METADATA_STORE_IMPL,
+        s3guard ?
+            LocalMetadataStore.class
+            : NullMetadataStore.class,
+        MetadataStore.class);
+    conf.setBoolean(METADATASTORE_AUTHORITATIVE, authMode);
+    disableFilesystemCaching(conf);
+    return conf;
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    assumeRoleTests();
+  }
+
+  @Override
+  public void teardown() throws Exception {
+    S3AUtils.closeAll(LOG, readonlyFS);
+    super.teardown();
+  }
+
+  private void assumeRoleTests() {
+    assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
+  }
+
+  private String getAssumedRoleARN() {
+    return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
+  }
+
+  /**
+   * Create the assumed role configuration.
+   * @return a config bonded to the ARN of the assumed role
+   */
+  public Configuration createAssumedRoleConfig() {
+    return createAssumedRoleConfig(getAssumedRoleARN());
+  }
+
+  /**
+   * Create a config for an assumed role; it also disables FS caching.
+   * @param roleARN ARN of role
+   * @return the new configuration
+   */
+  private Configuration createAssumedRoleConfig(String roleARN) {
+    return newAssumedRoleConfig(getContract().getConf(), roleARN);
+  }
+
+  /**
+   * This is a single test case which invokes the individual test cases
+   * in sequence.
+   */
+  @Test
+  public void testNoReadAccess() throws Throwable {
+    describe("Test failure handling if the client doesn't"
+        + " have read access under a path");
+    initNoReadAccess();
+
+    // now move up the API Chain, from the calls made by globStatus,
+    // to globStatus itself, and then to LocatedFileStatusFetcher,
+    // which invokes globStatus
+
+    checkBasicFileOperations();
+    checkGlobOperations();
+    checkSingleThreadedLocatedFileStatus();
+    checkLocatedFileStatusFourThreads();
+    checkLocatedFileStatusScanFile();
+    checkLocatedFileStatusNonexistentPath();
+    checkDeleteOperations();
+  }
+
+  /**
+   * Initialize the directory tree and the role filesystem.
+   */
+  public void initNoReadAccess() throws Throwable {
+    describe("Setting up filesystem");
+
+    S3AFileSystem realFS = getFileSystem();
+
+    // avoiding the parameterization to steer clear of accidentally creating
+    // patterns
+    basePath = path("testNoReadAccess-" + name);
+
+    // define the paths and create them.
+    describe("Creating test directories and files");
+
+    // this is the directory to which the restricted role has no read
+    // access.
+    noReadDir = new Path(basePath, "noReadDir");
+    // wildcard scan to find *.txt
+    noReadWildcard = new Path(noReadDir, "*/*.txt");
+
+    // an empty directory under the noReadDir
+    emptyDir = new Path(noReadDir, "emptyDir");
+    realFS.mkdirs(emptyDir);
+
+    // an empty file under the noReadDir
+    emptyFile = new Path(noReadDir, "emptyFile.txt");
+    touch(realFS, emptyFile);
+
+    // a subdirectory
+    subDir = new Path(noReadDir, "subDir");
+
+    // and a file in that subdirectory
+    subdirFile = new Path(subDir, "subdirFile.txt");
+    createFile(realFS, subdirFile, true, HELLO);
+    subDir2 = new Path(noReadDir, "subDir2");
+    subdir2File1 = new Path(subDir2, "subdir2File1.txt");
+    subdir2File2 = new Path(subDir2, "subdir2File2.docx");
+    createFile(realFS, subdir2File1, true, HELLO);
+    createFile(realFS, subdir2File2, true, HELLO);
+
+    // create a role filesystem which does not have read access under a path
+    // it still has write access, which can be explored in the final
+    // step to delete files and directories.
+    roleConfig = createAssumedRoleConfig();
+    bindRolePolicyStatements(roleConfig,
+        STATEMENT_S3GUARD_CLIENT,
+        STATEMENT_ALLOW_SSE_KMS_RW,
+        statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
+        new Statement(Effects.Deny)
+            .addActions(S3_ALL_GET)
+            .addResources(directory(noReadDir)));
+    readonlyFS = (S3AFileSystem) basePath.getFileSystem(roleConfig);
+  }
+
+  /**
+   * Validate basic IO operations.
+   */
+  public void checkBasicFileOperations() throws Throwable {
+
+    // this is a LIST call; there's no marker.
+    // so the sequence is
+    //   - HEAD path -> FNFE
+    //   - HEAD path + / -> FNFE
+    //   - LIST path -> list results
+    // Because the client has list access, this succeeds
+    readonlyFS.listStatus(basePath);
+
+    // this is HEAD + "/" on S3; get on S3Guard auth
+    readonlyFS.listStatus(emptyDir);
+
+    // a recursive list of the no-read-directory works because
+    // there is no directory marker, so it becomes a LIST call.
+    lsR(readonlyFS, noReadDir, true);
+
+    // similarly, a getFileStatus ends up being a list and generating
+    // a file status marker.
+    readonlyFS.getFileStatus(noReadDir);
+
+    // empty dir checks work!
+    readonlyFS.getFileStatus(emptyDir);
+
+    // now look at a file; the outcome depends on the mode.
+    if (authMode) {
+      // auth mode doesn't check S3, so no failure
+      readonlyFS.getFileStatus(subdirFile);
+    } else {
+      accessDenied(() ->
+          readonlyFS.getFileStatus(subdirFile));
+    }
+
+    // irrespective of mode, the attempt to read the data will fail.
+    // the only variable is where the failure occurs
+    accessDenied(() ->
+        ContractTestUtils.readUTF8(readonlyFS, subdirFile, HELLO.length));
+
+    // the empty file is interesting
+    if (!authMode) {
+      // non-auth mode: it fails at some point in the opening process,
+      // due to a HEAD being called on the object
+      accessDenied(() ->
+          ContractTestUtils.readUTF8(readonlyFS, emptyFile, 0));
+    } else {
+      // auth mode doesn't check the store.
+      // Furthermore, because it knows the file length is zero,
+      // it returns -1 without even opening the file.
+      // This means that permissions on the file do not get checked.
+      // See: HADOOP-16464.
+      try (FSDataInputStream is = readonlyFS.open(emptyFile)) {
+        Assertions.assertThat(is.read())
+            .describedAs("read of empty file")
+            .isEqualTo(-1);
+      }
+      readonlyFS.getFileStatus(subdirFile);
+    }
+  }
+
+  /**
+   * Explore Glob's recursive scan.
+   */
+  public void checkGlobOperations() throws Throwable {
+
+    describe("Glob Status operations");
+    // baseline: the real filesystem on a subdir
+    globFS(getFileSystem(), subdirFile, null, false, 1);
+    // a file fails if not in auth mode
+    globFS(readonlyFS, subdirFile, null, !authMode, 1);
+    // empty directories don't fail.
+    assertStatusPathEquals(emptyDir,
+        globFS(readonlyFS, emptyDir, null, false, 1));
+
+    FileStatus[] st = globFS(readonlyFS,
+        noReadWildcard,
+        null, false, 2);
+    Assertions.assertThat(st)
+        .extracting(FileStatus::getPath)
+        .containsExactlyInAnyOrder(subdirFile, subdir2File1);
+
+    // there is precisely one .docx file (subdir2File2.docx)
+    globFS(readonlyFS,
+        new Path(noReadDir, "*/*.docx"),
+        null, false, 1);
+
+    // there are no .doc files.
+    globFS(readonlyFS,
+        new Path(noReadDir, "*/*.doc"),
+        null, false, 0);
+    globFS(readonlyFS, noReadDir,
+        EVERYTHING, false, 1);
+    // and a filter without any wildcarded pattern only finds
+    // the role dir itself.
+    FileStatus[] st2 = globFS(readonlyFS, noReadDir,
+        EVERYTHING, false, 1);
+    Assertions.assertThat(st2)
+        .extracting(FileStatus::getPath)
+        .containsExactly(noReadDir);
+  }
+
+  /**
+   * Run a located file status fetcher against the directory tree.
+   */
+  public void checkSingleThreadedLocatedFileStatus() throws Throwable {
+
+    describe("LocatedFileStatusFetcher operations");
+    // use the same filter as FileInputFormat; single thread.
+    roleConfig.setInt(LIST_STATUS_NUM_THREADS, 1);
+    LocatedFileStatusFetcher fetcher =
+        new LocatedFileStatusFetcher(
+            roleConfig,
+            new Path[]{basePath},
+            true,
+            HIDDEN_FILE_FILTER,
+            true);
+    Assertions.assertThat(fetcher.getFileStatuses())
+        .describedAs("result of located scan")
+        .flatExtracting(FileStatus::getPath)
+        .containsExactlyInAnyOrder(
+            emptyFile,
+            subdirFile,
+            subdir2File1,
+            subdir2File2);
+
+  }
+
+  /**
+   * Run a located file status fetcher against the directory tree.
+   */
+  public void checkLocatedFileStatusFourThreads() throws Throwable {
+
+    // four threads and the text filter.
+    int threads = 4;
+    describe("LocatedFileStatusFetcher with %d", threads);
+    roleConfig.setInt(LIST_STATUS_NUM_THREADS, threads);
+    LocatedFileStatusFetcher fetcher2 =
+        new LocatedFileStatusFetcher(
+            roleConfig,
+            new Path[]{noReadWildcard},
+            true,
+            EVERYTHING,
+            true);
+    Assertions.assertThat(fetcher2.getFileStatuses())
+        .describedAs("result of located scan")
+        .isNotNull()
+        .flatExtracting(FileStatus::getPath)
+        .containsExactlyInAnyOrder(subdirFile, subdir2File1);
+  }
+
+  /**
+   * Run a located file status fetcher against the directory tree.
+   */
+  public void checkLocatedFileStatusScanFile() throws Throwable {
+    // pass in a file as the base of the scan.
+    describe("LocatedFileStatusFetcher with file %s", subdirFile);
+    roleConfig.setInt(LIST_STATUS_NUM_THREADS, 16);
+    try {
+      Iterable<FileStatus> fetched = new LocatedFileStatusFetcher(
+          roleConfig,
+          new Path[]{subdirFile},
+          true,
+          TEXT_FILE,
+          true).getFileStatuses();
+      // when not in auth mode, the HEAD request MUST have failed.
+      failif(!authMode, "LocatedFileStatusFetcher(" + subdirFile + ")"
+          + " should have failed");
+      // and in auth mode, the file MUST have been found.
+      Assertions.assertThat(fetched)
+          .describedAs("result of located scan")
+          .isNotNull()
+          .flatExtracting(FileStatus::getPath)
+          .containsExactly(subdirFile);
+    } catch (AccessDeniedException e) {
+      // we require the HEAD request to fail with access denied in non-auth
+      // mode, but not in auth mode.
+      failif(authMode, "LocatedFileStatusFetcher(" + subdirFile + ")", e);
+    }
+  }
+
+  /**
+   * Explore what happens with a path that does not exist.
+   */
+  public void checkLocatedFileStatusNonexistentPath() throws Throwable {
+    // scan a path that doesn't exist
+    Path nonexistent = new Path(noReadDir, "nonexistent");
+    InvalidInputException ex = intercept(InvalidInputException.class,
+        DOES_NOT_EXIST,
+        () -> new LocatedFileStatusFetcher(
+            roleConfig,
+            new Path[]{nonexistent},
+            true,
+            EVERYTHING,
+            true)
+            .getFileStatuses());
+    // validate nested exception
+    assertExceptionContains(DOES_NOT_EXIST, ex.getCause());
+
+    // a file which exists but which doesn't match the pattern
+    // is downgraded to not existing.
+    intercept(InvalidInputException.class,
+        DOES_NOT_EXIST,
+        () -> new LocatedFileStatusFetcher(
+            roleConfig,
+            new Path[]{noReadDir},
+            true,
+            TEXT_FILE,
+            true)
+            .getFileStatuses());
+
+    // a pattern under a nonexistent path is considered to not be a match.
+    ex = intercept(
+        InvalidInputException.class,
+        MATCHES_0_FILES,
+        () -> new LocatedFileStatusFetcher(
+            roleConfig,
+            new Path[]{new Path(nonexistent, "*.txt)")},
+            true,
+            TEXT_FILE,
+            true)
+            .getFileStatuses());
+    // validate nested exception
+    assertExceptionContains(MATCHES_0_FILES, ex.getCause());
+  }
+
+  /**
+   * Do some cleanup to see what happens with delete calls.
+   * Cleanup happens in test teardown anyway; doing it here
+   * just makes use of the delete calls to see how delete failures
+   * change with permissions and S3Guard settings.
+   */
+  public void checkDeleteOperations() throws Throwable {
+    describe("Testing delete operations");
+
+    if (!authMode) {
+      // unguarded or non-auth S3Guard to fail on HEAD + /
+      accessDenied(() -> readonlyFS.delete(emptyDir, true));
+      // to fail on HEAD
+      accessDenied(() -> readonlyFS.delete(emptyFile, true));
+    } else {
+      // auth mode checks DDB for status and then issues the DELETE
+      readonlyFS.delete(emptyDir, true);
+      readonlyFS.delete(emptyFile, true);
+    }
+
+    // this will succeed for both as there is no subdir marker.
+    readonlyFS.delete(subDir, true);
+    // after which it is not there
+    fileNotFound(() -> readonlyFS.getFileStatus(subDir));
+    // and nor is its child.
+    fileNotFound(() -> readonlyFS.getFileStatus(subdirFile));
+
+    // now delete the base path
+    readonlyFS.delete(basePath, true);
+    // and expect an FNFE
+    fileNotFound(() -> readonlyFS.getFileStatus(subDir));
+  }
+
+  /**
+   * Require an operation to fail with a FileNotFoundException.
+   * @param eval closure to evaluate.
+   * @param <T> type of callable
+   * @return the exception.
+   * @throws Exception any other exception
+   */
+  protected <T> FileNotFoundException fileNotFound(final Callable<T> eval)
+      throws Exception {
+    return intercept(FileNotFoundException.class, eval);
+  }
+
+  /**
+   * Require an operation to fail with an AccessDeniedException.
+   * @param eval closure to evaluate.
+   * @param <T> type of callable
+   * @return the exception.
+   * @throws Exception any other exception
+   */
+  protected <T> AccessDeniedException accessDenied(final Callable<T> eval)
+      throws Exception {
+    return intercept(AccessDeniedException.class, eval);
+  }
+
+  /**
+   * Assert that a status array has exactly one element and its
+   * value is as expected.
+   * @param expected expected path
+   * @param statuses list of statuses
+   */
+  protected void assertStatusPathEquals(final Path expected,
+      final FileStatus[] statuses) {
+    Assertions.assertThat(statuses)
+        .describedAs("List of status entries")
+        .isNotNull()
+        .hasSize(1);
+    Assertions.assertThat(statuses[0].getPath())
+        .describedAs("Status entry %s", statuses[0])
+        .isEqualTo(expected);
+  }
+
+  /**
+   * Glob under a path with expected outcomes.
+   * @param fs filesystem to use
+   * @param path path (which can include patterns)
+   * @param filter optional filter
+   * @param expectAuthFailure is auth failure expected?
+   * @param expectedCount expected count of results; -1 means null response
+   * @return the result of a successful glob or null if an expected auth
+   *          failure was caught.
+   * @throws IOException failure.
+   */
+  protected FileStatus[] globFS(
+      final S3AFileSystem fs,
+      final Path path,
+      final PathFilter filter,
+      boolean expectAuthFailure,
+      final int expectedCount)
+      throws IOException {
+    LOG.info("Glob {}", path);
+    S3ATestUtils.MetricDiff getMetric = new S3ATestUtils.MetricDiff(fs,
+        Statistic.OBJECT_METADATA_REQUESTS);
+    S3ATestUtils.MetricDiff listMetric = new S3ATestUtils.MetricDiff(fs,
+        Statistic.OBJECT_LIST_REQUESTS);
+    FileStatus[] st;
+    try {
+      st = filter == null
+          ? fs.globStatus(path)
+          : fs.globStatus(path, filter);
+      LOG.info("Metrics:\n {},\n {}", getMetric, listMetric);
+      if (expectAuthFailure) {
+        // should have failed here
+        String resultStr;
+        if (st == null) {
+          resultStr = "A null array";
+        } else {
+          resultStr = StringUtils.join(st, ",");
+        }
+        fail(String.format("globStatus(%s) should have raised"
+            + " an exception, but returned %s", path, resultStr));
+      }
+    } catch (AccessDeniedException e) {
+      LOG.info("Metrics:\n {},\n {}", getMetric, listMetric);
+      failif(!expectAuthFailure, "Access denied in glob of " + path,
+          e);
+      return null;
+    }
+    if (expectedCount < 0) {
+      Assertions.assertThat(st)
+          .describedAs("Glob of %s", path)
+          .isNull();
+    } else {
+      Assertions.assertThat(st)
+          .describedAs("Glob of %s", path)
+          .isNotNull()
+          .hasSize(expectedCount);
+    }
+    return st;
+  }
+
+}
+
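A minimal sketch of the three modes the suite parameterizes over, expressed as plain
configuration settings; it assumes the standard s3a key names behind the
S3_METADATA_STORE_IMPL and METADATASTORE_AUTHORITATIVE constants used in
createConfiguration().

  import org.apache.hadoop.conf.Configuration;

  // "raw": no S3Guard; every probe goes to S3.
  Configuration raw = new Configuration();
  raw.set("fs.s3a.metadatastore.impl",
      "org.apache.hadoop.fs.s3a.s3guard.NullMetadataStore");

  // "nonauth": local metastore, but S3 is still consulted, so the deny-GET
  // policy surfaces as AccessDeniedException on HEAD requests.
  Configuration nonauth = new Configuration(raw);
  nonauth.set("fs.s3a.metadatastore.impl",
      "org.apache.hadoop.fs.s3a.s3guard.LocalMetadataStore");
  nonauth.setBoolean("fs.s3a.metadatastore.authoritative", false);

  // "auth": the metastore is authoritative, so getFileStatus() and glob scans
  // can be answered without touching S3; only the actual data read fails.
  Configuration auth = new Configuration(nonauth);
  auth.setBoolean("fs.s3a.metadatastore.authoritative", true);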
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
new file mode 100644
index 0000000..ca87b5c
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestSignerManager.java
@@ -0,0 +1,590 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import com.amazonaws.AmazonWebServiceRequest;
+import com.amazonaws.DefaultRequest;
+import com.amazonaws.SignableRequest;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.Signer;
+import com.amazonaws.auth.SignerFactory;
+import org.assertj.core.api.Assertions;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.auth.TestSignerManager.SignerInitializerForTest.StoreValue;
+import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS;
+
+/**
+ * Tests for the SignerManager.
+ */
+public class TestSignerManager {
+
+  private static final Text TEST_TOKEN_KIND = new Text("TestTokenKind");
+  private static final Text TEST_TOKEN_SERVICE = new Text("TestTokenService");
+  private static final String TEST_KEY_IDENTIFIER = "TEST_KEY_IDENTIFIER";
+  private static final String BUCKET1 = "bucket1";
+  private static final String BUCKET2 = "bucket2";
+  private static final String TESTUSER1 = "testuser1";
+  private static final String TESTUSER2 = "testuser2";
+
+  @Rule public Timeout testTimeout = new Timeout(10_000L,
+      TimeUnit.MILLISECONDS);
+
+  @Before
+  public void beforeTest() {
+    SignerForTest1.reset();
+    SignerForTest2.reset();
+    SignerInitializerForTest.reset();
+    SignerForInitializerTest.reset();
+    SignerInitializer2ForTest.reset();
+  }
+
+  @Test
+  public void testPredefinedSignerInitialization() throws IOException {
+    // Try initializing a pre-defined Signer type.
+    // Should run through without an exception.
+    Configuration config = new Configuration();
+    // Pre-defined signer types as of AWS-SDK 1.11.563
+    // AWS4SignerType, QueryStringSignerType, AWSS3V4SignerType
+    config.set(CUSTOM_SIGNERS, "AWS4SignerType");
+    SignerManager signerManager = new SignerManager("dontcare", null, config,
+        UserGroupInformation.getCurrentUser());
+    signerManager.initCustomSigners();
+  }
+
+  @Test
+  public void testCustomSignerFailureIfNotRegistered() throws Exception {
+    Configuration config = new Configuration();
+    config.set(CUSTOM_SIGNERS, "testsignerUnregistered");
+    SignerManager signerManager = new SignerManager("dontcare", null, config,
+        UserGroupInformation.getCurrentUser());
+    // Make sure the config is respected.
+    signerManager.initCustomSigners();
+    // Simulate a call from the AWS SDK to create the signer.
+    LambdaTestUtils.intercept(Exception.class,
+        () -> SignerFactory.createSigner("testsignerUnregistered", null));
+    // Expecting generic Exception.class to handle future implementation
+    // changes.
+    // For now, this is an NPE
+  }
+
+  @Test
+  public void testCustomSignerInitialization() throws IOException {
+    Configuration config = new Configuration();
+    config.set(CUSTOM_SIGNERS, "testsigner1:" + SignerForTest1.class.getName());
+    SignerManager signerManager = new SignerManager("dontcare", null, config,
+        UserGroupInformation.getCurrentUser());
+    signerManager.initCustomSigners();
+    Signer s1 = SignerFactory.createSigner("testsigner1", null);
+    s1.sign(null, null);
+    Assertions.assertThat(SignerForTest1.initialized)
+        .as(SignerForTest1.class.getName() + " not initialized")
+        .isEqualTo(true);
+  }
+
+  @Test
+  public void testMultipleCustomSignerInitialization() throws IOException {
+    Configuration config = new Configuration();
+    config.set(CUSTOM_SIGNERS,
+        "testsigner1:" + SignerForTest1.class.getName() + "," + "testsigner2:"
+            + SignerForTest2.class.getName());
+    SignerManager signerManager = new SignerManager("dontcare", null, config,
+        UserGroupInformation.getCurrentUser());
+    signerManager.initCustomSigners();
+    Signer s1 = SignerFactory.createSigner("testsigner1", null);
+    s1.sign(null, null);
+    Assertions.assertThat(SignerForTest1.initialized)
+        .as(SignerForTest1.class.getName() + " not initialized")
+        .isEqualTo(true);
+
+    Signer s2 = SignerFactory.createSigner("testsigner2", null);
+    s2.sign(null, null);
+    Assertions.assertThat(SignerForTest2.initialized)
+        .as(SignerForTest2.class.getName() + " not initialized")
+        .isEqualTo(true);
+  }
+
+  @Test
+  public void testSimpleSignerInitializer() throws IOException {
+    Configuration config = new Configuration();
+    config.set(CUSTOM_SIGNERS,
+        "testsigner1:" + SignerForTest1.class.getName() + ":"
+            + SignerInitializerForTest.class.getName());
+
+    Token<? extends TokenIdentifier> token = createTokenForTest("identifier");
+    DelegationTokenProvider dtProvider = new DelegationTokenProviderForTest(
+        token);
+
+    UserGroupInformation ugi = UserGroupInformation
+        .createRemoteUser("testuser");
+
+    SignerManager signerManager = new SignerManager("bucket1", dtProvider,
+        config, ugi);
+    signerManager.initCustomSigners();
+    Assertions.assertThat(SignerInitializerForTest.instanceCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " creation count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializerForTest.registerCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(0);
+
+    signerManager.close();
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+  }
+
+  @Test
+  public void testMultipleSignerInitializers() throws IOException {
+    Configuration config = new Configuration();
+    config.set(CUSTOM_SIGNERS,
+        "testsigner1:" + SignerForTest1.class.getName() + ":"
+            + SignerInitializerForTest.class.getName() + "," // 2nd signer
+            + "testsigner2:" + SignerForTest2.class.getName() + ","
+            // 3rd signer
+            + "testsigner3:" + SignerForTest2.class.getName() + ":"
+            + SignerInitializer2ForTest.class.getName());
+
+    Token<? extends TokenIdentifier> token = createTokenForTest("identifier");
+    DelegationTokenProvider dtProvider = new DelegationTokenProviderForTest(
+        token);
+
+    UserGroupInformation ugi = UserGroupInformation
+        .createRemoteUser("testuser");
+
+    SignerManager signerManager = new SignerManager("bucket1", dtProvider,
+        config, ugi);
+    signerManager.initCustomSigners();
+
+    Assertions.assertThat(SignerInitializerForTest.instanceCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " creation count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializerForTest.registerCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(0);
+
+    Assertions.assertThat(SignerInitializer2ForTest.instanceCount)
+        .as(SignerInitializer2ForTest.class.getName()
+            + " creation count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializer2ForTest.registerCount)
+        .as(SignerInitializer2ForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializer2ForTest.unregisterCount)
+        .as(SignerInitializer2ForTest.class.getName()
+            + " registration count mismatch").isEqualTo(0);
+
+    signerManager.close();
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+    Assertions.assertThat(SignerInitializer2ForTest.unregisterCount)
+        .as(SignerInitializer2ForTest.class.getName()
+            + " registration count mismatch").isEqualTo(1);
+  }
+
+  @Test
+  public void testSignerInitializerMultipleInstances()
+      throws IOException, InterruptedException {
+
+    String id1 = "id1";
+    String id2 = "id2";
+    String id3 = "id3";
+    UserGroupInformation ugiU1 = UserGroupInformation
+        .createRemoteUser(TESTUSER1);
+    UserGroupInformation ugiU2 = UserGroupInformation
+        .createRemoteUser(TESTUSER2);
+
+    SignerManager signerManagerU1B1 = fakeS3AInstanceCreation(id1,
+        SignerForInitializerTest.class, SignerInitializerForTest.class, BUCKET1,
+        ugiU1);
+    SignerManager signerManagerU2B1 = fakeS3AInstanceCreation(id2,
+        SignerForInitializerTest.class, SignerInitializerForTest.class, BUCKET1,
+        ugiU2);
+    SignerManager signerManagerU2B2 = fakeS3AInstanceCreation(id3,
+        SignerForInitializerTest.class, SignerInitializerForTest.class, BUCKET2,
+        ugiU2);
+
+    Assertions.assertThat(SignerInitializerForTest.instanceCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " creation count mismatch").isEqualTo(3);
+    Assertions.assertThat(SignerInitializerForTest.registerCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(3);
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(0);
+
+    // Simulate U1B1 making a request
+    attemptSignAndVerify(id1, BUCKET1, ugiU1, false);
+
+    // Simulate U2B1 making a request
+    attemptSignAndVerify(id2, BUCKET1, ugiU2, false);
+
+    // Simulate U2B2 making a request
+    attemptSignAndVerify(id3, BUCKET2, ugiU2, false);
+
+    // Simulate U1B2 (not defined - so Signer should get a null)
+    attemptSignAndVerify("dontcare", BUCKET2, ugiU1, true);
+
+    closeAndVerifyNull(signerManagerU1B1, BUCKET1, ugiU1, 2);
+    closeAndVerifyNull(signerManagerU2B2, BUCKET2, ugiU2, 1);
+    closeAndVerifyNull(signerManagerU2B1, BUCKET1, ugiU2, 0);
+
+    Assertions.assertThat(SignerInitializerForTest.unregisterCount)
+        .as(SignerInitializerForTest.class.getName()
+            + " registration count mismatch").isEqualTo(3);
+  }
+
+  private void attemptSignAndVerify(String identifier, String bucket,
+      UserGroupInformation ugi, boolean expectNullStoreInfo)
+      throws IOException, InterruptedException {
+    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
+      Signer signer = new SignerForInitializerTest();
+      SignableRequest<?> signableRequest = constructSignableRequest(bucket);
+      signer.sign(signableRequest, null);
+      verifyStoreValueInSigner(expectNullStoreInfo, bucket, identifier);
+      return null;
+    });
+  }
+
+  private void verifyStoreValueInSigner(boolean expectNull, String bucketName,
+      String identifier) throws IOException {
+    if (expectNull) {
+      Assertions.assertThat(SignerForInitializerTest.retrievedStoreValue)
+          .as("Retrieved store value expected to be null").isNull();
+    } else {
+      StoreValue storeValue = SignerForInitializerTest.retrievedStoreValue;
+      Assertions.assertThat(storeValue).as("StoreValue should not be null")
+          .isNotNull();
+      Assertions.assertThat(storeValue.getBucketName())
+          .as("Bucket Name mismatch").isEqualTo(bucketName);
+      Configuration conf = storeValue.getStoreConf();
+      Assertions.assertThat(conf).as("Configuration should not be null")
+          .isNotNull();
+      Assertions.assertThat(conf.get(TEST_KEY_IDENTIFIER))
+          .as("Identifier mistmatch").isEqualTo(identifier);
+      Token<? extends TokenIdentifier> token = storeValue.getDtProvider()
+          .getFsDelegationToken();
+      String tokenId = new String(token.getIdentifier(),
+          StandardCharsets.UTF_8);
+      Assertions.assertThat(tokenId)
+          .as("Mismatch in delegation token identifier").isEqualTo(
+          createTokenIdentifierString(identifier, bucketName,
+              UserGroupInformation.getCurrentUser().getShortUserName()));
+    }
+  }
+
+  private void closeAndVerifyNull(Closeable closeable, String bucketName,
+      UserGroupInformation ugi, int expectedCount)
+      throws IOException, InterruptedException {
+    closeable.close();
+    attemptSignAndVerify("dontcare", bucketName, ugi, true);
+    Assertions.assertThat(SignerInitializerForTest.storeCache.size())
+        .as("StoreCache size mismatch").isEqualTo(expectedCount);
+  }
+
+  /**
+   * SignerForTest1.
+   */
+  @Private
+  public static class SignerForTest1 implements Signer {
+
+    private static boolean initialized = false;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      initialized = true;
+    }
+
+    public static void reset() {
+      initialized = false;
+    }
+  }
+
+  /**
+   * SignerForTest2.
+   */
+  @Private
+  public static class SignerForTest2 implements Signer {
+
+    private static boolean initialized = false;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      initialized = true;
+    }
+
+    public static void reset() {
+      initialized = false;
+    }
+  }
+
+  /**
+   * SignerInitializerForTest.
+   */
+  @Private
+  public static class SignerInitializerForTest implements AwsSignerInitializer {
+
+    private static int registerCount = 0;
+    private static int unregisterCount = 0;
+    private static int instanceCount = 0;
+
+    private static final Map<StoreKey, StoreValue> storeCache = new HashMap<>();
+
+    public SignerInitializerForTest() {
+      instanceCount++;
+    }
+
+    @Override
+    public void registerStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      registerCount++;
+      StoreKey storeKey = new StoreKey(bucketName, storeUgi);
+      StoreValue storeValue = new StoreValue(bucketName, storeConf, dtProvider);
+      storeCache.put(storeKey, storeValue);
+    }
+
+    @Override
+    public void unregisterStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      unregisterCount++;
+      StoreKey storeKey = new StoreKey(bucketName, storeUgi);
+      storeCache.remove(storeKey);
+    }
+
+    public static void reset() {
+      registerCount = 0;
+      unregisterCount = 0;
+      instanceCount = 0;
+      storeCache.clear();
+    }
+
+    public static StoreValue getStoreInfo(String bucketName,
+        UserGroupInformation storeUgi) {
+      StoreKey storeKey = new StoreKey(bucketName, storeUgi);
+      return storeCache.get(storeKey);
+    }
+
+    private static class StoreKey {
+      private final String bucketName;
+      private final UserGroupInformation ugi;
+
+      public StoreKey(String bucketName, UserGroupInformation ugi) {
+        this.bucketName = bucketName;
+        this.ugi = ugi;
+      }
+
+      @Override
+      public boolean equals(Object o) {
+        if (this == o) {
+          return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+          return false;
+        }
+        StoreKey storeKey = (StoreKey) o;
+        return Objects.equals(bucketName, storeKey.bucketName) && Objects
+            .equals(ugi, storeKey.ugi);
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(bucketName, ugi);
+      }
+    }
+
+    static class StoreValue {
+      private final String bucketName;
+      private final Configuration storeConf;
+      private final DelegationTokenProvider dtProvider;
+
+      public StoreValue(String bucketName, Configuration storeConf,
+          DelegationTokenProvider dtProvider) {
+        this.bucketName = bucketName;
+        this.storeConf = storeConf;
+        this.dtProvider = dtProvider;
+      }
+
+      String getBucketName() {
+        return bucketName;
+      }
+
+      Configuration getStoreConf() {
+        return storeConf;
+      }
+
+      DelegationTokenProvider getDtProvider() {
+        return dtProvider;
+      }
+    }
+  }
+
+  /**
+   * To be used in conjunction with {@link SignerInitializerForTest}.
+   */
+  @Private
+  public static class SignerForInitializerTest implements Signer {
+
+    private static StoreValue retrievedStoreValue;
+
+    @Override
+    public void sign(SignableRequest<?> request, AWSCredentials credentials) {
+      String bucketName = request.getEndpoint().getHost();
+      try {
+        retrievedStoreValue = SignerInitializerForTest
+            .getStoreInfo(bucketName, UserGroupInformation.getCurrentUser());
+      } catch (IOException e) {
+        throw new RuntimeException("Failed to get current ugi", e);
+      }
+    }
+
+    public static void reset() {
+      retrievedStoreValue = null;
+    }
+  }
+
+  /**
+   * DelegationTokenProviderForTest.
+   */
+  @Private
+  private static class DelegationTokenProviderForTest
+      implements DelegationTokenProvider {
+
+    private final Token<? extends TokenIdentifier> token;
+
+    private DelegationTokenProviderForTest(
+        Token<? extends TokenIdentifier> token) {
+      this.token = token;
+    }
+
+    @Override
+    public Token<? extends TokenIdentifier> getFsDelegationToken()
+        throws IOException {
+      return this.token;
+    }
+  }
+
+  /**
+   * SignerInitializer2ForTest.
+   */
+  @Private
+  public static class SignerInitializer2ForTest
+      implements AwsSignerInitializer {
+
+    private static int registerCount = 0;
+    private static int unregisterCount = 0;
+    private static int instanceCount = 0;
+
+    public SignerInitializer2ForTest() {
+      instanceCount++;
+    }
+
+    @Override
+    public void registerStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      registerCount++;
+    }
+
+    @Override
+    public void unregisterStore(String bucketName, Configuration storeConf,
+        DelegationTokenProvider dtProvider, UserGroupInformation storeUgi) {
+      unregisterCount++;
+    }
+
+    public static void reset() {
+      registerCount = 0;
+      unregisterCount = 0;
+      instanceCount = 0;
+    }
+  }
+
+  private Token<? extends TokenIdentifier> createTokenForTest(String idString) {
+    byte[] identifier = idString.getBytes(StandardCharsets.UTF_8);
+    byte[] password = "notapassword".getBytes(StandardCharsets.UTF_8);
+    Token<? extends TokenIdentifier> token = new Token<>(identifier, password,
+        TEST_TOKEN_KIND, TEST_TOKEN_SERVICE);
+    return token;
+  }
+
+  private SignerManager fakeS3AInstanceCreation(String identifier,
+      Class<? extends Signer> signerClazz,
+      Class<? extends AwsSignerInitializer> signerInitializerClazz,
+      String bucketName, UserGroupInformation ugi) {
+    // Simulate new S3A instance interactions.
+    Objects.requireNonNull(signerClazz, "SignerClazz missing");
+    Objects.requireNonNull(signerInitializerClazz,
+        "SignerInitializerClazzMissing");
+    Configuration config = new Configuration();
+    config.set(TEST_KEY_IDENTIFIER, identifier);
+    config.set(CUSTOM_SIGNERS,
+        signerClazz.getCanonicalName() + ":" + signerClazz.getName() + ":"
+            + signerInitializerClazz.getName());
+    Token<? extends TokenIdentifier> token1 = createTokenForTest(
+        createTokenIdentifierString(identifier, bucketName,
+            ugi.getShortUserName()));
+    DelegationTokenProvider dtProvider1 = new DelegationTokenProviderForTest(
+        token1);
+    SignerManager signerManager = new SignerManager(bucketName, dtProvider1,
+        config, ugi);
+    signerManager.initCustomSigners();
+    return signerManager;
+  }
+
+  private String createTokenIdentifierString(String identifier,
+      String bucketName, String user) {
+    return identifier + "_" + bucketName + "_" + user;
+  }
+
+  private SignableRequest<?> constructSignableRequest(String bucketName)
+      throws URISyntaxException {
+    DefaultRequest signableRequest = new DefaultRequest(
+        AmazonWebServiceRequest.NOOP, "fakeservice");
+    URI uri = new URI("s3://" + bucketName + "/");
+    signableRequest.setEndpoint(uri);
+    return signableRequest;
+  }
+}
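A minimal sketch of the registration pattern these tests exercise, with hypothetical
com.example classes standing in for SignerForTest1 and SignerInitializerForTest; it
assumes CUSTOM_SIGNERS maps to the fs.s3a.custom.signers key imported above.

  import org.apache.hadoop.conf.Configuration;

  // Format: signerName:SignerClass[:SignerInitializerClass], comma-separated.
  Configuration conf = new Configuration();
  conf.set("fs.s3a.custom.signers",
      "mySigner:com.example.MySigner:com.example.MySignerInitializer");
  // On S3A filesystem startup, its SignerManager registers "mySigner" with the
  // AWS SignerFactory and invokes
  // MySignerInitializer.registerStore(bucket, conf, dtProvider, ugi);
  // closing the SignerManager later triggers the matching unregisterStore()
  // call, mirroring the register/unregister counts asserted above.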
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
index b0e2b8e..455a8a3 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
@@ -550,10 +550,7 @@
   @Test
   public void testWriteNormalStream() throws Throwable {
     S3AFileSystem fs = getFileSystem();
-    Assume.assumeTrue(
-        "Filesystem does not have magic support enabled: " + fs,
-        fs.hasCapability(STORE_CAPABILITY_MAGIC_COMMITTER));
-
+    assumeMagicCommitEnabled(fs);
     Path destFile = path("normal");
     try (FSDataOutputStream out = fs.create(destFile, true)) {
       out.writeChars("data");
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 2d17ca5..97fcdc5 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -517,7 +517,7 @@
     String name = fs.getUri().toString();
     S3GuardTool.BucketInfo cmd = new S3GuardTool.BucketInfo(
         getConfiguration());
-    if (fs.hasCapability(
+    if (fs.hasPathCapability(fs.getWorkingDirectory(),
         CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER)) {
       // if the FS is magic, expect this to work
       exec(cmd, S3GuardTool.BucketInfo.MAGIC_FLAG, name);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
index 1f2faa2..d6058d1 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestS3Select.java
@@ -102,9 +102,9 @@
   @Override
   public void setup() throws Exception {
     super.setup();
-    Assume.assumeTrue("S3 Select is not enabled",
-        getFileSystem().hasCapability(S3_SELECT_CAPABILITY));
     csvPath = path(getMethodName() + ".csv");
+    Assume.assumeTrue("S3 Select is not enabled",
+        getFileSystem().hasPathCapability(csvPath, S3_SELECT_CAPABILITY));
     selectConf = new Configuration(false);
     selectConf.setBoolean(SELECT_ERRORS_INCLUDE_SQL, true);
     createStandardCsvFile(getFileSystem(), csvPath, ALL_QUOTES);
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3955721..278b815 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.CreateFlag;
@@ -70,6 +71,7 @@
 import org.apache.hadoop.util.VersionInfo;
 
 import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 
 /**
  * A FileSystem to access Azure Data Lake Store.
@@ -1033,4 +1035,20 @@
     }
     return dest;
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+
+    switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
+
+    case CommonPathCapabilities.FS_ACLS:
+    case CommonPathCapabilities.FS_APPEND:
+    case CommonPathCapabilities.FS_CONCAT:
+    case CommonPathCapabilities.FS_PERMISSIONS:
+      return true;
+    default:
+      return super.hasPathCapability(path, capability);
+    }
+  }
 }
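A caller-side sketch of the probe this override serves, with an illustrative ADL URI;
CommonPathCapabilities.FS_APPEND is the constant imported above.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CommonPathCapabilities;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Illustrative store URI; substitute a real ADL account.
  Path p = new Path("adl://example.azuredatalakestore.net/data/log.txt");
  FileSystem fs = p.getFileSystem(new Configuration());
  if (fs.hasPathCapability(p, CommonPathCapabilities.FS_APPEND)) {
    // safe to open the file for append here
  }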
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
index 980b683..27004db 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlSdkConfiguration.java
@@ -98,6 +98,7 @@
     conf = AdlStorageConfiguration.getConfiguration();
     conf.set(ADL_SSL_CHANNEL_MODE, sslChannelModeConfigValue);
     fs = (AdlFileSystem) (AdlStorageConfiguration.createStorageConnector(conf));
+    Assume.assumeNotNull(fs);
 
     SSLChannelMode sslChannelMode = fs.getAdlClient().getSSLChannelMode();
     Assert.assertEquals(
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index f8962d9..a990b60 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -84,6 +85,7 @@
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.azure.NativeAzureFileSystemHelper.*;
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
@@ -3866,4 +3868,19 @@
   void updateDaemonUsers(List<String> daemonUsers) {
     this.daemonUsers = daemonUsers;
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    switch (validatePathCapabilityArgs(path, capability)) {
+
+    case CommonPathCapabilities.FS_PERMISSIONS:
+      return true;
+    // Append support is dynamic
+    case CommonPathCapabilities.FS_APPEND:
+      return appendSupportEnabled;
+    default:
+      return super.hasPathCapability(path, capability);
+    }
+  }
 }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
index 19b1a90..723a25c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java
@@ -146,6 +146,10 @@
       DefaultValue = DEFAULT_ENABLE_FLUSH)
   private boolean enableFlush;
 
+  @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_DISABLE_OUTPUTSTREAM_FLUSH,
+      DefaultValue = DEFAULT_DISABLE_OUTPUTSTREAM_FLUSH)
+  private boolean disableOutputStreamFlush;
+
   @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ENABLE_AUTOTHROTTLING,
       DefaultValue = DEFAULT_ENABLE_AUTOTHROTTLING)
   private boolean enableAutoThrottling;
@@ -427,6 +431,10 @@
     return this.enableFlush;
   }
 
+  public boolean isOutputStreamFlushDisabled() {
+    return this.disableOutputStreamFlush;
+  }
+
   public boolean isAutoThrottlingEnabled() {
     return this.enableAutoThrottling;
   }
@@ -635,4 +643,10 @@
   void setEnableFlush(boolean enableFlush) {
     this.enableFlush = enableFlush;
   }
+
+  @VisibleForTesting
+  void setDisableOutputStreamFlush(boolean disableOutputStreamFlush) {
+    this.disableOutputStreamFlush = disableOutputStreamFlush;
+  }
+
 }
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
index d93822f..c6640c4 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -55,6 +56,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
 import org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations;
 import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
@@ -76,6 +78,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 
+import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
+
 /**
  * A {@link org.apache.hadoop.fs.FileSystem} for reading and writing files stored on <a
  * href="http://store.azure.com/">Windows Azure</a>
@@ -108,7 +112,7 @@
     this.setWorkingDirectory(this.getHomeDirectory());
 
     if (abfsConfiguration.getCreateRemoteFileSystemDuringInitialization()) {
-      if (!this.fileSystemExists()) {
+      if (this.tryGetFileStatus(new Path(AbfsHttpConstants.ROOT_PATH)) == null) {
         try {
           this.createFileSystem();
         } catch (AzureBlobFileSystemException ex) {
@@ -1129,4 +1133,20 @@
       }
     }
   }
+
+  @Override
+  public boolean hasPathCapability(final Path path, final String capability)
+      throws IOException {
+    // qualify the path to make sure that it refers to the current FS.
+    final Path p = makeQualified(path);
+    switch (validatePathCapabilityArgs(p, capability)) {
+    case CommonPathCapabilities.FS_PERMISSIONS:
+    case CommonPathCapabilities.FS_APPEND:
+      return true;
+    case CommonPathCapabilities.FS_ACLS:
+      return getIsNamespaceEnabled();
+    default:
+      return super.hasPathCapability(p, capability);
+    }
+  }
 }
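For ABFS the same probe is dynamic: FS_ACLS depends on whether the account has a
hierarchical namespace enabled, so callers should check before using ACL APIs. A short
sketch with an illustrative URI:

  Path p = new Path("abfs://container@account.dfs.core.windows.net/dir");
  FileSystem fs = p.getFileSystem(new Configuration());
  if (fs.hasPathCapability(p, CommonPathCapabilities.FS_ACLS)) {
    // getIsNamespaceEnabled() returned true; ACL calls such as
    // fs.getAclStatus(p) are expected to work.
  }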
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index 6b2d196..7f1bf10 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -362,7 +362,8 @@
         AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path),
         0,
         abfsConfiguration.getWriteBufferSize(),
-        abfsConfiguration.isFlushEnabled());
+        abfsConfiguration.isFlushEnabled(),
+        abfsConfiguration.isOutputStreamFlushDisabled());
   }
 
   public void createDirectory(final Path path, final FsPermission permission, final FsPermission umask)
@@ -434,7 +435,8 @@
         AbfsHttpConstants.FORWARD_SLASH + getRelativePath(path),
         offset,
         abfsConfiguration.getWriteBufferSize(),
-        abfsConfiguration.isFlushEnabled());
+        abfsConfiguration.isFlushEnabled(),
+        abfsConfiguration.isOutputStreamFlushDisabled());
   }
 
   public void rename(final Path source, final Path destination) throws
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
index 8cd86bf..cd86f18 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java
@@ -51,7 +51,15 @@
   public static final String FS_AZURE_ALWAYS_USE_HTTPS = "fs.azure.always.use.https";
   public static final String FS_AZURE_ATOMIC_RENAME_KEY = "fs.azure.atomic.rename.key";
   public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth";
+  /** Provides a config control to enable or disable ABFS Flush operations -
+   *  HFlush and HSync. Default is true. **/
   public static final String FS_AZURE_ENABLE_FLUSH = "fs.azure.enable.flush";
+  /** Provides a config control to disable or enable the OutputStream Flush API
+   *  in AbfsOutputStream. Flush() triggers actions that guarantee buffered
+   *  data is persisted, at a performance cost, even though the API
+   *  documentation has no such expectation of data being persisted.
+   *  Default value of this config is true. **/
+  public static final String FS_AZURE_DISABLE_OUTPUTSTREAM_FLUSH = "fs.azure.disable.outputstream.flush";
   public static final String FS_AZURE_USER_AGENT_PREFIX_KEY = "fs.azure.user.agent.prefix";
   public static final String FS_AZURE_SSL_CHANNEL_MODE_KEY = "fs.azure.ssl.channel.mode";
   public static final String FS_AZURE_USE_UPN = "fs.azure.use.upn";
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
index f0c33ee..e0c355a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/FileSystemConfigurations.java
@@ -57,6 +57,7 @@
 
   public static final int DEFAULT_READ_AHEAD_QUEUE_DEPTH = -1;
   public static final boolean DEFAULT_ENABLE_FLUSH = true;
+  public static final boolean DEFAULT_DISABLE_OUTPUTSTREAM_FLUSH = true;
   public static final boolean DEFAULT_ENABLE_AUTOTHROTTLING = true;
 
   public static final DelegatingSSLSocketFactory.SSLChannelMode DEFAULT_FS_AZURE_SSL_CHANNEL_MODE
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
index f2f0a45..fd56eb0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsOutputStream.java
@@ -52,6 +52,7 @@
   private long position;
   private boolean closed;
   private boolean supportFlush;
+  private boolean disableOutputStreamFlush;
   private volatile IOException lastError;
 
   private long lastFlushOffset;
@@ -80,12 +81,14 @@
       final String path,
       final long position,
       final int bufferSize,
-      final boolean supportFlush) {
+      final boolean supportFlush,
+      final boolean disableOutputStreamFlush) {
     this.client = client;
     this.path = path;
     this.position = position;
     this.closed = false;
     this.supportFlush = supportFlush;
+    this.disableOutputStreamFlush = disableOutputStreamFlush;
     this.lastError = null;
     this.lastFlushOffset = 0;
     this.bufferSize = bufferSize;
@@ -199,7 +202,7 @@
    */
   @Override
   public void flush() throws IOException {
-    if (supportFlush) {
+    if (!disableOutputStreamFlush) {
       flushInternalAsync();
     }
   }
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
index 02a62c8..c5bad77 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/abfs.md
@@ -643,6 +643,23 @@
 `org.apache.hadoop.fs.azurebfs.AbfsConfiguration` for the full list
 of configuration options and their default values.
 
+### <a name="flushconfigoptions"></a> Flush Options
+
+#### <a name="abfsflushconfigoptions"></a> 1. Azure Blob File System Flush Options
+Config `fs.azure.enable.flush` provides an option to render the ABFS flush APIs,
+HFlush() and HSync(), as no-ops when it is set to false. By default, this
+config is set to true.
+
+When enabled, both APIs ensure that data is persisted.
+
+#### <a name="outputstreamflushconfigoptions"></a> 2. OutputStream Flush Options
+Config `fs.azure.disable.outputstream.flush` provides an option to render the
+OutputStream Flush() API a no-op in AbfsOutputStream. By default this config
+is set to true.
+
+Since HFlush() is the only documented API that guarantees persistent data
+transfer, having Flush() also attempt to persist buffered data leads to
+performance issues.
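
For reference, a minimal sketch of wiring the two options into a client configuration (using Hadoop's `Configuration` API; the values shown are simply the defaults spelled out):

```java
import org.apache.hadoop.conf.Configuration;

public class AbfsFlushConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep HFlush()/HSync() functional (the default).
    conf.setBoolean("fs.azure.enable.flush", true);
    // Leave OutputStream#flush() as a no-op (the default), so only
    // HFlush()/HSync() trigger a flush to the service.
    conf.setBoolean("fs.azure.disable.outputstream.flush", true);
    // A FileSystem created from this configuration honors both flags.
  }
}
```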
 
 ## <a name="troubleshooting"></a> Troubleshooting
 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
index d60cae8..60f7f7d 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java
@@ -208,43 +208,44 @@
   }
 
   @Test
-  public void testFlushWithFlushEnabled() throws Exception {
-    testFlush(true);
-  }
-
-  @Test
-  public void testFlushWithFlushDisabled() throws Exception {
+  public void testFlushWithOutputStreamFlushEnabled() throws Exception {
     testFlush(false);
   }
 
-  private void testFlush(boolean flushEnabled) throws Exception {
+  @Test
+  public void testFlushWithOutputStreamFlushDisabled() throws Exception {
+    testFlush(true);
+  }
+
+  private void testFlush(boolean disableOutputStreamFlush) throws Exception {
     final AzureBlobFileSystem fs = (AzureBlobFileSystem) getFileSystem();
 
-    // Simulate setting "fs.azure.enable.flush" to true or false
-    fs.getAbfsStore().getAbfsConfiguration().setEnableFlush(flushEnabled);
+    // Simulate setting "fs.azure.disable.outputstream.flush" to true or false
+    fs.getAbfsStore().getAbfsConfiguration()
+        .setDisableOutputStreamFlush(disableOutputStreamFlush);
 
     final Path testFilePath = path(methodName.getMethodName());
     byte[] buffer = getRandomBytesArray();
 
     // The test case must write "fs.azure.write.request.size" bytes
     // to the stream in order for the data to be uploaded to storage.
-    assertEquals(
-        fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize(),
+    assertEquals(fs.getAbfsStore().getAbfsConfiguration().getWriteBufferSize(),
         buffer.length);
 
     try (FSDataOutputStream stream = fs.create(testFilePath)) {
       stream.write(buffer);
 
       // Write asynchronously uploads data, so we must wait for completion
-      AbfsOutputStream abfsStream = (AbfsOutputStream) stream.getWrappedStream();
+      AbfsOutputStream abfsStream = (AbfsOutputStream) stream
+          .getWrappedStream();
       abfsStream.waitForPendingUploads();
 
       // Flush commits the data so it can be read.
       stream.flush();
 
-      // Verify that the data can be read if flushEnabled is true; and otherwise
-      // cannot be read.
-      validate(fs.open(testFilePath), buffer, flushEnabled);
+      // Verify that the data can be read if disableOutputStreamFlush is
+      // false; and otherwise cannot be read.
+      validate(fs.open(testFilePath), buffer, !disableOutputStreamFlush);
     }
   }
 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
index b9a2315..74c8803 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs.azurebfs;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.UUID;
 
@@ -24,6 +25,7 @@
 import org.junit.Test;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.services.AuthType;
 
@@ -67,10 +69,10 @@
             + testUri.substring(testUri.indexOf("@"));
     AzureBlobFileSystem fs = this.getFileSystem(nonExistingFsUrl);
 
-    intercept(AbfsRestOperationException.class,
+    intercept(FileNotFoundException.class,
             "\"The specified filesystem does not exist.\", 404",
             ()-> {
-              fs.getIsNamespaceEnabled();
+              fs.getFileStatus(new Path("/")); // Run a dummy FS call
             });
   }
 
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index e20f206..f0adc78 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -171,4 +171,10 @@
 
   /** Filename of sorted target listing. */
   public static final String TARGET_SORTED_FILE = "target_sorted.seq";
+
+  public static final String LENGTH_MISMATCH_ERROR_MSG =
+          "Mismatch in length of source:";
+
+  public static final String CHECKSUM_MISMATCH_ERROR_MSG =
+          "Checksum mismatch between ";
 }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 546062f..139bd08 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -252,7 +252,7 @@
           // This is the last chunk of the splits, consolidate allChunkPaths
           try {
             concatFileChunks(conf, srcFileStatus.getPath(), targetFile,
-                allChunkPaths);
+                allChunkPaths, srcFileStatus);
           } catch (IOException e) {
             // If the concat failed because a chunk file doesn't exist,
             // then we assume that the CopyMapper has skipped copying this
@@ -609,7 +609,8 @@
    * Concat the passed chunk files into one and rename it to the targetFile.
    */
   private void concatFileChunks(Configuration conf, Path sourceFile,
-                                Path targetFile, LinkedList<Path> allChunkPaths)
+                                Path targetFile, LinkedList<Path> allChunkPaths,
+                                CopyListingFileStatus srcFileStatus)
       throws IOException {
     if (allChunkPaths.size() == 1) {
       return;
@@ -637,8 +638,9 @@
       LOG.debug("concat: result: " + dstfs.getFileStatus(firstChunkFile));
     }
     rename(dstfs, firstChunkFile, targetFile);
-    DistCpUtils.compareFileLengthsAndChecksums(
-        srcfs, sourceFile, null, dstfs, targetFile, skipCrc);
+    DistCpUtils.compareFileLengthsAndChecksums(srcFileStatus.getLen(),
+        srcfs, sourceFile, null, dstfs,
+            targetFile, skipCrc, srcFileStatus.getLen());
   }
 
   /**
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index 336779e..f3c5b4b 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -139,7 +139,6 @@
   public void map(Text relPath, CopyListingFileStatus sourceFileStatus,
           Context context) throws IOException, InterruptedException {
     Path sourcePath = sourceFileStatus.getPath();
-
     if (LOG.isDebugEnabled())
       LOG.debug("DistCpMapper::map(): Received " + sourcePath + ", " + relPath);
 
@@ -354,7 +353,7 @@
     if (sameLength && sameBlockSize) {
       return skipCrc ||
           DistCpUtils.checksumsAreEqual(sourceFS, source.getPath(), null,
-              targetFS, target.getPath());
+              targetFS, target.getPath(), source.getLen());
     } else {
       return false;
     }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index fa91930..4683cdd 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -143,8 +143,9 @@
           offset, context, fileAttributes, sourceChecksum);
 
       if (!source.isSplit()) {
-        DistCpUtils.compareFileLengthsAndChecksums(sourceFS, sourcePath,
-            sourceChecksum, targetFS, targetPath, skipCrc);
+        DistCpUtils.compareFileLengthsAndChecksums(source.getLen(), sourceFS,
+                sourcePath, sourceChecksum, targetFS,
+                targetPath, skipCrc, source.getLen());
       }
       // it's not append or direct write (preferred for s3a) case, thus we first
       // write to a temporary file, then rename it to the target path.
@@ -247,24 +248,27 @@
     boolean finished = false;
     try {
       inStream = getInputStream(source, context.getConfiguration());
+      long fileLength = source2.getLen();
+      int numBytesToRead  = (int) getNumBytesToRead(fileLength, sourceOffset,
+              bufferSize);
       seekIfRequired(inStream, sourceOffset);
-      int bytesRead = readBytes(inStream, buf);
-      while (bytesRead >= 0) {
+      int bytesRead = readBytes(inStream, buf, numBytesToRead);
+      while (bytesRead > 0) {
         if (chunkLength > 0 &&
             (totalBytesRead + bytesRead) >= chunkLength) {
           bytesRead = (int)(chunkLength - totalBytesRead);
           finished = true;
         }
         totalBytesRead += bytesRead;
-        if (action == FileAction.APPEND) {
-          sourceOffset += bytesRead;
-        }
+        sourceOffset += bytesRead;
         outStream.write(buf, 0, bytesRead);
         updateContextStatus(totalBytesRead, context, source2);
         if (finished) {
           break;
         }
-        bytesRead = readBytes(inStream, buf);
+        numBytesToRead  = (int) getNumBytesToRead(fileLength, sourceOffset,
+                bufferSize);
+        bytesRead = readBytes(inStream, buf, numBytesToRead);
       }
       outStream.close();
       outStream = null;
@@ -274,6 +278,15 @@
     return totalBytesRead;
   }
 
+  @VisibleForTesting
+  long getNumBytesToRead(long fileLength, long position, long bufLength) {
+    if (position + bufLength < fileLength) {
+      return  bufLength;
+    } else {
+      return fileLength - position;
+    }
+  }
+
   private void updateContextStatus(long totalBytesRead, Mapper.Context context,
                                    CopyListingFileStatus source2) {
     StringBuilder message = new StringBuilder(DistCpUtils.getFormatter()
@@ -287,10 +300,11 @@
     context.setStatus(message.toString());
   }
 
-  private static int readBytes(ThrottledInputStream inStream, byte buf[])
+  private static int readBytes(ThrottledInputStream inStream, byte[] buf,
+                               int numBytes)
       throws IOException {
     try {
-      return inStream.read(buf);
+      return inStream.read(buf, 0, numBytes);
     } catch (IOException e) {
       throw new CopyReadException(e);
     }
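
The bounded reads above keep the copy from consuming bytes that are appended to the source after the copy listing was taken. As a minimal, self-contained sketch of the same idea over a plain `InputStream` (illustrative names, not DistCp API; the helper mirrors `getNumBytesToRead` above):

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class BoundedCopySketch {
  /** Read at most bufLength bytes, never past the listed file length. */
  static long numBytesToRead(long fileLength, long position, long bufLength) {
    return (position + bufLength < fileLength) ? bufLength : fileLength - position;
  }

  /** Copy exactly fileLength bytes from in to out, ignoring later appends. */
  static long copy(InputStream in, OutputStream out, long fileLength, byte[] buf)
      throws IOException {
    long position = 0;
    int toRead = (int) numBytesToRead(fileLength, position, buf.length);
    int read = toRead > 0 ? in.read(buf, 0, toRead) : -1;
    while (read > 0) {
      out.write(buf, 0, read);
      position += read;
      toRead = (int) numBytesToRead(fileLength, position, buf.length);
      read = toRead > 0 ? in.read(buf, 0, toRead) : -1;
    }
    return position;
  }
}
```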
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 3ba9802..73c49bb 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.CopyListing.AclsNotSupportedException;
 import org.apache.hadoop.tools.CopyListing.XAttrsNotSupportedException;
 import org.apache.hadoop.tools.CopyListingFileStatus;
@@ -565,13 +566,15 @@
    * @throws IOException if there's an exception while retrieving checksums.
    */
   public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
-      FileChecksum sourceChecksum, FileSystem targetFS, Path target)
+                                          FileChecksum sourceChecksum,
+                                          FileSystem targetFS,
+                                          Path target, long sourceLen)
       throws IOException {
     FileChecksum targetChecksum = null;
     try {
       sourceChecksum = sourceChecksum != null
           ? sourceChecksum
-          : sourceFS.getFileChecksum(source);
+          : sourceFS.getFileChecksum(source, sourceLen);
       if (sourceChecksum != null) {
         // iff there's a source checksum, look for one at the destination.
         targetChecksum = targetFS.getFileChecksum(target);
@@ -595,23 +598,22 @@
    * @param skipCrc The flag to indicate whether to skip checksums.
    * @throws IOException if there's a mismatch in file lengths or checksums.
    */
-  public static void compareFileLengthsAndChecksums(
-      FileSystem sourceFS, Path source, FileChecksum sourceChecksum,
-      FileSystem targetFS, Path target, boolean skipCrc) throws IOException {
-    long srcLen = sourceFS.getFileStatus(source).getLen();
-    long tgtLen = targetFS.getFileStatus(target).getLen();
-    if (srcLen != tgtLen) {
+  public static void compareFileLengthsAndChecksums(long srcLen,
+             FileSystem sourceFS, Path source, FileChecksum sourceChecksum,
+             FileSystem targetFS, Path target, boolean skipCrc,
+             long targetLen) throws IOException {
+    if (srcLen != targetLen) {
       throw new IOException(
-          "Mismatch in length of source:" + source + " (" + srcLen
-              + ") and target:" + target + " (" + tgtLen + ")");
+          DistCpConstants.LENGTH_MISMATCH_ERROR_MSG + source + " (" + srcLen
+              + ") and target:" + target + " (" + targetLen + ")");
     }
 
     //At this point, src & dest lengths are same. if length==0, we skip checksum
     if ((srcLen != 0) && (!skipCrc)) {
       if (!checksumsAreEqual(sourceFS, source, sourceChecksum,
-          targetFS, target)) {
+          targetFS, target, srcLen)) {
         StringBuilder errorMessage =
-            new StringBuilder("Checksum mismatch between ")
+            new StringBuilder(DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG)
                 .append(source).append(" and ").append(target).append(".");
         boolean addSkipHint = false;
         String srcScheme = sourceFS.getScheme();
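
Promoting the two message prefixes into `DistCpConstants` lets callers distinguish a length mismatch from a checksum mismatch by inspecting the exception text, which is how the new `TestCopyMapper` test below classifies failures. A minimal sketch of that pattern (hypothetical helper, not part of DistCp):

```java
import java.io.IOException;

import org.apache.hadoop.tools.DistCpConstants;

final class MismatchClassifierSketch {
  enum Mismatch { LENGTH, CHECKSUM, OTHER }

  /** Classify a copy failure by the error-message prefix constants. */
  static Mismatch classify(IOException e) {
    String msg = String.valueOf(e.getMessage());
    if (msg.contains(DistCpConstants.LENGTH_MISMATCH_ERROR_MSG)) {
      return Mismatch.LENGTH;
    } else if (msg.contains(DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG)) {
      return Mismatch.CHECKSUM;
    }
    return Mismatch.OTHER;
  }
}
```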
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
index f4566a6..11118c1f 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
@@ -473,9 +473,12 @@
         if (!skipCrc) {
           Assert.fail("Expected commit to fail");
         }
+        Path sourcePath = new Path(sourceBase + srcFilename);
+        CopyListingFileStatus sourceCurrStatus =
+                new CopyListingFileStatus(fs.getFileStatus(sourcePath));
         Assert.assertFalse(DistCpUtils.checksumsAreEqual(
             fs, new Path(sourceBase + srcFilename), null,
-            fs, new Path(targetBase + srcFilename)));
+            fs, new Path(targetBase + srcFilename), sourceCurrStatus.getLen()));
       } catch(IOException exception) {
         if (skipCrc) {
           LOG.error("Unexpected exception is found", exception);
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
index b4a267d..51eebbb 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
@@ -21,11 +21,16 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,6 +58,8 @@
 import org.apache.hadoop.tools.StubContext;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.StringUtils;
+
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -444,6 +451,55 @@
     }
   }
 
+  @Test(timeout = 40000)
+  public void testCopyWhileAppend() throws Exception {
+    deleteState();
+    mkdirs(SOURCE_PATH + "/1");
+    touchFile(SOURCE_PATH + "/1/3");
+    CopyMapper copyMapper = new CopyMapper();
+    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
+    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
+            stubContext.getContext();
+    copyMapper.setup(context);
+    final Path path = new Path(SOURCE_PATH + "/1/3");
+    int manyBytes = 100000000;
+    appendFile(path, manyBytes);
+    ScheduledExecutorService scheduledExecutorService =
+            Executors.newSingleThreadScheduledExecutor();
+    Runnable task = new Runnable() {
+      public void run() {
+        try {
+          int maxAppendAttempts = 20;
+          int appendCount = 0;
+          while (appendCount < maxAppendAttempts) {
+            appendFile(path, 1000);
+            Thread.sleep(200);
+            appendCount++;
+          }
+        } catch (IOException | InterruptedException e) {
+            LOG.error("Exception encountered ", e);
+            Assert.fail("Test failed: " + e.getMessage());
+        }
+      }
+    };
+    scheduledExecutorService.schedule(task, 10, TimeUnit.MILLISECONDS);
+    try {
+      copyMapper.map(new Text(DistCpUtils.getRelativePath(
+              new Path(SOURCE_PATH), path)),
+              new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(
+                      path)), context);
+    } catch (Exception ex) {
+      LOG.error("Exception encountered ", ex);
+      String exceptionAsString = StringUtils.stringifyException(ex);
+      if (exceptionAsString.contains(DistCpConstants.LENGTH_MISMATCH_ERROR_MSG) ||
+              exceptionAsString.contains(DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG)) {
+        Assert.fail("Test failed: " + exceptionAsString);
+      }
+    } finally {
+      scheduledExecutorService.shutdown();
+    }
+  }
+
   @Test(timeout=40000)
   public void testMakeDirFailure() {
     try {
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestRetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestRetriableFileCopyCommand.java
index 1f8a915..d29447b 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestRetriableFileCopyCommand.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestRetriableFileCopyCommand.java
@@ -24,6 +24,8 @@
 import org.apache.hadoop.mapreduce.*;
 import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.mapred.CopyMapper.FileAction;
+
+import org.junit.Assert;
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
@@ -57,5 +59,26 @@
     }
     assertNotNull("close didn't fail", actualEx);
     assertEquals(expectedEx, actualEx);
-  }  
+  }
+
+  @Test(timeout = 40000)
+  public void testGetNumBytesToRead() {
+    long pos = 100;
+    long buffLength = 1024;
+    long fileLength = 2058;
+    RetriableFileCopyCommand retriableFileCopyCommand =
+            new RetriableFileCopyCommand("Testing NumBytesToRead ",
+                    FileAction.OVERWRITE);
+    long numBytes = retriableFileCopyCommand
+            .getNumBytesToRead(fileLength, pos, buffLength);
+    Assert.assertEquals(1024, numBytes);
+    pos += numBytes;
+    numBytes = retriableFileCopyCommand
+            .getNumBytesToRead(fileLength, pos, buffLength);
+    Assert.assertEquals(934, numBytes);
+    pos += numBytes;
+    numBytes = retriableFileCopyCommand
+            .getNumBytesToRead(fileLength, pos, buffLength);
+    Assert.assertEquals(0, numBytes);
+  }
 }
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
index 5cf1840..6ce8e3e 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
@@ -33,8 +33,8 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.tools.ECAdmin;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.CopyListingFileStatus;
+import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpOptionSwitch;
 import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 import org.apache.hadoop.util.ToolRunner;
@@ -63,6 +63,7 @@
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -1208,7 +1209,7 @@
   }
 
   @Test
-  public void testCompareFileLengthsAndChecksums() throws IOException {
+  public void testCompareFileLengthsAndChecksums() throws Throwable {
 
     String base = "/tmp/verify-checksum/";
     long srcSeed = System.currentTimeMillis();
@@ -1224,22 +1225,18 @@
     Path dstWithLen0 = new Path(base + "dstLen0");
     fs.create(srcWithLen0).close();
     fs.create(dstWithLen0).close();
-    DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithLen0,
-        null, fs, dstWithLen0, false);
+    DistCpUtils.compareFileLengthsAndChecksums(0, fs, srcWithLen0,
+        null, fs, dstWithLen0, false, 0);
 
     // different lengths comparison
     Path srcWithLen1 = new Path(base + "srcLen1");
     Path dstWithLen2 = new Path(base + "dstLen2");
     DFSTestUtil.createFile(fs, srcWithLen1, 1, replFactor, srcSeed);
     DFSTestUtil.createFile(fs, dstWithLen2, 2, replFactor, srcSeed);
-    try {
-      DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithLen1,
-          null, fs, dstWithLen2, false);
-      Assert.fail("Expected different lengths comparison to fail!");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Mismatch in length", e);
-    }
+
+    intercept(IOException.class, DistCpConstants.LENGTH_MISMATCH_ERROR_MSG,
+        () -> DistCpUtils.compareFileLengthsAndChecksums(1, fs,
+                srcWithLen1, null, fs, dstWithLen2, false, 2));
 
     // checksums matched
     Path srcWithChecksum1 = new Path(base + "srcChecksum1");
@@ -1248,28 +1245,24 @@
         replFactor, srcSeed);
     DFSTestUtil.createFile(fs, dstWithChecksum1, 1024,
         replFactor, srcSeed);
-    DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithChecksum1,
-        null, fs, dstWithChecksum1, false);
-    DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithChecksum1,
+    DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1,
+        null, fs, dstWithChecksum1, false, 1024);
+    DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1,
         fs.getFileChecksum(srcWithChecksum1), fs, dstWithChecksum1,
-        false);
+        false, 1024);
 
     // checksums mismatched
     Path dstWithChecksum2 = new Path(base + "dstChecksum2");
     DFSTestUtil.createFile(fs, dstWithChecksum2, 1024,
         replFactor, dstSeed);
-    try {
-      DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithChecksum1,
-          null, fs, dstWithChecksum2, false);
-      Assert.fail("Expected different checksums comparison to fail!");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Checksum mismatch", e);
-    }
+    intercept(IOException.class, DistCpConstants.CHECKSUM_MISMATCH_ERROR_MSG,
+        () -> DistCpUtils.compareFileLengthsAndChecksums(1024, fs,
+               srcWithChecksum1, null, fs, dstWithChecksum2,
+               false, 1024));
 
     // checksums mismatched but skipped
-    DistCpUtils.compareFileLengthsAndChecksums(fs, srcWithChecksum1,
-        null, fs, dstWithChecksum2, true);
+    DistCpUtils.compareFileLengthsAndChecksums(1024, fs, srcWithChecksum1,
+        null, fs, dstWithChecksum2, true, 1024);
   }
 
   private static Random rand = new Random();
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtilsWithCombineMode.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtilsWithCombineMode.java
index 5d44ab0..306ac08 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtilsWithCombineMode.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtilsWithCombineMode.java
@@ -109,7 +109,7 @@
     DFSTestUtil.createFile(fs, dst, 256, 1024, 1024,
         rf, seed);
     // then compare
-    DistCpUtils.compareFileLengthsAndChecksums(fs, src,
-        null, fs, dst, false);
+    DistCpUtils.compareFileLengthsAndChecksums(1024, fs, src,
+        null, fs, dst, false, 1024);
   }
 }
diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/BlockPlacementPolicyAlwaysSatisfied.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/BlockPlacementPolicyAlwaysSatisfied.java
index 34c1951..de12aec 100644
--- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/BlockPlacementPolicyAlwaysSatisfied.java
+++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/BlockPlacementPolicyAlwaysSatisfied.java
@@ -42,6 +42,11 @@
     public String getErrorDescription() {
       return null;
     }
+
+    @Override
+    public int getAdditionalReplicasRequired() {
+      return 0;
+    }
   }
 
   @Override
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 6ed28d9..f99038e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
+import java.security.Security;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -150,6 +151,10 @@
     SLS, RUMEN, SYNTH
   }
 
+  public static final String NETWORK_CACHE_TTL = "networkaddress.cache.ttl";
+  public static final String NETWORK_NEGATIVE_CACHE_TTL =
+      "networkaddress.cache.negative.ttl";
+
   private TraceType inputType;
   private SynthTraceJobProducer stjp;
 
@@ -241,6 +246,9 @@
 
   public void start() throws IOException, ClassNotFoundException, YarnException,
       InterruptedException {
+
+    enableDNSCaching(getConf());
+
     // start resource manager
     startRM();
     // start node managers
@@ -260,6 +268,23 @@
     runner.start();
   }
 
+  /**
+   * Enables DNS caching based on config. If DNS caching is enabled, the
+   * DNS cache TTL is set to infinite. Since SLS adds randomly generated
+   * node names, DNS resolution can take significant time and cause
+   * erroneous results. For more details, see <a href=
+   * "https://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html">
+   * Java Networking Properties</a>.
+   * @param conf Configuration object.
+   */
+  static void enableDNSCaching(Configuration conf) {
+    if (conf.getBoolean(SLSConfiguration.DNS_CACHING_ENABLED,
+        SLSConfiguration.DNS_CACHING_ENABLED_DEFAULT)) {
+      Security.setProperty(NETWORK_CACHE_TTL, "-1");
+      Security.setProperty(NETWORK_NEGATIVE_CACHE_TTL, "-1");
+    }
+  }
+
   private void startRM() throws ClassNotFoundException, YarnException {
     Configuration rmConf = new YarnConfiguration(getConf());
     String schedulerClass = rmConf.get(YarnConfiguration.RM_SCHEDULER);
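
A minimal sketch of what the opt-in does, without relying on the package-private `enableDNSCaching` method (the key is `yarn.sls.dns.caching.enabled`, assembled from the `SLSConfiguration` constants below):

```java
import java.security.Security;

import org.apache.hadoop.conf.Configuration;

public class SlsDnsCachingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setBoolean("yarn.sls.dns.caching.enabled", true);
    if (conf.getBoolean("yarn.sls.dns.caching.enabled", false)) {
      // Same effect as SLSRunner.enableDNSCaching(conf): cache successful and
      // failed DNS lookups forever, since SLS nodes use random host names.
      Security.setProperty("networkaddress.cache.ttl", "-1");
      Security.setProperty("networkaddress.cache.negative.ttl", "-1");
    }
  }
}
```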
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
index 34b89b6..119960c 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
@@ -28,6 +28,9 @@
 public class SLSConfiguration {
   // sls
   public static final String PREFIX = "yarn.sls.";
+  public static final String DNS_CACHING_ENABLED = PREFIX
+      + "dns.caching.enabled";
+  public static final boolean DNS_CACHING_ENABLED_DEFAULT = false;
   // runner
   public static final String RUNNER_PREFIX = PREFIX + "runner.";
   public static final String RUNNER_POOL_SIZE = RUNNER_PREFIX + "pool.size";
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
index 668be14..bfbd592 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
@@ -64,7 +64,9 @@
 
   @After
   public void tearDown() throws InterruptedException {
-    sls.stop();
+    if (sls != null) {
+      sls.stop();
+    }
   }
 
   public void runSLS(Configuration conf, long timeout) throws Exception {
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
index abb3b5e..2463ccf 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
@@ -22,14 +22,18 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.*;
 
+import java.security.Security;
 import java.util.*;
 
+import static org.junit.Assert.assertEquals;
+
 /**
  * This test performs simple runs of the SLS with different trace types and
  * schedulers.
@@ -86,4 +90,39 @@
     runSLS(conf, timeTillShutdownInsec);
   }
 
+  /**
+   * Test to check whether caching is enabled based on config.
+   */
+  @Test
+  public void testEnableCaching() {
+    String networkCacheDefault = Security.getProperty(
+        SLSRunner.NETWORK_CACHE_TTL);
+    String networkNegativeCacheDefault =
+        Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL);
+
+    try {
+      Configuration conf = new Configuration(false);
+      // check when dns caching is disabled
+      conf.setBoolean(SLSConfiguration.DNS_CACHING_ENABLED, false);
+      SLSRunner.enableDNSCaching(conf);
+      assertEquals(networkCacheDefault,
+          Security.getProperty(SLSRunner.NETWORK_CACHE_TTL));
+      assertEquals(networkNegativeCacheDefault,
+          Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL));
+
+      // check when dns caching is enabled
+      conf.setBoolean(SLSConfiguration.DNS_CACHING_ENABLED, true);
+      SLSRunner.enableDNSCaching(conf);
+      assertEquals("-1",
+          Security.getProperty(SLSRunner.NETWORK_CACHE_TTL));
+      assertEquals("-1",
+          Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL));
+    } finally {
+      // set security settings back to default
+      Security.setProperty(SLSRunner.NETWORK_CACHE_TTL,
+          String.valueOf(networkCacheDefault));
+      Security.setProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL,
+          String.valueOf(networkNegativeCacheDefault));
+    }
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 91b5d8d..894d37c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -118,45 +118,22 @@
     </resources>
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-                <param>${basedir}/src/main/proto/server</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>yarn_protos.proto</include>
-                  <include>yarn_service_protos.proto</include>
-                  <include>applicationmaster_protocol.proto</include>
-                  <include>applicationclient_protocol.proto</include>
-                  <include>containermanagement_protocol.proto</include>
-                  <include>server/yarn_server_resourcemanager_service_protos.proto</include>
-                  <include>server/resourcemanager_administration_protocol.proto</include>
-                  <include>application_history_client.proto</include>
-                  <include>server/application_history_server.proto</include>
-                  <include>client_SCM_protocol.proto</include>
-                  <include>server/SCM_Admin_protocol.proto</include>
-                  <include>yarn_csi_adaptor.proto</include>
-                  <include>YarnCsiAdaptor.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
       </plugin>
-
       <plugin>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7b05905..83871a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -23,7 +23,9 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -3789,6 +3791,26 @@
   public static final String DEFAULT_NODELABEL_CONFIGURATION_TYPE =
       CENTRALIZED_NODELABEL_CONFIGURATION_TYPE;
 
+  public static final String EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX
+      = "exclusive-enforced-partitions";
+
+  public static final String EXCLUSIVE_ENFORCED_PARTITIONS = NODE_LABELS_PREFIX
+      + EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX;
+
+  @Private
+  public static Set<String> getExclusiveEnforcedPartitions(
+      Configuration conf) {
+    Set<String> exclusiveEnforcedPartitions = new HashSet<>();
+    String[] configuredPartitions = conf.getStrings(
+        EXCLUSIVE_ENFORCED_PARTITIONS);
+    if (configuredPartitions != null) {
+      for (String partition : configuredPartitions) {
+        exclusiveEnforcedPartitions.add(partition);
+      }
+    }
+    return exclusiveEnforcedPartitions;
+  }
+
   public static final String MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY =
       YARN_PREFIX + "cluster.max-application-priority";
 
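
A minimal usage sketch of the new helper (assuming `NODE_LABELS_PREFIX` resolves to `yarn.node-labels.`, so the full key is `yarn.node-labels.exclusive-enforced-partitions`; the partition names are illustrative):

```java
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ExclusiveEnforcedPartitionsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Comma-separated list of partition names.
    conf.set(YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS, "gpu,fpga");
    Set<String> partitions =
        YarnConfiguration.getExclusiveEnforcedPartitions(conf);
    System.out.println(partitions);  // e.g. [gpu, fpga]
  }
}
```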
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/YarnCsiAdaptor.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/YarnCsiAdaptor.proto
index 146f5bf..2e954ddd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/YarnCsiAdaptor.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/YarnCsiAdaptor.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "CsiAdaptorProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto
index 7ad06c9..46ae36d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/application_history_client.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ApplicationHistoryProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
index fdd4bc5..9ea2bc6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ApplicationClientProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationmaster_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationmaster_protocol.proto
index d0e3672..a48f66f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationmaster_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationmaster_protocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 /**
  * These .proto interfaces are public and stable.
  * Please see http://wiki.apache.org/hadoop/Compatibility
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_SCM_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_SCM_protocol.proto
index fbc3c42..487b434 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_SCM_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_SCM_protocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ClientSCMProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto
index 1f8cafb..2570278 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ContainerManagementProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/SCM_Admin_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/SCM_Admin_protocol.proto
index 4e46c57..dad3481 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/SCM_Admin_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/SCM_Admin_protocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "SCMAdminProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto
index 0fcf2ac..4827dbc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/application_history_server.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ApplicationHistoryServerProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
index 032aa8e..d06fd61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ResourceManagerAdministrationProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index d37e36a..b28bf74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerResourceManagerServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_csi_adaptor.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_csi_adaptor.proto
index 9b645e1..8ddc17e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_csi_adaptor.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_csi_adaptor.proto
@@ -15,6 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "CsiAdaptorProtos";
 option java_generate_equals_and_hash = true;
@@ -90,4 +92,4 @@
 
 message NodeUnpublishVolumeResponse {
     // Intentionally empty.
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 4573859..c9cc2a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index b58b828..d562cdb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -22,6 +22,7 @@
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index 6205468..8147366 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -41,31 +41,17 @@
     
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>ClientAMProtocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
             </configuration>
           </execution>
         </executions>
       </plugin>
-
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
@@ -82,6 +68,7 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
+          <forkedProcessTimeoutInSeconds>1800</forkedProcessTimeoutInSeconds>
           <environmentVariables>
             <JAVA_HOME>${java.home}</JAVA_HOME>
           </environmentVariables>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto
index 85f9b8f..0a84517 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ClientAMProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 81ff752..88e5e24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -65,6 +65,16 @@
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -154,28 +164,21 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-test-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/test/proto</param>
-                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../hadoop-yarn-api/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/test/proto</directory>
-                <includes>
-                  <include>test_amrm_token.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index be54553..146db9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -21,7 +21,10 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.WebResource.Builder;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.MissingArgumentException;
@@ -30,6 +33,9 @@
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
@@ -39,6 +45,9 @@
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -56,6 +65,7 @@
   private static final String REMOVE_QUEUES_OPTION = "removeQueues";
   private static final String UPDATE_QUEUES_OPTION = "updateQueues";
   private static final String GLOBAL_OPTIONS = "globalUpdates";
+  private static final String FORMAT_CONF = "formatConfig";
   private static final String HELP_CMD = "help";
 
   private static final String CONF_ERR_MSG = "Specify configuration key " +
@@ -83,6 +93,9 @@
         "Update queue configurations");
     opts.addOption("global", GLOBAL_OPTIONS, true,
         "Update global scheduler configurations");
+    opts.addOption("format", FORMAT_CONF, false,
+        "Format Scheduler Configuration and reload from" +
+        " capacity-scheduler.xml");
     opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
 
     int exitCode = -1;
@@ -101,6 +114,7 @@
     }
 
     boolean hasOption = false;
+    boolean format = false;
     SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
     try {
       if (parsedCli.hasOption(ADD_QUEUES_OPTION)) {
@@ -121,6 +135,11 @@
         hasOption = true;
         globalUpdates(parsedCli.getOptionValue(GLOBAL_OPTIONS), updateInfo);
       }
+      if (parsedCli.hasOption((FORMAT_CONF))) {
+        hasOption = true;
+        format = true;
+      }
+
     } catch (IllegalArgumentException e) {
       System.err.println(e.getMessage());
       return -1;
@@ -133,18 +152,93 @@
     }
 
     Configuration conf = getConf();
-    return WebAppUtils.execOnActiveRM(conf,
-        this::updateSchedulerConfOnRMNode, updateInfo);
+    if (format) {
+      return WebAppUtils.execOnActiveRM(conf, this::formatSchedulerConf, null);
+    } else {
+      return WebAppUtils.execOnActiveRM(conf,
+          this::updateSchedulerConfOnRMNode, updateInfo);
+    }
   }
 
-  private int updateSchedulerConfOnRMNode(String webAppAddress,
-      SchedConfUpdateInfo updateInfo) throws Exception {
-    Client webServiceClient = Client.create();
+  @VisibleForTesting
+  int formatSchedulerConf(String webAppAddress, WebResource resource)
+      throws Exception {
+    Configuration conf = getConf();
+    SSLFactory clientSslFactory = null;
+    if (YarnConfiguration.useHttps(conf)) {
+      clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    }
+    Client webServiceClient = createWebServiceClient(clientSslFactory);
     ClientResponse response = null;
+    resource = (resource != null) ? resource :
+        webServiceClient.resource(webAppAddress);
+
     try {
-      Builder builder = webServiceClient.resource(webAppAddress)
-          .path("ws").path("v1").path("cluster")
-          .path("scheduler-conf").accept(MediaType.APPLICATION_JSON);
+      Builder builder = null;
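+      // In a secure cluster the AuthenticatedURL-backed client negotiates
+      // the caller's identity; otherwise the current user is passed
+      // explicitly through the user.name query parameter.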
+      if (UserGroupInformation.isSecurityEnabled()) {
+        builder = resource
+            .path("ws").path("v1").path("cluster")
+            .path("/scheduler-conf/format")
+            .accept(MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON);
+      } else {
+        builder = resource
+            .path("ws").path("v1").path("cluster")
+            .path("/scheduler-conf/format").queryParam("user.name",
+            UserGroupInformation.getCurrentUser().getShortUserName())
+            .accept(MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON);
+      }
+
+      response = builder.get(ClientResponse.class);
+      if (response != null) {
+        if (response.getStatus() == Status.OK.getStatusCode()) {
+          System.out.println(response.getEntity(String.class));
+          return 0;
+        } else {
+          System.err.println("Failed to format scheduler configuration: " +
+              response.getEntity(String.class));
+        }
+      } else {
+        System.err.println("Failed to format scheduler configuration: " +
+            "null response");
+      }
+      return -1;
+    } finally {
+      if (response != null) {
+        response.close();
+      }
+      if (webServiceClient != null) {
+        webServiceClient.destroy();
+      }
+      if (clientSslFactory != null) {
+        clientSslFactory.destroy();
+      }
+    }
+  }
+
+  @VisibleForTesting
+  int updateSchedulerConfOnRMNode(String webAppAddress,
+      SchedConfUpdateInfo updateInfo) throws Exception {
+    Configuration conf = getConf();
+    SSLFactory clientSslFactory = null;
+    if (YarnConfiguration.useHttps(conf)) {
+      clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+    }
+    Client webServiceClient = createWebServiceClient(clientSslFactory);
+    ClientResponse response = null;
+    WebResource resource = webServiceClient.resource(webAppAddress);
+
+    try {
+      Builder builder = null;
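+      // Same pattern as formatSchedulerConf: SPNEGO identifies the caller
+      // when security is enabled, otherwise fall back to the user.name
+      // query parameter.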
+      if (UserGroupInformation.isSecurityEnabled()) {
+        builder = resource.path("ws").path("v1").path("cluster")
+            .path("scheduler-conf").accept(MediaType.APPLICATION_JSON);
+      } else {
+        builder = resource.path("ws").path("v1").path("cluster")
+            .queryParam("user.name",
+            UserGroupInformation.getCurrentUser().getShortUserName())
+            .path("scheduler-conf").accept(MediaType.APPLICATION_JSON);
+      }
+
       builder.entity(YarnWebServiceUtils.toJson(updateInfo,
           SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON);
       response = builder.put(ClientResponse.class);
@@ -164,10 +258,42 @@
       if (response != null) {
         response.close();
       }
-      webServiceClient.destroy();
+      if (webServiceClient != null) {
+        webServiceClient.destroy();
+      }
+      if (clientSslFactory != null) {
+        clientSslFactory.destroy();
+      }
     }
   }
 
+  private Client createWebServiceClient(SSLFactory clientSslFactory) {
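+    // Jersey client whose connections are opened through AuthenticatedURL,
+    // so SPNEGO (and SSL, when a factory is supplied) is handled for every
+    // request issued by the CLI.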
+    Client webServiceClient = new Client(new URLConnectionClientHandler(
+        new HttpURLConnectionFactory() {
+        @Override
+        public HttpURLConnection getHttpURLConnection(URL url)
+            throws IOException {
+          AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+          AuthenticatedURL aUrl;
+          HttpURLConnection conn = null;
+          try {
+            if (clientSslFactory != null) {
+              clientSslFactory.init();
+              aUrl = new AuthenticatedURL(null, clientSslFactory);
+            } else {
+              aUrl = new AuthenticatedURL();
+            }
+            conn = aUrl.openConnection(url, token);
+          } catch (Exception e) {
+            throw new IOException(e);
+          }
+          return conn;
+        }
+      }));
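+    // A null chunk size disables chunked transfer encoding for requests
+    // issued by this client.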
+    webServiceClient.setChunkedEncodingSize(null);
+    return webServiceClient;
+  }
 
   @VisibleForTesting
   void addQueues(String args, SchedConfUpdateInfo updateInfo) {
@@ -253,7 +379,8 @@
         + "[-remove \"queueRemovePath1;queueRemovePath2\"] "
         + "[-update \"queueUpdatePath1:confKey1=confVal1\"] "
         + "[-global globalConfKey1=globalConfVal1,"
-        + "globalConfKey2=globalConfVal2]\n"
+        + "globalConfKey2=globalConfVal2] "
+        + "[-format]\n"
         + "Example (adding queues): yarn schedulerconf -add "
         + "\"root.a.a1:capacity=100,maximum-capacity=100;root.a.a2:capacity=0,"
         + "maximum-capacity=0\"\n"
@@ -264,6 +391,8 @@
         + "maximum-capacity=75\"\n"
         + "Example (global scheduler update): yarn schedulerconf "
         + "-global yarn.scheduler.capacity.maximum-applications=10000\n"
+        + "Example (format scheduler configuration): yarn schedulerconf "
+        + "-format\n"
         + "Note: This is an alpha feature, the syntax/options are subject to "
         + "change, please run at your own risk.");
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 7a229dc..03bad0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -36,6 +36,7 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+import com.google.common.collect.ImmutableList;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.ClientResponse.Status;
@@ -52,6 +53,7 @@
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -93,9 +95,14 @@
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestLogsCLI {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestLogsCLI.class);
+
   ByteArrayOutputStream sysOutStream;
   private PrintStream sysOut;
 
@@ -402,13 +409,15 @@
     List<String> logTypes = new ArrayList<String>();
     logTypes.add("syslog");
     // create container logs in localLogDir
-    createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes);
-    createContainerLogInLocalDir(appLogsDir, containerId2, fs, logTypes);
-
+    createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes,
+        ImmutableList.of("empty"));
+    createContainerLogInLocalDir(appLogsDir, containerId2, fs, logTypes,
+        Collections.emptyList());
     // create two logs for container3 in localLogDir
     logTypes.add("stdout");
     logTypes.add("stdout1234");
-    createContainerLogInLocalDir(appLogsDir, containerId3, fs, logTypes);
+    createContainerLogInLocalDir(appLogsDir, containerId3, fs, logTypes,
+        Collections.emptyList());
 
     Path path =
         new Path(remoteLogRootDir + ugi.getShortUserName()
@@ -449,6 +458,7 @@
     cli.setConf(configuration);
 
     int exitCode = cli.run(new String[] { "-applicationId", appId.toString() });
+    LOG.info(sysOutStream.toString());
     assertTrue(exitCode == 0);
     assertTrue(sysOutStream.toString().contains(
         logMessage(containerId1, "syslog")));
@@ -460,6 +470,8 @@
         logMessage(containerId3, "stdout")));
     assertTrue(sysOutStream.toString().contains(
         logMessage(containerId3, "stdout1234")));
+    assertTrue(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
     sysOutStream.reset();
 
     exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
@@ -475,6 +487,8 @@
         logMessage(containerId3, "stdout")));
     assertTrue(sysOutStream.toString().contains(
         logMessage(containerId3, "stdout1234")));
+    assertTrue(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
     sysOutStream.reset();
 
     exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
@@ -490,6 +504,8 @@
         logMessage(containerId3, "stdout")));
     assertTrue(sysOutStream.toString().contains(
         logMessage(containerId3, "stdout1234")));
+    assertTrue(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
     int fullSize = sysOutStream.toByteArray().length;
     sysOutStream.reset();
 
@@ -506,6 +522,8 @@
         logMessage(containerId3, "stdout")));
     assertFalse(sysOutStream.toString().contains(
         logMessage(containerId3, "stdout1234")));
+    assertFalse(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
     sysOutStream.reset();
 
     exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
@@ -521,6 +539,8 @@
         logMessage(containerId3, "stdout")));
     assertTrue(sysOutStream.toString().contains(
         logMessage(containerId3, "stdout1234")));
+    assertFalse(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
     sysOutStream.reset();
 
     exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
@@ -591,6 +611,15 @@
         (fullContextSize - fileContentSize - tailContentSize), 5));
     sysOutStream.reset();
 
+    // specify how many bytes we should get from an empty log
+    exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
+        "-containerId", containerId1.toString(), "-log_files", "empty",
+        "-size", "5"});
+    assertTrue(exitCode == 0);
+    assertTrue(sysOutStream.toString().contains(
+        createEmptyLog("empty")));
+    sysOutStream.reset();
+
     // specify a negative number, it would get the last n bytes from
     // container log
     exitCode = cli.run(new String[] {"-applicationId", appId.toString(),
@@ -794,7 +823,8 @@
     List<String> logTypes = new ArrayList<String>();
     logTypes.add(fileName);
     // create container logs in localLogDir
-    createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes);
+    createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes,
+        Collections.emptyList());
 
     Path containerDirPath = new Path(appLogsDir, containerId1.toString());
     Path logPath = new Path(containerDirPath, fileName);
@@ -968,7 +998,8 @@
       logTypes.add("syslog");
 
       // create container logs in localLogDir for app
-      createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes);
+      createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes,
+          Collections.emptyList());
 
       // create the remote app dir for app but for a different user testUser
       Path path = new Path(remoteLogRootDir + testUser +
@@ -1547,7 +1578,8 @@
     logTypes.add("syslog");
     // create container logs in localLogDir
     for (ContainerId containerId : containerIds) {
-      createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes);
+      createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes,
+          Collections.emptyList());
     }
     Path path =
         new Path(remoteLogRootDir + ugi.getShortUserName()
@@ -1564,7 +1596,8 @@
   }
 
   private static void createContainerLogInLocalDir(Path appLogsDir,
-      ContainerId containerId, FileSystem fs, List<String> logTypes) throws Exception {
+      ContainerId containerId, FileSystem fs, List<String> logTypes,
+      List<String> emptyLogTypes) throws Exception {
     Path containerLogsDir = new Path(appLogsDir, containerId.toString());
     if (fs.exists(containerLogsDir)) {
       fs.delete(containerLogsDir, true);
@@ -1576,6 +1609,12 @@
       writer.write(logMessage(containerId, logType));
       writer.close();
     }
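+    // Also create zero-byte files for the requested empty log types.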
+    for (String emptyLogType : emptyLogTypes) {
+      Writer writer =
+          new FileWriter(new File(containerLogsDir.toString(), emptyLogType));
+      writer.write("");
+      writer.close();
+    }
   }
 
   private static String logMessage(ContainerId containerId, String logType) {
@@ -1584,6 +1623,10 @@
     return sb.toString();
   }
 
+  private static String createEmptyLog(String logType) {
+    return "LogContents:\n\nEnd of LogType:" + logType;
+  }
+
   private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi,
       Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
       ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
index 5364e83..8a6c9c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
@@ -22,12 +22,48 @@
 import org.junit.Test;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
 import java.io.PrintStream;
+import java.security.Principal;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+
+import com.google.inject.Guice;
+import com.google.inject.Singleton;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletResponse;
+import javax.servlet.ServletRequest;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -35,7 +71,7 @@
 /**
  * Class for testing {@link SchedConfCLI}.
  */
-public class TestSchedConfCLI {
+public class TestSchedConfCLI extends JerseyTestBase {
 
   private ByteArrayOutputStream sysOutStream;
   private PrintStream sysOut;
@@ -45,6 +81,23 @@
 
   private SchedConfCLI cli;
 
+  private static MockRM rm;
+  private static String userName;
+  private static CapacitySchedulerConfiguration csConf;
+
+  private static final File CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
+  private static final File OLD_CONF_FILE = new File(new File("target",
+      "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE + ".tmp");
+
+  public TestSchedConfCLI() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
   @Before
   public void setUp() {
     sysOutStream = new ByteArrayOutputStream();
@@ -58,6 +111,138 @@
     cli = new SchedConfCLI();
   }
 
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+          ResourceScheduler.class);
+      conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
+          YarnConfiguration.MEMORY_CONFIGURATION_STORE);
+
+      try {
+        userName = UserGroupInformation.getCurrentUser().getShortUserName();
+      } catch (IOException ioe) {
+        throw new RuntimeException("Unable to get current user name "
+            + ioe.getMessage(), ioe);
+      }
+
+      csConf = new CapacitySchedulerConfiguration(new Configuration(false),
+          false);
+      setupQueueConfiguration(csConf);
+
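+      // CapacityScheduler loads capacity-scheduler.xml from the classpath,
+      // so write the test queue configuration into target/test-classes and
+      // set the original file aside so it can be restored after the test.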
+      try {
+        if (CONF_FILE.exists()) {
+          if (!CONF_FILE.renameTo(OLD_CONF_FILE)) {
+            throw new RuntimeException("Failed to rename conf file");
+          }
+        }
+        FileOutputStream out = new FileOutputStream(CONF_FILE);
+        csConf.writeXml(out);
+        out.close();
+      } catch (IOException e) {
+        throw new RuntimeException("Failed to write XML file", e);
+      }
+
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+      filter("/*").through(TestRMCustomAuthFilter.class);
+    }
+  }
+
+  /**
+   * Custom filter which sets the Remote User for testing purpose.
+   */
+  @Singleton
+  public static class TestRMCustomAuthFilter extends AuthenticationFilter {
+    @Override
+    public void init(FilterConfig filterConfig) {
+    }
+
+    @Override
+    public void doFilter(ServletRequest request, ServletResponse response,
+        FilterChain filterChain) throws IOException, ServletException {
+      HttpServletRequest httpRequest = (HttpServletRequest)request;
+      HttpServletResponse httpResponse = (HttpServletResponse) response;
+      httpRequest = new HttpServletRequestWrapper(httpRequest) {
+        public String getAuthType() {
+          return null;
+        }
+
+        public String getRemoteUser() {
+          return userName;
+        }
+
+        public Principal getUserPrincipal() {
+          return new Principal() {
+            @Override
+            public String getName() {
+              return userName;
+            }
+          };
+        }
+      };
+      doFilter(filterChain, httpRequest, httpResponse);
+    }
+  }
+
+  private static void setupQueueConfiguration(
+      CapacitySchedulerConfiguration config) {
+    config.setQueues(CapacitySchedulerConfiguration.ROOT,
+        new String[]{"testqueue"});
+    String a = CapacitySchedulerConfiguration.ROOT + ".testqueue";
+    config.setCapacity(a, 100f);
+    config.setMaximumCapacity(a, 100f);
+  }
+
+  @Test(timeout = 10000)
+  public void testFormatSchedulerConf() throws Exception {
+    try {
+      super.setUp();
+      GuiceServletConfig.setInjector(
+          Guice.createInjector(new WebServletModule()));
+      ResourceScheduler scheduler = rm.getResourceScheduler();
+      MutableConfigurationProvider provider =
+          ((MutableConfScheduler) scheduler).getMutableConfProvider();
+
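+      // Apply a global update through the provider so there is a mutated
+      // value for the format call to wipe out.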
+      SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
+      HashMap<String, String> globalUpdates = new HashMap<>();
+      globalUpdates.put("schedKey1", "schedVal1");
+      schedUpdateInfo.setGlobalParams(globalUpdates);
+
+      provider.logAndApplyMutation(UserGroupInformation.getCurrentUser(),
+          schedUpdateInfo);
+      rm.getRMContext().getRMAdminService().refreshQueues();
+      provider.confirmPendingMutation(true);
+
+      Configuration schedulerConf = provider.getConfiguration();
+      assertEquals("schedVal1", schedulerConf.get("schedKey1"));
+
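+      // Formatting should drop the mutated key and reload the scheduler
+      // configuration from capacity-scheduler.xml.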
+      int exitCode = cli.formatSchedulerConf("", resource());
+      assertEquals(0, exitCode);
+
+      schedulerConf = provider.getConfiguration();
+      assertNull(schedulerConf.get("schedKey1"));
+    } finally {
+      if (rm != null) {
+        rm.stop();
+      }
+      CONF_FILE.delete();
+      if (OLD_CONF_FILE.exists()) {
+        if (!OLD_CONF_FILE.renameTo(CONF_FILE)) {
+          throw new RuntimeException("Failed to re-copy old" +
+              " configuration file");
+        }
+      }
+      super.tearDown();
+    }
+  }
+
   @Test(timeout = 10000)
   public void testInvalidConf() throws Exception {
     // conf pair with no key should be invalid
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/proto/test_amrm_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/proto/test_amrm_token.proto
index 6773277..628f5e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/proto/test_amrm_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/proto/test_amrm_token.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnSecurityTestAMRMTokenProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 304fed2..d4b4ee8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -256,6 +256,26 @@
         </configuration>
       </plugin>
       <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>src-compile-protoc</id>
+            <configuration>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
@@ -276,27 +296,6 @@
             </configuration>
           </execution>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>yarn_security_token.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-          <execution>
             <id>resource-gz</id>
             <phase>generate-resources</phase>
             <goals>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
index c242d89..5bc0b14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
@@ -44,6 +44,26 @@
       "Container: %s on %s";
 
   /**
+   * Formats the header of an aggregated log file.
+   */
+  private static byte[] formatContainerLogHeader(String containerId,
+      String nodeId, ContainerLogAggregationType logType, String fileName,
+      String lastModifiedTime, long fileLength) {
+    StringBuilder sb = new StringBuilder();
+    String containerStr = String.format(
+        LogToolUtils.CONTAINER_ON_NODE_PATTERN,
+        containerId, nodeId);
+    sb.append(containerStr + "\n")
+        .append("LogAggregationType: " + logType + "\n")
+        .append(StringUtils.repeat("=", containerStr.length()) + "\n")
+        .append("LogType:" + fileName + "\n")
+        .append("LogLastModifiedTime:" + lastModifiedTime + "\n")
+        .append("LogLength:" + fileLength + "\n")
+        .append("LogContents:\n");
+    return sb.toString().getBytes(Charset.forName("UTF-8"));
+  }
+
+  /**
    * Output container log.
    * @param containerId the containerId
    * @param nodeId the nodeId
@@ -84,22 +104,10 @@
         : (int) pendingRead;
     int len = fis.read(buf, 0, toRead);
     boolean keepGoing = (len != -1 && curRead < totalBytesToRead);
-    if (keepGoing) {
-      StringBuilder sb = new StringBuilder();
-      String containerStr = String.format(
-          LogToolUtils.CONTAINER_ON_NODE_PATTERN,
-          containerId, nodeId);
-      sb.append(containerStr + "\n")
-          .append("LogAggregationType: " + logType + "\n")
-          .append(StringUtils.repeat("=", containerStr.length()) + "\n")
-          .append("LogType:" + fileName + "\n")
-          .append("LogLastModifiedTime:" + lastModifiedTime + "\n")
-          .append("LogLength:" + Long.toString(fileLength) + "\n")
-          .append("LogContents:\n");
-      byte[] b = sb.toString().getBytes(
-          Charset.forName("UTF-8"));
-      os.write(b, 0, b.length);
-    }
+
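+    // Write the header unconditionally so that zero-length logs still appear
+    // in the output; the copy loop below is skipped when there is nothing to
+    // read.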
+    byte[] b = formatContainerLogHeader(containerId, nodeId, logType, fileName,
+        lastModifiedTime, fileLength);
+    os.write(b, 0, b.length);
     while (keepGoing) {
       os.write(buf, 0, len);
       curRead += len;
@@ -132,22 +140,12 @@
       }
     }
 
+    // output log summary
+    byte[] b = formatContainerLogHeader(containerId, nodeId, logType, fileName,
+        lastModifiedTime, fileLength);
+    os.write(b, 0, b.length);
+
     if (totalBytesToRead > 0) {
-      // output log summary
-      StringBuilder sb = new StringBuilder();
-      String containerStr = String.format(
-          LogToolUtils.CONTAINER_ON_NODE_PATTERN,
-          containerId, nodeId);
-      sb.append(containerStr + "\n")
-          .append("LogAggregationType: " + logType + "\n")
-          .append(StringUtils.repeat("=", containerStr.length()) + "\n")
-          .append("LogType:" + fileName + "\n")
-          .append("LogLastModifiedTime:" + lastModifiedTime + "\n")
-          .append("LogLength:" + Long.toString(fileLength) + "\n")
-          .append("LogContents:\n");
-      byte[] b = sb.toString().getBytes(
-          Charset.forName("UTF-8"));
-      os.write(b, 0, b.length);
       // output log content
       FileChannel inputChannel = fis.getChannel();
       WritableByteChannel outputChannel = Channels.newChannel(os);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
index 16e11aa..29bc236 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnSecurityTokenProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 55e908d..4393792 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -4301,4 +4301,13 @@
     <value>60000</value>
   </property>
 
+  <property>
+    <description>
+      Comma-separated list of partitions. If a label P is in this list,
+      then the RM will enforce that an app has resource requests with label
+      P if and only if that app's node label expression is P.
+    </description>
+    <name>yarn.node-labels.exclusive-enforced-partitions</name>
+    <value></value>
+  </property>
 </configuration>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index 6c26c40..9ae2983 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -67,7 +67,6 @@
 
   private static final File testWorkDir = new File("target",
       "TestAggregatedLogFormat");
-  private static final Configuration conf = new Configuration();
   private static final FileSystem fs;
   private static final char filler = 'x';
   private static final Logger LOG = LoggerFactory
@@ -75,7 +74,7 @@
 
   static {
     try {
-      fs = FileSystem.get(conf);
+      fs = FileSystem.get(new Configuration());
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
@@ -282,6 +281,45 @@
     Assert.assertEquals(expectedLength, s.length());
   }
 
+  @Test
+  public void testZeroLengthLog() throws IOException {
+    Configuration conf = new Configuration();
+    File workDir = new File(testWorkDir, "testZeroLength");
+    Path remoteAppLogFile = new Path(workDir.getAbsolutePath(),
+        "aggregatedLogFile");
+    Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
+    ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
+    Path t = new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
+        .getApplicationId().toString());
+    Path srcFilePath = new Path(t, testContainerId.toString());
+
+    // Create zero byte file
+    writeSrcFile(srcFilePath, "stdout", 0);
+
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    try (LogWriter logWriter = new LogWriter()) {
+      logWriter.initialize(conf, remoteAppLogFile, ugi);
+
+      LogKey logKey = new LogKey(testContainerId);
+      LogValue logValue =
+          new LogValue(Collections.singletonList(srcFileRoot.toString()),
+              testContainerId, ugi.getShortUserName());
+
+      logWriter.append(logKey, logValue);
+    }
+
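+    // Read the aggregated file back and verify the zero-byte log is rendered
+    // with an empty body between the header and the end marker.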
+    LogReader logReader = new LogReader(conf, remoteAppLogFile);
+    LogKey rLogKey = new LogKey();
+    DataInputStream dis = logReader.next(rLogKey);
+    Writer writer = new StringWriter();
+    LogReader.readAcontainerLogs(dis, writer);
+
+    Assert.assertEquals("LogType:stdout\n" +
+        "LogLength:0\n" +
+        "Log Contents:\n\n" +
+        "End of LogType:stdout\n\n", writer.toString());
+  }
+
   @Test(timeout=10000)
   public void testContainerLogsFileAccess() throws IOException {
     // This test will run only if NativeIO is enabled as SecureIOUtils 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
index e63e469..098f3be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
@@ -85,6 +85,7 @@
       .createImmutable((short) (0777));
   private static final UserGroupInformation USER_UGI = UserGroupInformation
       .createRemoteUser("testUser");
+  private static final String ZERO_FILE = "zero";
   private FileSystem fs;
   private ApplicationId appId;
   private ContainerId containerId;
@@ -153,6 +154,8 @@
           logType);
       files.add(file);
     }
+    files.add(createZeroLocalLogFile(appLogsDir));
+
     LogValue value = mock(LogValue.class);
     when(value.getPendingLogFilesToUploadForThisContainer()).thenReturn(files);
 
@@ -212,12 +215,13 @@
     for (ContainerLogMeta log : meta) {
       assertEquals(containerId.toString(), log.getContainerId());
       assertEquals(nodeId.toString(), log.getNodeId());
-      assertEquals(3, log.getContainerLogMeta().size());
+      assertEquals(4, log.getContainerLogMeta().size());
       for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
         fileNames.add(file.getFileName());
       }
     }
     fileNames.removeAll(logTypes);
+    fileNames.remove(ZERO_FILE);
     assertTrue(fileNames.isEmpty());
 
     boolean foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
@@ -226,6 +230,7 @@
       assertTrue(sysOutStream.toString().contains(logMessage(
           containerId, logType)));
     }
+    assertZeroFileIsContained(sysOutStream.toString());
     sysOutStream.reset();
 
     Configuration factoryConf = new Configuration(getConf());
@@ -297,12 +302,13 @@
     for (ContainerLogMeta log : meta) {
       assertEquals(containerId.toString(), log.getContainerId());
       assertEquals(nodeId.toString(), log.getNodeId());
-      assertEquals(3, log.getContainerLogMeta().size());
+      assertEquals(4, log.getContainerLogMeta().size());
       for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
         fileNames.add(file.getFileName());
       }
     }
     fileNames.removeAll(logTypes);
+    fileNames.remove(ZERO_FILE);
     assertTrue(fileNames.isEmpty());
     foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
     assertTrue(foundLogs);
@@ -333,6 +339,7 @@
       }
     }
     fileNames.removeAll(newLogTypes);
+    fileNames.remove(ZERO_FILE);
     assertTrue(fileNames.isEmpty());
     foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
     assertTrue(foundLogs);
@@ -361,6 +368,7 @@
       }
     }
     fileNames.removeAll(newLogTypes);
+    fileNames.remove(ZERO_FILE);
     assertTrue(fileNames.isEmpty());
     foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
     assertTrue(foundLogs);
@@ -423,8 +431,25 @@
     sysOutStream.reset();
   }
 
+  private void assertZeroFileIsContained(String outStream) {
+    assertTrue(outStream.contains(
+        "LogContents:\n" +
+        "\n" +
+        "End of LogType:zero"));
+  }
+
+  private File createZeroLocalLogFile(Path localLogDir) throws IOException {
+    return createAndWriteLocalLogFile(localLogDir, ZERO_FILE, "");
+  }
+
   private File createAndWriteLocalLogFile(ContainerId containerId,
       Path localLogDir, String logType) throws IOException {
+    return createAndWriteLocalLogFile(localLogDir, logType,
+        logMessage(containerId, logType));
+  }
+
+  private File createAndWriteLocalLogFile(Path localLogDir, String logType,
+      String message) throws IOException {
     File file = new File(localLogDir.toString(), logType);
     if (file.exists()) {
       file.delete();
@@ -433,7 +458,7 @@
     Writer writer = null;
     try {
       writer = new FileWriter(file);
-      writer.write(logMessage(containerId, logType));
+      writer.write(message);
       writer.close();
       return file;
     } finally {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index 8e1be21..14be03c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -205,30 +205,27 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-common/src/main/proto</param>
-                <param>${basedir}/../hadoop-yarn-server-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>yarn_server_timelineserver_recovery.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-yarn-server-common/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/proto/yarn_server_timelineserver_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/proto/yarn_server_timelineserver_recovery.proto
index fc141c2..b53e65c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/proto/yarn_server_timelineserver_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/proto/yarn_server_timelineserver_recovery.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerTimelineServerRecoveryProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 688c06d..cc38f8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -161,37 +161,22 @@
           </execution>
         </executions>
       </plugin>
-
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>distributed_scheduling_am_protocol.proto</include>
-                  <include>yarn_server_common_protos.proto</include>
-                  <include>yarn_server_common_service_protos.proto</include>
-                  <include>yarn_server_common_service_protos.proto</include>
-                  <include>yarn_server_federation_protos.proto</include>
-                  <include>ResourceTracker.proto</include>
-                  <include>SCMUploader.proto</include>
-                  <include>collectornodemanager_protocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java
new file mode 100644
index 0000000..da90167
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/DistributedOpportunisticContainerAllocator.java
@@ -0,0 +1,357 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.scheduler;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;
+import org.apache.hadoop.yarn.server.metrics.OpportunisticSchedulerMetrics;
+import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * <p>
+ * The DistributedOpportunisticContainerAllocator allocates containers on a
+ * given list of nodes, after modifying the container sizes to respect the
+ * limits set by the ResourceManager. It tries to distribute the containers
+ * as evenly as possible.
+ * </p>
+ */
+public class DistributedOpportunisticContainerAllocator
+    extends OpportunisticContainerAllocator {
+
+  private static final int NODE_LOCAL_LOOP = 0;
+  private static final int RACK_LOCAL_LOOP = 1;
+  private static final int OFF_SWITCH_LOOP = 2;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DistributedOpportunisticContainerAllocator.class);
+
+  /**
+   * Create a new distributed opportunistic container allocator.
+   * @param tokenSecretManager TokenSecretManager
+   */
+  public DistributedOpportunisticContainerAllocator(
+      BaseContainerTokenSecretManager tokenSecretManager) {
+    super(tokenSecretManager);
+  }
+
+  /**
+   * Create a new distributed opportunistic container allocator.
+   * @param tokenSecretManager TokenSecretManager
+   * @param maxAllocationsPerAMHeartbeat max number of containers to be
+   *                                     allocated in one AM heartbeat
+   */
+  public DistributedOpportunisticContainerAllocator(
+      BaseContainerTokenSecretManager tokenSecretManager,
+      int maxAllocationsPerAMHeartbeat) {
+    super(tokenSecretManager, maxAllocationsPerAMHeartbeat);
+  }
+
+  @Override
+  public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
+      List<ResourceRequest> oppResourceReqs,
+      ApplicationAttemptId applicationAttemptId,
+      OpportunisticContainerContext opportContext, long rmIdentifier,
+      String appSubmitter) throws YarnException {
+
+    // Update black list.
+    updateBlacklist(blackList, opportContext);
+
+    // Add OPPORTUNISTIC requests to the outstanding ones.
+    opportContext.addToOutstandingReqs(oppResourceReqs);
+    Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
+    Set<String> allocatedNodes = new HashSet<>();
+    List<Container> allocatedContainers = new ArrayList<>();
+
+    // Satisfy the outstanding OPPORTUNISTIC requests.
+    boolean continueLoop = true;
+    while (continueLoop) {
+      continueLoop = false;
+      List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
+      for (SchedulerRequestKey schedulerKey :
+          opportContext.getOutstandingOpReqs().descendingKeySet()) {
+        // Allocated containers :
+        //  Key = Requested Capability,
+        //  Value = List of Containers of given cap (the actual container size
+        //          might be different than what is requested, which is why
+        //          we need the requested capability (key) to match against
+        //          the outstanding reqs)
+        int remAllocs = -1;
+        int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
+        if (maxAllocationsPerAMHeartbeat > 0) {
+          remAllocs =
+              maxAllocationsPerAMHeartbeat - allocatedContainers.size()
+                  - getTotalAllocations(allocations);
+          if (remAllocs <= 0) {
+            LOG.info("Not allocating more containers as we have reached max "
+                    + "allocations per AM heartbeat {}",
+                maxAllocationsPerAMHeartbeat);
+            break;
+          }
+        }
+        Map<Resource, List<Allocation>> allocation = allocate(
+            rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
+            appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
+        if (allocation.size() > 0) {
+          allocations.add(allocation);
+          continueLoop = true;
+        }
+      }
+      matchAllocation(allocations, allocatedContainers, opportContext);
+    }
+
+    return allocatedContainers;
+  }
+
+  private Map<Resource, List<Allocation>> allocate(long rmIdentifier,
+      OpportunisticContainerContext appContext, SchedulerRequestKey schedKey,
+      ApplicationAttemptId appAttId, String userName, Set<String> blackList,
+      Set<String> allocatedNodes, int maxAllocations)
+      throws YarnException {
+    Map<Resource, List<Allocation>> containers = new HashMap<>();
+    for (EnrichedResourceRequest enrichedAsk :
+        appContext.getOutstandingOpReqs().get(schedKey).values()) {
+      int remainingAllocs = -1;
+      if (maxAllocations > 0) {
+        int totalAllocated = 0;
+        for (List<Allocation> allocs : containers.values()) {
+          totalAllocated += allocs.size();
+        }
+        remainingAllocs = maxAllocations - totalAllocated;
+        if (remainingAllocs <= 0) {
+          LOG.info("Not allocating more containers as max allocations per AM "
+              + "heartbeat {} has reached", getMaxAllocationsPerAMHeartbeat());
+          break;
+        }
+      }
+      allocateContainersInternal(rmIdentifier, appContext.getAppParams(),
+          appContext.getContainerIdGenerator(), blackList, allocatedNodes,
+          appAttId, appContext.getNodeMap(), userName, containers, enrichedAsk,
+          remainingAllocs);
+      ResourceRequest anyAsk = enrichedAsk.getRequest();
+      if (!containers.isEmpty()) {
+        LOG.info("Opportunistic allocation requested for [priority={}, "
+                + "allocationRequestId={}, num_containers={}, capability={}] "
+                + "allocated = {}", anyAsk.getPriority(),
+            anyAsk.getAllocationRequestId(), anyAsk.getNumContainers(),
+            anyAsk.getCapability(), containers.keySet());
+      }
+    }
+    return containers;
+  }
+
+  private void allocateContainersInternal(long rmIdentifier,
+      AllocationParams appParams, ContainerIdGenerator idCounter,
+      Set<String> blacklist, Set<String> allocatedNodes,
+      ApplicationAttemptId id, Map<String, RemoteNode> allNodes,
+      String userName, Map<Resource, List<Allocation>> allocations,
+      EnrichedResourceRequest enrichedAsk, int maxAllocations)
+      throws YarnException {
+    if (allNodes.size() == 0) {
+      LOG.info("No nodes currently available to " +
+          "allocate OPPORTUNISTIC containers.");
+      return;
+    }
+    ResourceRequest anyAsk = enrichedAsk.getRequest();
+    int toAllocate = anyAsk.getNumContainers()
+        - (allocations.isEmpty() ? 0 :
+        allocations.get(anyAsk.getCapability()).size());
+    toAllocate = Math.min(toAllocate,
+        appParams.getMaxAllocationsPerSchedulerKeyPerRound());
+    if (maxAllocations >= 0) {
+      toAllocate = Math.min(maxAllocations, toAllocate);
+    }
+    int numAllocated = 0;
+    // Node Candidates are selected as follows:
+    // * Node local candidates selected in loop == 0
+    // * Rack local candidates selected in loop == 1
+    // * From loop == 2 onwards, we revert to off switch allocations.
+    int loopIndex = OFF_SWITCH_LOOP;
+    if (enrichedAsk.getNodeLocations().size() > 0) {
+      loopIndex = NODE_LOCAL_LOOP;
+    }
+    while (numAllocated < toAllocate) {
+      Collection<RemoteNode> nodeCandidates =
+          findNodeCandidates(loopIndex, allNodes, blacklist, allocatedNodes,
+              enrichedAsk);
+      for (RemoteNode rNode : nodeCandidates) {
+        String rNodeHost = rNode.getNodeId().getHost();
+        // Ignore black list
+        if (blacklist.contains(rNodeHost)) {
+          LOG.info("Nodes for scheduling has a blacklisted node" +
+              " [" + rNodeHost + "]..");
+          continue;
+        }
+        String location = ResourceRequest.ANY;
+        if (loopIndex == NODE_LOCAL_LOOP) {
+          if (enrichedAsk.getNodeLocations().contains(rNodeHost)) {
+            location = rNodeHost;
+          } else {
+            continue;
+          }
+        } else if (allocatedNodes.contains(rNodeHost)) {
+          LOG.info("Opportunistic container has already been allocated on {}.",
+              rNodeHost);
+          continue;
+        }
+        if (loopIndex == RACK_LOCAL_LOOP) {
+          if (enrichedAsk.getRackLocations().contains(rNode.getRackName())) {
+            location = rNode.getRackName();
+          } else {
+            continue;
+          }
+        }
+        Container container = createContainer(rmIdentifier, appParams,
+            idCounter, id, userName, allocations, location,
+            anyAsk, rNode);
+        numAllocated++;
+        updateMetrics(loopIndex);
+        allocatedNodes.add(rNodeHost);
+        LOG.info("Allocated [" + container.getId() + "] as opportunistic at " +
+            "location [" + location + "]");
+        if (numAllocated >= toAllocate) {
+          break;
+        }
+      }
+      if (loopIndex == NODE_LOCAL_LOOP &&
+          enrichedAsk.getRackLocations().size() > 0) {
+        loopIndex = RACK_LOCAL_LOOP;
+      } else {
+        loopIndex++;
+      }
+      // Handle case where there are no nodes remaining after blacklist is
+      // considered.
+      if (loopIndex > OFF_SWITCH_LOOP && numAllocated == 0) {
+        LOG.warn("Unable to allocate any opportunistic containers.");
+        break;
+      }
+    }
+  }
+
+
+  private void updateMetrics(int loopIndex) {
+    OpportunisticSchedulerMetrics metrics =
+        OpportunisticSchedulerMetrics.getMetrics();
+    if (loopIndex == NODE_LOCAL_LOOP) {
+      metrics.incrNodeLocalOppContainers();
+    } else if (loopIndex == RACK_LOCAL_LOOP) {
+      metrics.incrRackLocalOppContainers();
+    } else {
+      metrics.incrOffSwitchOppContainers();
+    }
+  }
+
+  private Collection<RemoteNode> findNodeCandidates(int loopIndex,
+      Map<String, RemoteNode> allNodes, Set<String> blackList,
+      Set<String> allocatedNodes, EnrichedResourceRequest enrichedRR) {
+    LinkedList<RemoteNode> retList = new LinkedList<>();
+    String partition = getRequestPartition(enrichedRR);
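+    // Off-switch pass: any node in the requested partition is a candidate.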
+    if (loopIndex > RACK_LOCAL_LOOP) {
+      for (RemoteNode remoteNode : allNodes.values()) {
+        if (StringUtils.equals(partition, getRemoteNodePartition(remoteNode))) {
+          retList.add(remoteNode);
+        }
+      }
+      return retList;
+    } else {
+      int numContainers = enrichedRR.getRequest().getNumContainers();
+      while (numContainers > 0) {
+        if (loopIndex == NODE_LOCAL_LOOP) {
+          // Node local candidates
+          numContainers = collectNodeLocalCandidates(
+              allNodes, enrichedRR, retList, numContainers);
+        } else {
+          // Rack local candidates
+          numContainers =
+              collectRackLocalCandidates(allNodes, enrichedRR, retList,
+                  blackList, allocatedNodes, numContainers);
+        }
+        if (numContainers == enrichedRR.getRequest().getNumContainers()) {
+          // If there is no change in numContainers, then there is no point
+          // in looping again.
+          break;
+        }
+      }
+      return retList;
+    }
+  }
+
+  private int collectRackLocalCandidates(Map<String, RemoteNode> allNodes,
+      EnrichedResourceRequest enrichedRR, LinkedList<RemoteNode> retList,
+      Set<String> blackList, Set<String> allocatedNodes, int numContainers) {
+    String partition = getRequestPartition(enrichedRR);
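+    // Rack-local pass: nodes without an allocation in this round go to the
+    // front of the candidate list and reduce the remaining count; nodes that
+    // already have one are kept at the tail as fallbacks.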
+    for (RemoteNode rNode : allNodes.values()) {
+      if (StringUtils.equals(partition, getRemoteNodePartition(rNode)) &&
+          enrichedRR.getRackLocations().contains(rNode.getRackName())) {
+        String rHost = rNode.getNodeId().getHost();
+        if (blackList.contains(rHost)) {
+          continue;
+        }
+        if (allocatedNodes.contains(rHost)) {
+          retList.addLast(rNode);
+        } else {
+          retList.addFirst(rNode);
+          numContainers--;
+        }
+      }
+      if (numContainers == 0) {
+        break;
+      }
+    }
+    return numContainers;
+  }
+
+  private int collectNodeLocalCandidates(Map<String, RemoteNode> allNodes,
+      EnrichedResourceRequest enrichedRR, List<RemoteNode> retList,
+      int numContainers) {
+    String partition = getRequestPartition(enrichedRR);
+    for (String nodeName : enrichedRR.getNodeLocations()) {
+      RemoteNode remoteNode = allNodes.get(nodeName);
+      if (remoteNode != null &&
+          StringUtils.equals(partition, getRemoteNodePartition(remoteNode))) {
+        retList.add(remoteNode);
+        numContainers--;
+      }
+      if (numContainers == 0) {
+        break;
+      }
+    }
+    return numContainers;
+  }
+}
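
The allocator added above walks candidate nodes in three locality phases: node-local (loop 0), rack-local (loop 1), then off-switch (loop 2 onward), skipping blacklisted hosts and, outside the node-local phase, hosts that already received an opportunistic container in this round. The following standalone Java sketch mirrors only that phase fall-through with plain collections; the class name LocalityLoopSketch and its parameters are illustrative and are not part of the YARN API.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class LocalityLoopSketch {

  private static final int NODE_LOCAL = 0;
  private static final int RACK_LOCAL = 1;
  private static final int OFF_SWITCH = 2;

  // Picks up to toAllocate hosts, preferring requested hosts, then requested
  // racks, then any remaining host (off-switch), one container per host.
  static List<String> allocate(int toAllocate, Set<String> requestedHosts,
      Set<String> requestedRacks, Map<String, String> hostToRack) {
    List<String> allocated = new ArrayList<>();
    Set<String> used = new HashSet<>();
    int phase = requestedHosts.isEmpty() ? OFF_SWITCH : NODE_LOCAL;
    while (allocated.size() < toAllocate && phase <= OFF_SWITCH) {
      for (String host : hostToRack.keySet()) {
        if (used.contains(host)) {
          continue;
        }
        boolean matches =
            (phase == NODE_LOCAL && requestedHosts.contains(host))
                || (phase == RACK_LOCAL
                    && requestedRacks.contains(hostToRack.get(host)))
                || phase == OFF_SWITCH;
        if (!matches) {
          continue;
        }
        allocated.add(host);
        used.add(host);
        if (allocated.size() >= toAllocate) {
          break;
        }
      }
      // Fall through node-local -> rack-local -> off-switch, as above.
      phase = (phase == NODE_LOCAL && !requestedRacks.isEmpty())
          ? RACK_LOCAL : phase + 1;
    }
    return allocated;
  }

  public static void main(String[] args) {
    Map<String, String> cluster = new LinkedHashMap<>();
    cluster.put("h1", "/rackA");
    cluster.put("h2", "/rackA");
    cluster.put("h3", "/rackB");
    System.out.println(allocate(2,
        new HashSet<>(Collections.singleton("h1")),
        new HashSet<>(Collections.singleton("/rackA")),
        cluster));
    // Expected: [h1, h2] - h1 is node-local, h2 is rack-local on /rackA.
  }
}
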
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
index 0ce1976..4a17a65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.scheduler;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -38,21 +37,15 @@
 import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;
-import org.apache.hadoop.yarn.server.metrics.OpportunisticSchedulerMetrics;
 import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -61,16 +54,11 @@
 
 /**
  * <p>
- * The OpportunisticContainerAllocator allocates containers on a given list of
- * nodes, after modifying the container sizes to respect the limits set by the
- * ResourceManager. It tries to distribute the containers as evenly as possible.
+ * Base abstract class for opportunistic container allocation that provides
+ * the common functionality required by opportunistic container allocators.
  * </p>
  */
-public class OpportunisticContainerAllocator {
-
-  private static final int NODE_LOCAL_LOOP = 0;
-  private static final int RACK_LOCAL_LOOP = 1;
-  private static final int OFF_SWITCH_LOOP = 2;
+public abstract class OpportunisticContainerAllocator {
 
   private int maxAllocationsPerAMHeartbeat = -1;
 
@@ -212,9 +200,6 @@
     }
   }
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OpportunisticContainerAllocator.class);
-
   private static final ResourceCalculator RESOURCE_CALCULATOR =
       new DominantResourceCalculator();
 
@@ -238,26 +223,30 @@
     }
   }
 
-  static class EnrichedResourceRequest {
+  /**
+   * This class encapsulates a ResourceRequest and tracks the requested
+   * container counts per node and per rack.
+   */
+  public static class EnrichedResourceRequest {
     private final Map<String, AtomicInteger> nodeLocations = new HashMap<>();
     private final Map<String, AtomicInteger> rackLocations = new HashMap<>();
     private final ResourceRequest request;
     private final long timestamp;
 
-    EnrichedResourceRequest(ResourceRequest request) {
+    public EnrichedResourceRequest(ResourceRequest request) {
       this.request = request;
       timestamp = Time.monotonicNow();
     }
 
-    long getTimestamp() {
+    public long getTimestamp() {
       return timestamp;
     }
 
-    ResourceRequest getRequest() {
+    public ResourceRequest getRequest() {
       return request;
     }
 
-    void addLocation(String location, int count) {
+    public void addLocation(String location, int count) {
       Map<String, AtomicInteger> m = rackLocations;
       if (!location.startsWith("/")) {
         m = nodeLocations;
@@ -269,7 +258,7 @@
       }
     }
 
-    void removeLocation(String location) {
+    public void removeLocation(String location) {
       Map<String, AtomicInteger> m = rackLocations;
       AtomicInteger count = m.get(location);
       if (count == null) {
@@ -284,14 +273,15 @@
       }
     }
 
-    Set<String> getNodeLocations() {
+    public Set<String> getNodeLocations() {
       return nodeLocations.keySet();
     }
 
-    Set<String> getRackLocations() {
+    public Set<String> getRackLocations() {
       return rackLocations.keySet();
     }
   }
+
   /**
    * Create a new Opportunistic Container Allocator.
    * @param tokenSecretManager TokenSecretManager
@@ -320,6 +310,14 @@
   }
 
   /**
+   * Get the maximum number of container allocations allowed per AM heartbeat.
+   * @return the maximum allocations per AM heartbeat.
+   */
+  public int getMaxAllocationsPerAMHeartbeat() {
+    return this.maxAllocationsPerAMHeartbeat;
+  }
+
+  /**
    * Allocate OPPORTUNISTIC containers.
    * @param blackList Resource BlackList Request
    * @param oppResourceReqs Opportunistic Resource Requests
@@ -330,72 +328,37 @@
    * @return List of Containers.
    * @throws YarnException YarnException
    */
-  public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
+  public abstract List<Container> allocateContainers(
+      ResourceBlacklistRequest blackList,
       List<ResourceRequest> oppResourceReqs,
       ApplicationAttemptId applicationAttemptId,
       OpportunisticContainerContext opportContext, long rmIdentifier,
-      String appSubmitter) throws YarnException {
+      String appSubmitter) throws YarnException;
 
-    // Update black list.
+
+  protected void updateBlacklist(ResourceBlacklistRequest blackList,
+      OpportunisticContainerContext oppContext) {
     if (blackList != null) {
-      opportContext.getBlacklist().removeAll(blackList.getBlacklistRemovals());
-      opportContext.getBlacklist().addAll(blackList.getBlacklistAdditions());
+      oppContext.getBlacklist().removeAll(blackList.getBlacklistRemovals());
+      oppContext.getBlacklist().addAll(blackList.getBlacklistAdditions());
     }
-
-    // Add OPPORTUNISTIC requests to the outstanding ones.
-    opportContext.addToOutstandingReqs(oppResourceReqs);
-    Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
-    Set<String> allocatedNodes = new HashSet<>();
-    List<Container> allocatedContainers = new ArrayList<>();
-
-    // Satisfy the outstanding OPPORTUNISTIC requests.
-    boolean continueLoop = true;
-    while (continueLoop) {
-      continueLoop = false;
-      List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
-      for (SchedulerRequestKey schedulerKey :
-          opportContext.getOutstandingOpReqs().descendingKeySet()) {
-        // Allocated containers :
-        //  Key = Requested Capability,
-        //  Value = List of Containers of given cap (the actual container size
-        //          might be different than what is requested, which is why
-        //          we need the requested capability (key) to match against
-        //          the outstanding reqs)
-        int remAllocs = -1;
-        if (maxAllocationsPerAMHeartbeat > 0) {
-          remAllocs =
-              maxAllocationsPerAMHeartbeat - allocatedContainers.size()
-                  - getTotalAllocations(allocations);
-          if (remAllocs <= 0) {
-            LOG.info("Not allocating more containers as we have reached max "
-                    + "allocations per AM heartbeat {}",
-                maxAllocationsPerAMHeartbeat);
-            break;
-          }
-        }
-        Map<Resource, List<Allocation>> allocation = allocate(
-            rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
-            appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
-        if (allocation.size() > 0) {
-          allocations.add(allocation);
-          continueLoop = true;
-        }
-      }
-      for (Map<Resource, List<Allocation>> allocation : allocations) {
-        for (Map.Entry<Resource, List<Allocation>> e : allocation.entrySet()) {
-          opportContext.matchAllocationToOutstandingRequest(
-              e.getKey(), e.getValue());
-          for (Allocation alloc : e.getValue()) {
-            allocatedContainers.add(alloc.getContainer());
-          }
-        }
-      }
-    }
-
-    return allocatedContainers;
   }
 
-  private int getTotalAllocations(
+  protected void matchAllocation(List<Map<Resource,
+      List<Allocation>>> allocations, List<Container> allocatedContainers,
+      OpportunisticContainerContext oppContext) {
+    for (Map<Resource, List<Allocation>> allocation : allocations) {
+      for (Map.Entry<Resource, List<Allocation>> e : allocation.entrySet()) {
+        oppContext.matchAllocationToOutstandingRequest(
+            e.getKey(), e.getValue());
+        for (Allocation alloc : e.getValue()) {
+          allocatedContainers.add(alloc.getContainer());
+        }
+      }
+    }
+  }
+
+  protected int getTotalAllocations(
       List<Map<Resource, List<Allocation>>> allocations) {
     int totalAllocs = 0;
     for (Map<Resource, List<Allocation>> allocation : allocations) {
@@ -406,223 +369,8 @@
     return totalAllocs;
   }
 
-  private Map<Resource, List<Allocation>> allocate(long rmIdentifier,
-      OpportunisticContainerContext appContext, SchedulerRequestKey schedKey,
-      ApplicationAttemptId appAttId, String userName, Set<String> blackList,
-      Set<String> allocatedNodes, int maxAllocations)
-      throws YarnException {
-    Map<Resource, List<Allocation>> containers = new HashMap<>();
-    for (EnrichedResourceRequest enrichedAsk :
-        appContext.getOutstandingOpReqs().get(schedKey).values()) {
-      int remainingAllocs = -1;
-      if (maxAllocations > 0) {
-        int totalAllocated = 0;
-        for (List<Allocation> allocs : containers.values()) {
-          totalAllocated += allocs.size();
-        }
-        remainingAllocs = maxAllocations - totalAllocated;
-        if (remainingAllocs <= 0) {
-          LOG.info("Not allocating more containers as max allocations per AM "
-                  + "heartbeat {} has reached", maxAllocationsPerAMHeartbeat);
-          break;
-        }
-      }
-      allocateContainersInternal(rmIdentifier, appContext.getAppParams(),
-          appContext.getContainerIdGenerator(), blackList, allocatedNodes,
-          appAttId, appContext.getNodeMap(), userName, containers, enrichedAsk,
-          remainingAllocs);
-      ResourceRequest anyAsk = enrichedAsk.getRequest();
-      if (!containers.isEmpty()) {
-        LOG.info("Opportunistic allocation requested for [priority={}, "
-            + "allocationRequestId={}, num_containers={}, capability={}] "
-            + "allocated = {}", anyAsk.getPriority(),
-            anyAsk.getAllocationRequestId(), anyAsk.getNumContainers(),
-            anyAsk.getCapability(), containers.keySet());
-      }
-    }
-    return containers;
-  }
-
-  private void allocateContainersInternal(long rmIdentifier,
-      AllocationParams appParams, ContainerIdGenerator idCounter,
-      Set<String> blacklist, Set<String> allocatedNodes,
-      ApplicationAttemptId id, Map<String, RemoteNode> allNodes,
-      String userName, Map<Resource, List<Allocation>> allocations,
-      EnrichedResourceRequest enrichedAsk, int maxAllocations)
-      throws YarnException {
-    if (allNodes.size() == 0) {
-      LOG.info("No nodes currently available to " +
-          "allocate OPPORTUNISTIC containers.");
-      return;
-    }
-    ResourceRequest anyAsk = enrichedAsk.getRequest();
-    int toAllocate = anyAsk.getNumContainers()
-        - (allocations.isEmpty() ? 0 :
-            allocations.get(anyAsk.getCapability()).size());
-    toAllocate = Math.min(toAllocate,
-        appParams.getMaxAllocationsPerSchedulerKeyPerRound());
-    if (maxAllocations >= 0) {
-      toAllocate = Math.min(maxAllocations, toAllocate);
-    }
-    int numAllocated = 0;
-    // Node Candidates are selected as follows:
-    // * Node local candidates selected in loop == 0
-    // * Rack local candidates selected in loop == 1
-    // * From loop == 2 onwards, we revert to off switch allocations.
-    int loopIndex = OFF_SWITCH_LOOP;
-    if (enrichedAsk.getNodeLocations().size() > 0) {
-      loopIndex = NODE_LOCAL_LOOP;
-    }
-    while (numAllocated < toAllocate) {
-      Collection<RemoteNode> nodeCandidates =
-          findNodeCandidates(loopIndex, allNodes, blacklist, allocatedNodes,
-              enrichedAsk);
-      for (RemoteNode rNode : nodeCandidates) {
-        String rNodeHost = rNode.getNodeId().getHost();
-        // Ignore black list
-        if (blacklist.contains(rNodeHost)) {
-          LOG.info("Nodes for scheduling has a blacklisted node" +
-              " [" + rNodeHost + "]..");
-          continue;
-        }
-        String location = ResourceRequest.ANY;
-        if (loopIndex == NODE_LOCAL_LOOP) {
-          if (enrichedAsk.getNodeLocations().contains(rNodeHost)) {
-            location = rNodeHost;
-          } else {
-            continue;
-          }
-        } else if (allocatedNodes.contains(rNodeHost)) {
-          LOG.info("Opportunistic container has already been allocated on {}.",
-              rNodeHost);
-          continue;
-        }
-        if (loopIndex == RACK_LOCAL_LOOP) {
-          if (enrichedAsk.getRackLocations().contains(rNode.getRackName())) {
-            location = rNode.getRackName();
-          } else {
-            continue;
-          }
-        }
-        Container container = createContainer(rmIdentifier, appParams,
-            idCounter, id, userName, allocations, location,
-            anyAsk, rNode);
-        numAllocated++;
-        updateMetrics(loopIndex);
-        allocatedNodes.add(rNodeHost);
-        LOG.info("Allocated [" + container.getId() + "] as opportunistic at " +
-            "location [" + location + "]");
-        if (numAllocated >= toAllocate) {
-          break;
-        }
-      }
-      if (loopIndex == NODE_LOCAL_LOOP &&
-          enrichedAsk.getRackLocations().size() > 0) {
-        loopIndex = RACK_LOCAL_LOOP;
-      } else {
-        loopIndex++;
-      }
-      // Handle case where there are no nodes remaining after blacklist is
-      // considered.
-      if (loopIndex > OFF_SWITCH_LOOP && numAllocated == 0) {
-        LOG.warn("Unable to allocate any opportunistic containers.");
-        break;
-      }
-    }
-  }
-
-  private void updateMetrics(int loopIndex) {
-    OpportunisticSchedulerMetrics metrics =
-        OpportunisticSchedulerMetrics.getMetrics();
-    if (loopIndex == NODE_LOCAL_LOOP) {
-      metrics.incrNodeLocalOppContainers();
-    } else if (loopIndex == RACK_LOCAL_LOOP) {
-      metrics.incrRackLocalOppContainers();
-    } else {
-      metrics.incrOffSwitchOppContainers();
-    }
-  }
-
-  private Collection<RemoteNode> findNodeCandidates(int loopIndex,
-      Map<String, RemoteNode> allNodes, Set<String> blackList,
-      Set<String> allocatedNodes, EnrichedResourceRequest enrichedRR) {
-    LinkedList<RemoteNode> retList = new LinkedList<>();
-    String partition = getRequestPartition(enrichedRR);
-    if (loopIndex > 1) {
-      for (RemoteNode remoteNode : allNodes.values()) {
-        if (StringUtils.equals(partition, getRemoteNodePartition(remoteNode))) {
-          retList.add(remoteNode);
-        }
-      }
-      return retList;
-    } else {
-
-      int numContainers = enrichedRR.getRequest().getNumContainers();
-      while (numContainers > 0) {
-        if (loopIndex == 0) {
-          // Node local candidates
-          numContainers = collectNodeLocalCandidates(
-              allNodes, enrichedRR, retList, numContainers);
-        } else {
-          // Rack local candidates
-          numContainers =
-              collectRackLocalCandidates(allNodes, enrichedRR, retList,
-                  blackList, allocatedNodes, numContainers);
-        }
-        if (numContainers == enrichedRR.getRequest().getNumContainers()) {
-          // If there is no change in numContainers, then there is no point
-          // in looping again.
-          break;
-        }
-      }
-      return retList;
-    }
-  }
-
-  private int collectRackLocalCandidates(Map<String, RemoteNode> allNodes,
-      EnrichedResourceRequest enrichedRR, LinkedList<RemoteNode> retList,
-      Set<String> blackList, Set<String> allocatedNodes, int numContainers) {
-    String partition = getRequestPartition(enrichedRR);
-    for (RemoteNode rNode : allNodes.values()) {
-      if (StringUtils.equals(partition, getRemoteNodePartition(rNode)) &&
-          enrichedRR.getRackLocations().contains(rNode.getRackName())) {
-        String rHost = rNode.getNodeId().getHost();
-        if (blackList.contains(rHost)) {
-          continue;
-        }
-        if (allocatedNodes.contains(rHost)) {
-          retList.addLast(rNode);
-        } else {
-          retList.addFirst(rNode);
-          numContainers--;
-        }
-      }
-      if (numContainers == 0) {
-        break;
-      }
-    }
-    return numContainers;
-  }
-
-  private int collectNodeLocalCandidates(Map<String, RemoteNode> allNodes,
-      EnrichedResourceRequest enrichedRR, List<RemoteNode> retList,
-      int numContainers) {
-    String partition = getRequestPartition(enrichedRR);
-    for (String nodeName : enrichedRR.getNodeLocations()) {
-      RemoteNode remoteNode = allNodes.get(nodeName);
-      if (remoteNode != null &&
-          StringUtils.equals(partition, getRemoteNodePartition(remoteNode))) {
-        retList.add(remoteNode);
-        numContainers--;
-      }
-      if (numContainers == 0) {
-        break;
-      }
-    }
-    return numContainers;
-  }
-
-  private Container createContainer(long rmIdentifier,
+  @SuppressWarnings("checkstyle:parameternumber")
+  protected Container createContainer(long rmIdentifier,
       AllocationParams appParams, ContainerIdGenerator idCounter,
       ApplicationAttemptId id, String userName,
       Map<Resource, List<Allocation>> allocations, String location,
@@ -654,6 +402,7 @@
         SchedulerRequestKey.create(rr), userName, node, cId, capability);
   }
 
+  @SuppressWarnings("checkstyle:parameternumber")
   private Container createContainer(long rmIdentifier, long tokenExpiry,
       SchedulerRequestKey schedulerKey, String userName, RemoteNode node,
       ContainerId cId, Resource capability) {
@@ -718,7 +467,7 @@
     return partitionedRequests;
   }
 
-  private String getRequestPartition(EnrichedResourceRequest enrichedRR) {
+  protected String getRequestPartition(EnrichedResourceRequest enrichedRR) {
     String partition = enrichedRR.getRequest().getNodeLabelExpression();
     if (partition == null) {
       partition = CommonNodeLabelsManager.NO_LABEL;
@@ -726,7 +475,7 @@
     return partition;
   }
 
-  private String getRemoteNodePartition(RemoteNode node) {
+  protected String getRemoteNodePartition(RemoteNode node) {
     String partition = node.getNodePartition();
     if (partition == null) {
       partition = CommonNodeLabelsManager.NO_LABEL;
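
This hunk turns OpportunisticContainerAllocator into an abstract base: allocateContainers() becomes abstract, while blacklist maintenance (updateBlacklist), matching allocations back to outstanding requests (matchAllocation), allocation counting (getTotalAllocations) and container creation remain as protected helpers for subclasses such as the DistributedOpportunisticContainerAllocator above. A minimal template-method sketch of that split, using hypothetical names rather than the Hadoop classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AllocatorSketch {

  // The base class keeps shared bookkeeping; the placement policy is abstract.
  abstract static class BaseAllocator {
    private final int maxAllocationsPerHeartbeat;

    BaseAllocator(int maxAllocationsPerHeartbeat) {
      this.maxAllocationsPerHeartbeat = maxAllocationsPerHeartbeat;
    }

    // Subclasses decide how requests map to nodes.
    abstract List<String> allocate(List<String> requests);

    // Shared helper: cap the allocations handed out per heartbeat.
    protected List<String> capToHeartbeatLimit(List<String> proposed) {
      if (maxAllocationsPerHeartbeat <= 0
          || proposed.size() <= maxAllocationsPerHeartbeat) {
        return proposed;
      }
      return new ArrayList<>(proposed.subList(0, maxAllocationsPerHeartbeat));
    }
  }

  // One concrete policy: spread requests round-robin over the known nodes.
  static class RoundRobinAllocator extends BaseAllocator {
    private final List<String> nodes;
    private int next;

    RoundRobinAllocator(List<String> nodes, int maxAllocationsPerHeartbeat) {
      super(maxAllocationsPerHeartbeat);
      this.nodes = nodes;
    }

    @Override
    List<String> allocate(List<String> requests) {
      List<String> placed = new ArrayList<>();
      for (String request : requests) {
        placed.add(request + "->" + nodes.get(next++ % nodes.size()));
      }
      return capToHeartbeatLimit(placed);
    }
  }

  public static void main(String[] args) {
    BaseAllocator allocator =
        new RoundRobinAllocator(Arrays.asList("h1", "h2"), 3);
    // Four requests, but the per-heartbeat cap keeps only the first three.
    System.out.println(allocator.allocate(
        Arrays.asList("c1", "c2", "c3", "c4")));
  }
}
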
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
index 7487184..896a8d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/ResourceTracker.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "ResourceTracker";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/SCMUploader.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/SCMUploader.proto
index 2278422..b90e01b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/SCMUploader.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/SCMUploader.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "SCMUploaderProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/collectornodemanager_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/collectornodemanager_protocol.proto
index 8665274..416cb38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/collectornodemanager_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/collectornodemanager_protocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "CollectorNodemanagerProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/distributed_scheduling_am_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/distributed_scheduling_am_protocol.proto
index 274eaa2..07efcf7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/distributed_scheduling_am_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/distributed_scheduling_am_protocol.proto
@@ -16,13 +16,13 @@
  * limitations under the License.
  */
 
-
 /**
  * These .proto interfaces are public and stable.
  * Please see http://wiki.apache.org/hadoop/Compatibility
  * for what changes are allowed for a *stable* .proto interface.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "DistributedSchedulingAMProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
index 8200808..ea8df4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerCommonProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index b5a99b9..ff7153e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerCommonServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto
index cedf482..114a60d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.federation.proto";
 option java_outer_classname = "YarnServerFederationProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/scheduler/TestOpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/scheduler/TestOpportunisticContainerAllocator.java
index 548ddad..6a91f41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/scheduler/TestOpportunisticContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/scheduler/TestOpportunisticContainerAllocator.java
@@ -98,7 +98,7 @@
             return new byte[]{1, 2};
           }
         };
-    allocator = new OpportunisticContainerAllocator(secMan);
+    allocator = new DistributedOpportunisticContainerAllocator(secMan);
     oppCntxt = new OpportunisticContainerContext();
     oppCntxt.getAppParams().setMinResource(Resource.newInstance(1024, 1));
     oppCntxt.getAppParams().setIncrementResource(Resource.newInstance(512, 1));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 609f894..90c32b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -355,32 +355,22 @@
           </excludes>
         </configuration>
       </plugin>
-
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-		  <include>yarn_server_nodemanager_recovery.proto</include>
-                  <include>yarn_server_nodemanager_service_protos.proto</include>
-                  <include>LocalizationProtocol.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index db3aaca..4bbae34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -77,6 +77,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
 import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer;
+import org.apache.hadoop.yarn.server.scheduler.DistributedOpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.state.MultiStateTransitionListener;
@@ -479,7 +480,7 @@
         YarnConfiguration.
             DEFAULT_OPP_CONTAINER_MAX_ALLOCATIONS_PER_AM_HEARTBEAT);
     ((NMContext) context).setQueueableContainerAllocator(
-        new OpportunisticContainerAllocator(
+        new DistributedOpportunisticContainerAllocator(
             context.getContainerTokenSecretManager(),
             maxAllocationsPerAMHeartbeat));
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto
index 0f50dc3..cf55f78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/LocalizationProtocol.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "LocalizationProtocol";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
index 4eee9b4..11b5988 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerNodemanagerRecoveryProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto
index 6fde7cc..1ba8438 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_service_protos.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerNodemanagerServiceProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 063215e..0a2d63e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -153,6 +153,7 @@
 public class TestLogAggregationService extends BaseContainerManagerTest {
 
   private Map<ApplicationAccessType, String> acls = createAppAcls();
+  private static final String[] EMPTY_FILES = new String[] {"zero"};
   
   static {
     LOG = LoggerFactory.getLogger(TestLogAggregationService.class);
@@ -219,7 +220,7 @@
     ContainerId container11 = ContainerId.newContainerId(appAttemptId, 1);
     // Simulate log-file creation
     writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
-        "stderr", "syslog" });
+        "stderr", "syslog" }, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container11,
             ContainerType.APPLICATION_MASTER, 0));
@@ -342,7 +343,7 @@
         BuilderUtils.newApplicationAttemptId(app, 1);
     ContainerId cont = ContainerId.newContainerId(appAttemptId, 1);
     writeContainerLogs(appLogDir, cont, new String[] { "stdout",
-        "stderr", "syslog" });
+        "stderr", "syslog" }, EMPTY_FILES);
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(cont,
         ContainerType.APPLICATION_MASTER, 0));
     logAggregationService.handle(new LogHandlerAppFinishedEvent(app));
@@ -432,7 +433,7 @@
     ContainerId container11 = ContainerId.newContainerId(appAttemptId1, 1);
 
     // Simulate log-file creation
-    writeContainerLogs(app1LogDir, container11, fileNames);
+    writeContainerLogs(app1LogDir, container11, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container11,
             ContainerType.APPLICATION_MASTER, 0));
@@ -454,14 +455,14 @@
 
     ContainerId container21 = ContainerId.newContainerId(appAttemptId2, 1);
 
-    writeContainerLogs(app2LogDir, container21, fileNames);
+    writeContainerLogs(app2LogDir, container21, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container21,
             ContainerType.APPLICATION_MASTER, 0));
 
     ContainerId container12 = ContainerId.newContainerId(appAttemptId1, 2);
 
-    writeContainerLogs(app1LogDir, container12, fileNames);
+    writeContainerLogs(app1LogDir, container12, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container12,
             ContainerType.TASK, 0));
@@ -497,25 +498,25 @@
     reset(appEventHandler);
     
     ContainerId container31 = ContainerId.newContainerId(appAttemptId3, 1);
-    writeContainerLogs(app3LogDir, container31, fileNames);
+    writeContainerLogs(app3LogDir, container31, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container31,
             ContainerType.APPLICATION_MASTER, 0));
 
     ContainerId container32 = ContainerId.newContainerId(appAttemptId3, 2);
-    writeContainerLogs(app3LogDir, container32, fileNames);
+    writeContainerLogs(app3LogDir, container32, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container32,
             ContainerType.TASK, 1)); // Failed
 
     ContainerId container22 = ContainerId.newContainerId(appAttemptId2, 2);
-    writeContainerLogs(app2LogDir, container22, fileNames);
+    writeContainerLogs(app2LogDir, container22, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container22,
             ContainerType.TASK, 0));
 
     ContainerId container33 = ContainerId.newContainerId(appAttemptId3, 3);
-    writeContainerLogs(app3LogDir, container33, fileNames);
+    writeContainerLogs(app3LogDir, container33, fileNames, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container33,
             ContainerType.TASK, 0));
@@ -531,13 +532,15 @@
     assertEquals(0, logAggregationService.getNumAggregators());
 
     verifyContainerLogs(logAggregationService, application1,
-        new ContainerId[] { container11, container12 }, fileNames, 3, false);
+        new ContainerId[] {container11, container12}, fileNames, 4, false,
+        EMPTY_FILES);
 
     verifyContainerLogs(logAggregationService, application2,
-        new ContainerId[] { container21 }, fileNames, 3, false);
+        new ContainerId[] {container21}, fileNames, 4, false, EMPTY_FILES);
 
     verifyContainerLogs(logAggregationService, application3,
-        new ContainerId[] { container31, container32 }, fileNames, 3, false);
+        new ContainerId[] {container31, container32}, fileNames, 4, false,
+        EMPTY_FILES);
     
     dispatcher.await();
     
@@ -935,7 +938,7 @@
   }
 
   private void writeContainerLogs(File appLogDir, ContainerId containerId,
-      String[] fileName) throws IOException {
+      String[] fileName, String[] emptyFiles) throws IOException {
     // ContainerLogDir should be created
     String containerStr = containerId.toString();
     File containerLogDir = new File(appLogDir, containerStr);
@@ -947,17 +950,22 @@
       writer11.write(containerStr + " Hello " + fileType + "!");
       writer11.close();
     }
+    for (String emptyFile : emptyFiles) {
+      Writer emptyWriter = new FileWriter(new File(containerLogDir, emptyFile));
+      emptyWriter.write("");
+      emptyWriter.close();
+    }
   }
 
   private LogFileStatusInLastCycle verifyContainerLogs(
       LogAggregationService logAggregationService,
       ApplicationId appId, ContainerId[] expectedContainerIds,
       String[] logFiles, int numOfLogsPerContainer,
-      boolean multiLogs) throws IOException {
+      boolean multiLogs, String[] zeroLengthFiles) throws IOException {
     return verifyContainerLogs(logAggregationService, appId,
         expectedContainerIds, expectedContainerIds.length,
         expectedContainerIds.length, logFiles, numOfLogsPerContainer,
-        multiLogs);
+        multiLogs, zeroLengthFiles);
   }
 
   // expectedContainerIds is the minimal set of containers to check.
@@ -968,7 +976,8 @@
       LogAggregationService logAggregationService,
       ApplicationId appId, ContainerId[] expectedContainerIds,
       int minNumOfContainers, int maxNumOfContainers,
-      String[] logFiles, int numOfLogsPerContainer, boolean multiLogs)
+      String[] logFiles, int numOfLogsPerContainer, boolean multiLogs,
+      String[] zeroLengthLogFiles)
     throws IOException {
     Path appLogDir = logAggregationService.getLogAggregationFileController(
         conf).getRemoteAppLogDir(appId, this.user);
@@ -1089,6 +1098,11 @@
               + " not present in aggregated log-file!", foundValue);
           Assert.assertEquals(expectedValue, foundValue);
         }
+        for (String emptyFile : zeroLengthLogFiles) {
+          String foundValue = thisContainerMap.remove(emptyFile);
+          String expectedValue = "\nEnd of LogType:" + emptyFile;
+          Assert.assertEquals(expectedValue, foundValue);
+        }
         Assert.assertEquals(0, thisContainerMap.size());
       }
       Assert.assertTrue("number of remaining containers should be at least " +
@@ -1584,7 +1598,7 @@
 
     // Simulate log-file creation
     writeContainerLogs(appLogDir1, container1, new String[] { "stdout",
-        "stderr", "syslog" });
+        "stderr", "syslog" }, EMPTY_FILES);
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
         container1, ContainerType.APPLICATION_MASTER, 0));
 
@@ -1605,7 +1619,7 @@
     ContainerId container2 = ContainerId.newContainerId(appAttemptId2, 1);
 
     writeContainerLogs(app2LogDir, container2, new String[] { "stdout",
-        "stderr", "syslog" });
+        "stderr", "syslog" }, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container2,
             ContainerType.APPLICATION_MASTER, 0));
@@ -1629,7 +1643,7 @@
       this.user, null, this.acls, context1));
     ContainerId container3 = ContainerId.newContainerId(appAttemptId3, 1);
     writeContainerLogs(app3LogDir, container3, new String[] { "stdout",
-        "sys.log", "std.log", "out.log", "err.log", "log" });
+        "sys.log", "std.log", "out.log", "err.log", "log" }, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container3,
             ContainerType.APPLICATION_MASTER, 0));
@@ -1654,7 +1668,7 @@
       this.user, null, this.acls, context2));
     ContainerId container4 = ContainerId.newContainerId(appAttemptId4, 1);
     writeContainerLogs(app4LogDir, container4, new String[] { "stdout",
-        "sys.log", "std.log", "out.log", "err.log", "log" });
+        "sys.log", "std.log", "out.log", "err.log", "log" }, EMPTY_FILES);
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container4,
             ContainerType.APPLICATION_MASTER, 0));
@@ -1682,19 +1696,19 @@
 
     String[] logFiles = new String[] { "stdout", "syslog" };
     verifyContainerLogs(logAggregationService, application1,
-      new ContainerId[] { container1 }, logFiles, 2, false);
+        new ContainerId[] {container1}, logFiles, 2, false, new String[] {});
 
     logFiles = new String[] { "stderr" };
     verifyContainerLogs(logAggregationService, application2,
-      new ContainerId[] { container2 }, logFiles, 1, false);
+        new ContainerId[] {container2}, logFiles, 2, false, EMPTY_FILES);
 
     logFiles = new String[] { "out.log", "err.log" };
     verifyContainerLogs(logAggregationService, application3,
-      new ContainerId[] { container3 }, logFiles, 2, false);
+        new ContainerId[] {container3}, logFiles, 2, false, new String[] {});
 
     logFiles = new String[] { "sys.log" };
     verifyContainerLogs(logAggregationService, application4,
-      new ContainerId[] { container4 }, logFiles, 1, false);
+        new ContainerId[] {container4}, logFiles, 1, false, new String[] {});
 
     dispatcher.await();
 
@@ -1721,8 +1735,8 @@
     // When the app is running, we only aggregate the log with
     // the name stdout. After the app finishes, we only aggregate
     // the log with the name std_final.
-    logAggregationContext.setRolledLogsIncludePattern("stdout");
-    logAggregationContext.setIncludePattern("std_final");
+    logAggregationContext.setRolledLogsIncludePattern("stdout|zero");
+    logAggregationContext.setIncludePattern("std_final|empty_final");
     this.conf.set(
         YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     //configure YarnConfiguration.NM_REMOTE_APP_LOG_DIR to
@@ -1767,7 +1781,8 @@
     // until the app finishes.
     String[] logFilesWithFinalLog =
         new String[] {"stdout", "std_final"};
-    writeContainerLogs(appLogDir, container, logFilesWithFinalLog);
+    String[] zeroFiles = new String[] {"zero", "empty_final"};
+    writeContainerLogs(appLogDir, container, logFilesWithFinalLog, zeroFiles);
 
     // Do log aggregation
     AppLogAggregatorImpl aggregator =
@@ -1781,7 +1796,7 @@
 
     String[] logFiles = new String[] { "stdout" };
     verifyContainerLogs(logAggregationService, application,
-        new ContainerId[] {container}, logFiles, 1, true);
+        new ContainerId[] {container}, logFiles, 2, true, EMPTY_FILES);
 
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container,
@@ -1800,8 +1815,9 @@
     // This container finishes.
     // The log "std_final" should be aggregated this time.
     String[] logFinalLog = new String[] {"std_final"};
+    String[] emptyFinalLog = new String[] {"empty_final"};
     verifyContainerLogs(logAggregationService, application,
-        new ContainerId[] {container}, logFinalLog, 1, true);
+        new ContainerId[] {container}, logFinalLog, 2, true, emptyFinalLog);
 
     logAggregationService.handle(new LogHandlerAppFinishedEvent(application));
 
@@ -1823,7 +1839,7 @@
     finishApplication(appId, logAggregationService);
 
     verifyContainerLogs(logAggregationService, appId,
-        new ContainerId[] {container1}, logFiles, 0, false);
+        new ContainerId[] {container1}, logFiles, 0, false, EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -1847,7 +1863,7 @@
     finishApplication(appId, logAggregationService);
 
     verifyContainerLogs(logAggregationService, appId,
-        new ContainerId[] { container1 }, logFiles, 1, false);
+        new ContainerId[] {container1}, logFiles, 2, false, EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -1871,7 +1887,8 @@
     finishApplication(appId, logAggregationService);
 
     verifyContainerLogs(logAggregationService, appId,
-        new ContainerId[] { container1, container2 }, logFiles, 1, false);
+        new ContainerId[] {container1, container2}, logFiles, 2, false,
+        EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -1895,7 +1912,8 @@
     finishApplication(appId, logAggregationService);
 
     verifyContainerLogs(logAggregationService, appId,
-        new ContainerId[] { container2, container3 }, logFiles, 1, false);
+        new ContainerId[] {container2, container3}, logFiles, 2, false,
+        EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -1931,7 +1949,7 @@
     finishApplication(appId, logAggregationService);
 
     verifyContainerLogs(logAggregationService, appId,
-        new ContainerId[] { container1 }, logFiles, 1, false);
+        new ContainerId[] {container1}, logFiles, 2, false, EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -2080,7 +2098,7 @@
 
     verifyContainerLogs(logAggregationService, appId,
         new ContainerId[] { container1, container2, container3 },
-            logFiles, 1, false);
+            logFiles, 2, false, EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -2162,7 +2180,7 @@
     verifyContainerLogs(logAggregationService, appId,
         containerIds.toArray(new ContainerId[containerIds.size()]),
         minOfContainersWithLogs, maxOfContainersWithLogs,
-        logFiles, 1, false);
+        logFiles, 2, false, EMPTY_FILES);
 
     verifyLogAggFinishEvent(appId);
   }
@@ -2240,7 +2258,7 @@
     File appLogDir1 =
         new File(localLogDir, application1.toString());
     appLogDir1.mkdir();
-    writeContainerLogs(appLogDir1, containerId, logFiles);
+    writeContainerLogs(appLogDir1, containerId, logFiles, EMPTY_FILES);
 
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
         containerId, containerType, exitCode));
@@ -2361,7 +2379,8 @@
     String[] logFiles1WithFinalLog =
         new String[] { "stdout", "stderr", "syslog", "std_final" };
     String[] logFiles1 = new String[] { "stdout", "stderr", "syslog"};
-    writeContainerLogs(appLogDir, container, logFiles1WithFinalLog);
+    writeContainerLogs(appLogDir, container, logFiles1WithFinalLog,
+        EMPTY_FILES);
 
     // Do log aggregation
     AppLogAggregatorImpl aggregator =
@@ -2378,7 +2397,7 @@
     }
     // Container logs should be uploaded
     logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
-        new ContainerId[] { container }, logFiles1, 3, true);
+        new ContainerId[] {container}, logFiles1, 4, true, EMPTY_FILES);
     for(String logFile : logFiles1) {
       Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
         .contains(logFile));
@@ -2403,7 +2422,7 @@
 
     // Do log aggregation
     String[] logFiles2 = new String[] { "stdout_1", "stderr_1", "syslog_1" };
-    writeContainerLogs(appLogDir, container, logFiles2);
+    writeContainerLogs(appLogDir, container, logFiles2, EMPTY_FILES);
 
     aggregator.doLogAggregationOutOfBand();
 
@@ -2416,7 +2435,7 @@
     }
     // Container logs should be uploaded
     logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
-        new ContainerId[] { container }, logFiles2, 3, true);
+        new ContainerId[] {container}, logFiles2, 4, true, EMPTY_FILES);
 
     for(String logFile : logFiles2) {
       Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle()
@@ -2430,7 +2449,7 @@
 
     // create another logs
     String[] logFiles3 = new String[] { "stdout_2", "stderr_2", "syslog_2" };
-    writeContainerLogs(appLogDir, container, logFiles3);
+    writeContainerLogs(appLogDir, container, logFiles3, EMPTY_FILES);
 
     logAggregationService.handle(
         new LogHandlerContainerFinishedEvent(container,
@@ -2450,7 +2469,8 @@
     String[] logFiles3WithFinalLog =
         new String[] { "stdout_2", "stderr_2", "syslog_2", "std_final" };
     verifyContainerLogs(logAggregationService, application,
-      new ContainerId[] { container }, logFiles3WithFinalLog, 4, true);
+        new ContainerId[] {container}, logFiles3WithFinalLog, 5, true,
+        EMPTY_FILES);
     logAggregationService.stop();
     assertEquals(0, logAggregationService.getNumAggregators());
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
index dee2a20..5a0715e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
 
+import org.apache.hadoop.yarn.server.scheduler.DistributedOpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
@@ -232,7 +233,8 @@
     };
     nmContainerTokenSecretManager.setMasterKey(mKey);
     OpportunisticContainerAllocator containerAllocator =
-        new OpportunisticContainerAllocator(nmContainerTokenSecretManager);
+        new DistributedOpportunisticContainerAllocator(
+            nmContainerTokenSecretManager);
 
     NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
         new NMTokenSecretManagerInNM();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index 740af8f..ad17ae8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -28,6 +28,7 @@
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -118,6 +119,7 @@
   private static LocalDirsHandlerService dirsHandler;
   private static WebApp nmWebApp;
   private static final String LOGSERVICEWSADDR = "test:1234";
+  private static final String LOG_MESSAGE = "log message\n";
 
   private static final File testRootDir = new File("target",
       TestNMWebServices.class.getSimpleName());
@@ -441,20 +443,26 @@
 
   @Test (timeout = 5000)
   public void testContainerLogsWithNewAPI() throws Exception {
-    final ContainerId containerId = BuilderUtils.newContainerId(0, 0, 0, 0);
-    WebResource r = resource();
-    r = r.path("ws").path("v1").path("node").path("containers")
-        .path(containerId.toString()).path("logs");
-    testContainerLogs(r, containerId);
+    ContainerId containerId0 = BuilderUtils.newContainerId(0, 0, 0, 0);
+    WebResource r0 = resource();
+    r0 = r0.path("ws").path("v1").path("node").path("containers")
+        .path(containerId0.toString()).path("logs");
+    testContainerLogs(r0, containerId0, LOG_MESSAGE);
+
+    ContainerId containerId1 = BuilderUtils.newContainerId(0, 0, 0, 1);
+    WebResource r1 = resource();
+    r1 = r1.path("ws").path("v1").path("node").path("containers")
+            .path(containerId1.toString()).path("logs");
+    testContainerLogs(r1, containerId1, "");
   }
 
   @Test (timeout = 5000)
   public void testContainerLogsWithOldAPI() throws Exception {
-    final ContainerId containerId = BuilderUtils.newContainerId(1, 1, 0, 1);
+    final ContainerId containerId2 = BuilderUtils.newContainerId(1, 1, 0, 2);
     WebResource r = resource();
     r = r.path("ws").path("v1").path("node").path("containerlogs")
-        .path(containerId.toString());
-    testContainerLogs(r, containerId);
+        .path(containerId2.toString());
+    testContainerLogs(r, containerId2, LOG_MESSAGE);
   }
 
   @Test (timeout = 10000)
@@ -583,15 +591,14 @@
         2, json.getJSONArray("assignedGpuDevices").length());
   }
 
-  private void testContainerLogs(WebResource r, ContainerId containerId)
-      throws Exception {
+  private void testContainerLogs(WebResource r, ContainerId containerId,
+      String logMessage) throws Exception {
     final String containerIdStr = containerId.toString();
     final ApplicationAttemptId appAttemptId = containerId
         .getApplicationAttemptId();
     final ApplicationId appId = appAttemptId.getApplicationId();
     final String appIdStr = appId.toString();
     final String filename = "logfile1";
-    final String logMessage = "log message\n";
     nmContext.getApplications().put(appId, new ApplicationImpl(null, "user",
         appId, null, nmContext));
     
@@ -607,6 +614,9 @@
     
     File logFile = new File(path.toUri().getPath());
     logFile.deleteOnExit();
+    if (logFile.getParentFile().exists()) {
+      FileUtils.deleteDirectory(logFile.getParentFile());
+    }
     assertTrue("Failed to create log dir", logFile.getParentFile().mkdirs());
     PrintWriter pw = new PrintWriter(logFile);
     pw.print(logMessage);
@@ -628,8 +638,10 @@
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
     responseLogMessage = getLogContext(responseText);
-    assertEquals(5, responseLogMessage.getBytes().length);
-    assertEquals(new String(logMessage.getBytes(), 0, 5), responseLogMessage);
+    int truncatedLength = Math.min(5, logMessage.getBytes().length);
+    assertEquals(truncatedLength, responseLogMessage.getBytes().length);
+    assertEquals(new String(logMessage.getBytes(), 0, truncatedLength),
+        responseLogMessage);
     assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
 
     // specify the bytes which is larger than the actual file size,
@@ -649,9 +661,10 @@
         .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
     responseText = response.getEntity(String.class);
     responseLogMessage = getLogContext(responseText);
-    assertEquals(5, responseLogMessage.getBytes().length);
+    assertEquals(truncatedLength, responseLogMessage.getBytes().length);
     assertEquals(new String(logMessage.getBytes(),
-        logMessage.getBytes().length - 5, 5), responseLogMessage);
+        logMessage.getBytes().length - truncatedLength, truncatedLength),
+        responseLogMessage);
     assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
 
     response = r.path(filename)
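
The refactor above threads the log body into testContainerLogs so an empty log can be exercised, and the byte-range assertions now derive the expected length instead of hard-coding 5 bytes. A minimal standalone sketch of that head/tail arithmetic (class and method names are mine, not part of the patch):

    public final class LogTruncationSketch {

      // First maxBytes bytes of the message (fewer if the message is shorter).
      static String head(String logMessage, int maxBytes) {
        byte[] b = logMessage.getBytes();
        int n = Math.min(maxBytes, b.length);
        return new String(b, 0, n);
      }

      // Last maxBytes bytes of the message (fewer if the message is shorter).
      static String tail(String logMessage, int maxBytes) {
        byte[] b = logMessage.getBytes();
        int n = Math.min(maxBytes, b.length);
        return new String(b, b.length - n, n);
      }

      public static void main(String[] args) {
        System.out.println(head("log message\n", 5));  // "log m"
        System.out.println(tail("log message\n", 5));  // "sage\n"
        System.out.println(head("", 5).isEmpty());     // true: an empty log stays empty
      }
    }
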
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 190e163..b9b7b70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -285,53 +285,42 @@
           </execution>
         </executions>
       </plugin>
-
-     <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+      <plugin>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
+            <id>src-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-common/src/main/proto</param>
-                <param>${basedir}/../hadoop-yarn-server-common/src/main/proto</param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>yarn_server_resourcemanager_recovery.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../hadoop-yarn-server-common/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
           <execution>
-            <id>compile-test-protoc</id>
-            <goals>
-              <goal>test-protoc</goal>
-            </goals>
+            <id>src-test-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-                <param>${basedir}/src/test/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/test/proto</directory>
-                <includes>
-                  <include>test_client_tokens.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 2b93ca7..f9681e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -695,13 +695,15 @@
           " submitted by user " + user);
       RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST,
           "ClientRMService", applicationId, callerContext,
-          submissionContext.getQueue());
+          submissionContext.getQueue(),
+          submissionContext.getNodeLabelExpression());
     } catch (YarnException e) {
       LOG.info("Exception in submitting " + applicationId, e);
       RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST,
           e.getMessage(), "ClientRMService",
           "Exception in submitting application", applicationId, callerContext,
-          submissionContext.getQueue());
+          submissionContext.getQueue(),
+          submissionContext.getNodeLabelExpression());
       throw e;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 6763d66..2f28ac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -76,6 +76,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler
     .SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -118,6 +119,7 @@
   private ResourceProfilesManager resourceProfilesManager;
   private boolean timelineServiceV2Enabled;
   private boolean nodelabelsEnabled;
+  private Set<String> exclusiveEnforcedPartitions;
 
   @Override
   public void init(ApplicationMasterServiceContext amsContext,
@@ -128,6 +130,8 @@
         timelineServiceV2Enabled(rmContext.getYarnConfiguration());
     this.nodelabelsEnabled = YarnConfiguration
         .areNodeLabelsEnabled(rmContext.getYarnConfiguration());
+    this.exclusiveEnforcedPartitions = YarnConfiguration
+        .getExclusiveEnforcedPartitions(rmContext.getYarnConfiguration());
   }
 
   @Override
@@ -236,6 +240,10 @@
           && ResourceRequest.ANY.equals(req.getResourceName())) {
         req.setNodeLabelExpression(asc.getNodeLabelExpression());
       }
+      if (ResourceRequest.ANY.equals(req.getResourceName())) {
+        SchedulerUtils.enforcePartitionExclusivity(req,
+            exclusiveEnforcedPartitions, asc.getNodeLabelExpression());
+      }
     }
 
     Resource maximumCapacity =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index a360ed2..d51afa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -20,6 +20,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.server.metrics.OpportunisticSchedulerMetrics;
+import org.apache.hadoop.yarn.server.scheduler.DistributedOpportunisticContainerAllocator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -37,7 +38,6 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol;
 import org.apache.hadoop.yarn.api.impl.pb.service.ApplicationMasterProtocolPBServiceImpl;
 
@@ -76,7 +76,6 @@
 
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
-import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -102,7 +101,7 @@
   private final NodeQueueLoadMonitor nodeMonitor;
   private final OpportunisticContainerAllocator oppContainerAllocator;
 
-  private final int k;
+  private final int numNodes;
 
   private final long cacheRefreshInterval;
   private volatile List<RemoteNode> cachedNodes;
@@ -175,7 +174,7 @@
 
       if (!appAttempt.getApplicationAttemptId().equals(appAttemptId)){
         LOG.error("Calling allocate on previous or removed or non "
-            + "existent application attempt " + appAttemptId);
+            + "existent application attempt {}", appAttemptId);
         return;
       }
 
@@ -233,10 +232,11 @@
         YarnConfiguration.OPP_CONTAINER_MAX_ALLOCATIONS_PER_AM_HEARTBEAT,
         YarnConfiguration.
             DEFAULT_OPP_CONTAINER_MAX_ALLOCATIONS_PER_AM_HEARTBEAT);
-    this.oppContainerAllocator = new OpportunisticContainerAllocator(
-        rmContext.getContainerTokenSecretManager(),
-        maxAllocationsPerAMHeartbeat);
-    this.k = rmContext.getYarnConfiguration().getInt(
+    this.oppContainerAllocator =
+        new DistributedOpportunisticContainerAllocator(
+            rmContext.getContainerTokenSecretManager(),
+            maxAllocationsPerAMHeartbeat);
+    this.numNodes = rmContext.getYarnConfiguration().getInt(
         YarnConfiguration.OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED,
         YarnConfiguration.DEFAULT_OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED);
     long nodeSortInterval = rmContext.getYarnConfiguration().getLong(
@@ -438,12 +438,12 @@
     // <-- IGNORED EVENTS : END -->
     default:
       LOG.error("Unknown event arrived at" +
-          "OpportunisticContainerAllocatorAMService: " + event.toString());
+          "OpportunisticContainerAllocatorAMService: {}", event);
     }
 
   }
 
-  public QueueLimitCalculator getNodeManagerQueueLimitCalculator() {
+  QueueLimitCalculator getNodeManagerQueueLimitCalculator() {
     return nodeMonitor.getThresholdCalculator();
   }
 
@@ -453,7 +453,7 @@
     if ((currTime - lastCacheUpdateTime > cacheRefreshInterval)
         || (cachedNodes == null)) {
       cachedNodes = convertToRemoteNodes(
-          this.nodeMonitor.selectLeastLoadedNodes(this.k));
+          this.nodeMonitor.selectLeastLoadedNodes(this.numNodes));
       if (cachedNodes.size() > 0) {
         lastCacheUpdateTime = currTime;
       }
@@ -483,12 +483,4 @@
     }
     return null;
   }
-
-  private static ApplicationAttemptId getAppAttemptId() throws YarnException {
-    AMRMTokenIdentifier amrmTokenIdentifier =
-        YarnServerSecurityUtils.authorizeRequest();
-    ApplicationAttemptId applicationAttemptId =
-        amrmTokenIdentifier.getApplicationAttemptId();
-    return applicationAttemptId;
-  }
 }
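
Two of the log statements above move from string concatenation to SLF4J's parameterized form. A small sketch of the idiom, assuming nothing beyond slf4j-api on the classpath:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class ParameterizedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      static void report(Object event) {
        // Concatenation builds the string (and calls toString()) up front:
        //   LOG.error("Unknown event: " + event.toString());
        // The {} placeholder defers formatting until the message is emitted:
        LOG.error("Unknown event: {}", event);
      }
    }
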
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 1b91ba0..f4f9793 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -21,6 +21,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -102,6 +103,7 @@
   private YarnAuthorizationProvider authorizer;
   private boolean timelineServiceV2Enabled;
   private boolean nodeLabelsEnabled;
+  private Set<String> exclusiveEnforcedPartitions;
 
   public RMAppManager(RMContext context,
       YarnScheduler scheduler, ApplicationMasterService masterService,
@@ -126,6 +128,8 @@
         timelineServiceV2Enabled(conf);
     this.nodeLabelsEnabled = YarnConfiguration
         .areNodeLabelsEnabled(rmContext.getYarnConfiguration());
+    this.exclusiveEnforcedPartitions = YarnConfiguration
+        .getExclusiveEnforcedPartitions(rmContext.getYarnConfiguration());
   }
 
   /**
@@ -593,6 +597,9 @@
           throw new InvalidResourceRequestException("Invalid resource request, "
               + "no resource request specified with " + ResourceRequest.ANY);
         }
+        SchedulerUtils.enforcePartitionExclusivity(anyReq,
+            exclusiveEnforcedPartitions,
+            submissionContext.getNodeLabelExpression());
 
         // Make sure that all of the requests agree with the ANY request
         // and have correct values
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
index b24cac9..854b6ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
@@ -271,6 +271,16 @@
     }
   }
 
+  public static void logSuccess(String user, String operation, String target,
+      ApplicationId appId, CallerContext callerContext, String queueName,
+      String partition) {
+    if (LOG.isInfoEnabled()) {
+      LOG.info(
+          createSuccessLog(user, operation, target, appId, null, null, null,
+              callerContext, Server.getRemoteIp(), queueName, partition));
+    }
+  }
+
   /**
    * Create a readable and parseable audit log string for a successful event.
    *
@@ -391,7 +401,8 @@
   static String createFailureLog(String user, String operation, String perm,
       String target, String description, ApplicationId appId,
       ApplicationAttemptId attemptId, ContainerId containerId,
-      Resource resource, CallerContext callerContext, String queueName) {
+      Resource resource, CallerContext callerContext, String queueName,
+      String partition) {
     StringBuilder b = createStringBuilderForFailureLog(user,
         operation, target, description, perm);
     if (appId != null) {
@@ -410,6 +421,10 @@
     if (queueName != null) {
       add(Keys.QUEUENAME, queueName, b);
     }
+    if (partition != null) {
+      add(Keys.NODELABEL, partition, b);
+    }
+
     return b.toString();
   }
 
@@ -420,7 +435,7 @@
       String target, String description, ApplicationId appId,
       ApplicationAttemptId attemptId, ContainerId containerId, Resource resource) {
     return createFailureLog(user, operation, perm, target, description, appId,
-        attemptId, containerId, resource, null, null);
+        attemptId, containerId, resource, null, null, null);
   }
 
   /**
@@ -492,7 +507,7 @@
       CallerContext callerContext) {
     if (LOG.isWarnEnabled()) {
       LOG.warn(createFailureLog(user, operation, perm, target, description,
-          appId, null, null, null, callerContext, null));
+          appId, null, null, null, callerContext, null, null));
     }
   }
 
@@ -501,7 +516,7 @@
       CallerContext callerContext, String queueName) {
     if (LOG.isWarnEnabled()) {
       LOG.warn(createFailureLog(user, operation, perm, target, description,
-          appId, null, null, null, callerContext, queueName));
+          appId, null, null, null, callerContext, queueName, null));
     }
   }
 
@@ -533,7 +548,7 @@
       String queueName) {
     if (LOG.isWarnEnabled()) {
       LOG.warn(createFailureLog(user, operation, perm, target, description,
-          appId, null, null, null, null, queueName));
+          appId, null, null, null, null, queueName, null));
     }
   }
 
@@ -582,6 +597,34 @@
   }
 
   /**
+   * Create a readable and parseable audit log string for a failed event.
+   *
+   * @param user User who made the service request.
+   * @param operation Operation requested by the user.
+   * @param perm Target permissions.
+   * @param target The target on which the operation is being performed.
+   * @param description Some additional information as to why the operation
+   *                    failed.
+   * @param appId ApplicationId in which operation was performed.
+   * @param callerContext Caller context
+   * @param queueName Name of queue.
+   * @param partition Name of labeled partition.
+   *
+   * <br><br>
+   * Note that the {@link RMAuditLogger} uses tabs ('\t') as a key-val delimiter
+   * and hence the value fields should not contain tabs ('\t').
+   */
+  public static void logFailure(String user, String operation, String perm,
+      String target, String description, ApplicationId appId,
+      CallerContext callerContext, String queueName, String partition) {
+    if (LOG.isWarnEnabled()) {
+      LOG.warn(
+          createFailureLog(user, operation, perm, target, description, appId,
+              null, null, null, callerContext, queueName, partition));
+    }
+  }
+
+  /**
    * A helper api to add remote IP address.
    */
   static void addRemoteIP(StringBuilder b) {
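
A hedged sketch of how the new partition-aware overloads might be called; the user, queue, and partition values are illustrative only, and the partition simply shows up as an extra NODELABEL key in the tab-delimited audit line:

    import org.apache.hadoop.ipc.CallerContext;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
    import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;

    public final class AuditPartitionSketch {
      public static void main(String[] args) {
        ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), 1);
        CallerContext ctx = new CallerContext.Builder("sketch-context").build();

        // Success overload: queue name plus the app's node label expression.
        RMAuditLogger.logSuccess("alice", AuditConstants.SUBMIT_APP_REQUEST,
            "ClientRMService", appId, ctx, "root.default", "gpu");

        // Failure overload mirrors it. The audit line gains
        // "\tQUEUENAME=root.default" and "\tNODELABEL=gpu".
        RMAuditLogger.logFailure("alice", AuditConstants.SUBMIT_APP_REQUEST,
            "UNKNOWN", "ClientRMService", "Exception in submitting application",
            appId, ctx, "root.default", "gpu");
      }
    }
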
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 6e56f3d..9e843df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -65,6 +65,8 @@
    */
   Configuration getConfiguration();
 
+  void formatConfigurationInStore(Configuration conf) throws Exception;
+
   /**
    * Closes the configuration provider, releasing any required resources.
    * @throws IOException on failure to close
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index cc7f585..8149ab4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -205,6 +205,8 @@
   private AtomicLong unconfirmedAllocatedMem = new AtomicLong();
   private AtomicInteger unconfirmedAllocatedVcores = new AtomicInteger();
 
+  private String nodeLabelExpression;
+
   public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, 
       String user, Queue queue, AbstractUsersManager abstractUsersManager,
       RMContext rmContext) {
@@ -226,6 +228,8 @@
         unmanagedAM = appSubmissionContext.getUnmanagedAM();
         this.logAggregationContext =
             appSubmissionContext.getLogAggregationContext();
+        this.nodeLabelExpression =
+            appSubmissionContext.getNodeLabelExpression();
       }
       applicationSchedulingEnvs = rmApp.getApplicationSchedulingEnvs();
     }
@@ -1469,4 +1473,9 @@
   public Map<String, String> getApplicationSchedulingEnvs() {
     return this.applicationSchedulingEnvs;
   }
+
+  @Override
+  public String getPartition() {
+    return nodeLabelExpression == null ? "" : nodeLabelExpression;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index c82bf05..7ec1c33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -302,6 +302,29 @@
   }
 
   /**
+   * When the RM enforces partition exclusivity for an enforced partition "x":
+   * 1) If the request's label is "x" but the app's label is not "x",
+   *    override the request's label with the app's label.
+   * 2) If the app's label is "x", force the request's label to "x".
+   * @param resReq resource request
+   * @param enforcedPartitions set of exclusive enforced partitions
+   * @param appLabel app's node label expression
+   */
+  public static void enforcePartitionExclusivity(ResourceRequest resReq,
+      Set<String> enforcedPartitions, String appLabel) {
+    if (enforcedPartitions == null || enforcedPartitions.isEmpty()) {
+      return;
+    }
+    if (!enforcedPartitions.contains(appLabel)
+        && enforcedPartitions.contains(resReq.getNodeLabelExpression())) {
+      resReq.setNodeLabelExpression(appLabel);
+    }
+    if (enforcedPartitions.contains(appLabel)) {
+      resReq.setNodeLabelExpression(appLabel);
+    }
+  }
+
+  /**
    * Utility method to validate a resource request, by ensuring that the
    * requested memory/vcore is non-negative and not greater than max
    *
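
A small standalone sketch exercising the two rules of enforcePartitionExclusivity; the labels and request values are illustrative:

    import java.util.Collections;
    import java.util.Set;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;

    public final class EnforcePartitionSketch {
      public static void main(String[] args) {
        Set<String> enforced = Collections.singleton("x");
        ResourceRequest req = ResourceRequest.newInstance(
            Priority.newInstance(0), ResourceRequest.ANY,
            Resource.newInstance(1024, 1), 1);

        // Rule 1: the request asks for enforced partition "x" but the app is
        // not labeled "x" -> the request is rewritten to the app's label.
        req.setNodeLabelExpression("x");
        SchedulerUtils.enforcePartitionExclusivity(req, enforced, "other");
        System.out.println(req.getNodeLabelExpression());  // other

        // Rule 2: the app itself is labeled "x" -> every ANY request is
        // forced onto "x", whatever it originally asked for.
        req.setNodeLabelExpression("y");
        SchedulerUtils.enforcePartitionExclusivity(req, enforced, "x");
        System.out.println(req.getNodeLabelExpression());  // x
      }
    }
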
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 990c8f3..15b1ef9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.MultiNodePolicySpec;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicyWithExclusivePartitions;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
@@ -162,6 +163,9 @@
 
   public static final String FAIR_APP_ORDERING_POLICY = "fair";
 
+  public static final String FIFO_WITH_PARTITIONS_APP_ORDERING_POLICY
+      = "fifo-with-partitions";
+
   public static final String DEFAULT_APP_ORDERING_POLICY =
       FIFO_APP_ORDERING_POLICY;
   
@@ -561,6 +565,9 @@
     if (policyType.trim().equals(FAIR_APP_ORDERING_POLICY)) {
        policyType = FairOrderingPolicy.class.getName();
     }
+    if (policyType.trim().equals(FIFO_WITH_PARTITIONS_APP_ORDERING_POLICY)) {
+      policyType = FifoOrderingPolicyWithExclusivePartitions.class.getName();
+    }
     try {
       orderingPolicy = (OrderingPolicy<S>)
         Class.forName(policyType).newInstance();
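
For reference, a hedged sketch of the two capacity-scheduler properties that would select the new policy for a queue; the queue path root.a and the partition name gpu are illustrative:

    import org.apache.hadoop.conf.Configuration;

    final class OrderingPolicyConfigSketch {
      static Configuration exclusivePartitionQueueConf() {
        Configuration conf = new Configuration(false);
        // Pick the policy by the short name introduced above...
        conf.set("yarn.scheduler.capacity.root.a.ordering-policy",
            "fifo-with-partitions");
        // ...and list the partitions that get their own FIFO bucket.
        conf.set("yarn.scheduler.capacity.root.a"
            + ".ordering-policy.exclusive-enforced-partitions", "gpu");
        return conf;
      }
    }
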
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 6368ee5..b883a9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -69,6 +69,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSetUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicyForPendingApps;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.IteratorSelector;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.server.utils.Lock.NoLock;
@@ -804,7 +805,8 @@
       }
 
       for (Iterator<FiCaSchedulerApp> fsApp =
-           getPendingAppsOrderingPolicy().getAssignmentIterator();
+           getPendingAppsOrderingPolicy()
+               .getAssignmentIterator(IteratorSelector.EMPTY_ITERATOR_SELECTOR);
            fsApp.hasNext(); ) {
         FiCaSchedulerApp application = fsApp.next();
         ApplicationId applicationId = application.getApplicationId();
@@ -1095,8 +1097,10 @@
 
     Map<String, CachedUserLimit> userLimits = new HashMap<>();
     boolean needAssignToQueueCheck = true;
+    IteratorSelector sel = new IteratorSelector();
+    sel.setPartition(candidates.getPartition());
     for (Iterator<FiCaSchedulerApp> assignmentIterator =
-         orderingPolicy.getAssignmentIterator();
+         orderingPolicy.getAssignmentIterator(sel);
          assignmentIterator.hasNext(); ) {
       FiCaSchedulerApp application = assignmentIterator.next();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
index 3ef97a0..80053be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/FSSchedulerConfigurationStore.java
@@ -156,6 +156,11 @@
         + finalConfigPath);
   }
 
+  @Override
+  public void format() throws Exception {
+    fileSystem.delete(schedulerConfDir, true);
+  }
+
   private Path getFinalConfigPath(Path tempPath) {
     String tempConfigPathStr = tempPath.getName();
     if (!tempConfigPathStr.endsWith(TMP)) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
index d69c236..4871443 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -61,6 +61,11 @@
   }
 
   @Override
+  public void format() {
+    this.schedConf = null;
+  }
+
+  @Override
   public synchronized Configuration retrieve() {
     return schedConf;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
index 4eb328c..743d7ef4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/LeveldbConfigurationStore.java
@@ -98,6 +98,13 @@
     }
   }
 
+  @Override
+  public void format() throws Exception {
+    close();
+    FileSystem fs = FileSystem.getLocal(conf);
+    fs.delete(getStorageDir(), true);
+  }
+
   private void initDatabase(Configuration config) throws Exception {
     Path storeRoot = createStorageDir();
     Options options = new Options();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
index 0677bd8..41b9b25 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/MutableCSConfigurationProvider.java
@@ -40,6 +40,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * CS configuration provider which implements
@@ -58,6 +59,9 @@
   private ConfigurationMutationACLPolicy aclMutationPolicy;
   private RMContext rmContext;
 
+  private final ReentrantReadWriteLock formatLock =
+      new ReentrantReadWriteLock();
+
   public MutableCSConfigurationProvider(RMContext rmContext) {
     this.rmContext = rmContext;
   }
@@ -152,16 +156,50 @@
   }
 
   @Override
+  public void formatConfigurationInStore(Configuration config)
+      throws Exception {
+    formatLock.writeLock().lock();
+    try {
+      confStore.format();
+      Configuration initialSchedConf = new Configuration(false);
+      initialSchedConf.addResource(YarnConfiguration.CS_CONFIGURATION_FILE);
+      this.schedConf = new Configuration(false);
+      // We need to explicitly set the key-values in schedConf, otherwise
+      // these configuration keys cannot be deleted when
+      // configuration is reloaded.
+      for (Map.Entry<String, String> kv : initialSchedConf) {
+        schedConf.set(kv.getKey(), kv.getValue());
+      }
+      confStore.initialize(config, schedConf, rmContext);
+      confStore.checkVersion();
+    } catch (Exception e) {
+      throw new IOException(e);
+    } finally {
+      formatLock.writeLock().unlock();
+    }
+  }
+
+  @Override
   public void confirmPendingMutation(boolean isValid) throws Exception {
-    confStore.confirmMutation(isValid);
-    if (!isValid) {
-      schedConf = oldConf;
+    formatLock.readLock().lock();
+    try {
+      confStore.confirmMutation(isValid);
+      if (!isValid) {
+        schedConf = oldConf;
+      }
+    } finally {
+      formatLock.readLock().unlock();
     }
   }
 
   @Override
   public void reloadConfigurationFromStore() throws Exception {
-    schedConf = confStore.retrieve();
+    formatLock.readLock().lock();
+    try {
+      schedConf = confStore.retrieve();
+    } finally {
+      formatLock.readLock().unlock();
+    }
   }
 
   private List<String> getSiblingQueues(String queuePath, Configuration conf) {
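
The formatLock added above gives format() exclusive access while ordinary mutations and reloads share a read lock. A generic sketch of that read/write-lock pattern, using made-up names rather than the YARN classes:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class FormatLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private volatile String state = "initial-config";

      void mutate(String newState) {
        lock.readLock().lock();    // shared: mutations/reloads may interleave
        try {
          state = newState;
        } finally {
          lock.readLock().unlock();
        }
      }

      void format() {
        lock.writeLock().lock();   // exclusive: waits for in-flight mutations
        try {
          state = null;            // wipe, then rebuild from the bootstrap file
        } finally {
          lock.writeLock().unlock();
        }
      }
    }
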
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
index 2cc831f..334c962 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/YarnConfigurationStore.java
@@ -125,6 +125,13 @@
    */
   public abstract Configuration retrieve() throws IOException;
 
+
+  /**
+   * Format the persisted configuration.
+   * @throws Exception on failure to format
+   */
+  public abstract void format() throws Exception;
+
   /**
    * Get a list of confirmed configuration mutations starting from a given id.
    * @param fromId id from which to start getting mutations, inclusive
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
index 34c73ec..d3fab39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
@@ -133,6 +133,11 @@
   }
 
   @Override
+  public void format() throws Exception {
+    zkManager.delete(confStorePath);
+  }
+
+  @Override
   public synchronized void storeVersion() throws Exception {
     byte[] data =
         ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
index b83b2ae..4ee4450 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
@@ -47,13 +47,13 @@
   public Collection<S> getSchedulableEntities() {
     return schedulableEntities;
   }
-  
+
   @Override
-  public Iterator<S> getAssignmentIterator() {
+  public Iterator<S> getAssignmentIterator(IteratorSelector sel) {
     reorderScheduleEntities();
     return schedulableEntities.iterator();
   }
-  
+
   @Override
   public Iterator<S> getPreemptionIterator() {
     reorderScheduleEntities();
@@ -138,5 +138,5 @@
 
   @Override
   public abstract String getInfo();
-  
+
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoOrderingPolicyWithExclusivePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoOrderingPolicyWithExclusivePartitions.java
new file mode 100644
index 0000000..1b1ef66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoOrderingPolicyWithExclusivePartitions.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+
+
+/**
+ * Similar to {@link FifoOrderingPolicy}, but with separate ordering policies
+ * for each partition in
+ * {@code yarn.scheduler.capacity.<queue-path>.ordering-policy.exclusive-enforced-partitions}.
+ */
+public class FifoOrderingPolicyWithExclusivePartitions<S extends SchedulableEntity>
+    implements OrderingPolicy<S> {
+
+  private static final String DEFAULT_PARTITION = "DEFAULT_PARTITION";
+
+  private Map<String, OrderingPolicy<S>> orderingPolicies;
+
+  public FifoOrderingPolicyWithExclusivePartitions() {
+    this.orderingPolicies = new HashMap<>();
+    this.orderingPolicies.put(DEFAULT_PARTITION, new FifoOrderingPolicy());
+  }
+
+  public Collection<S> getSchedulableEntities() {
+    return unionOrderingPolicies().getSchedulableEntities();
+  }
+
+  public Iterator<S> getAssignmentIterator(IteratorSelector sel) {
+    // Return schedulable entities only from filtered partition
+    return getPartitionOrderingPolicy(sel.getPartition())
+        .getAssignmentIterator(sel);
+  }
+
+  public Iterator<S> getPreemptionIterator() {
+    // Entities from all partitions should be preemptible
+    return unionOrderingPolicies().getPreemptionIterator();
+  }
+
+  /**
+   * Union all schedulable entities from all ordering policies.
+   * @return ordering policy containing all schedulable entities
+   */
+  private OrderingPolicy<S> unionOrderingPolicies() {
+    OrderingPolicy<S> ret = new FifoOrderingPolicy<>();
+    for (Map.Entry<String, OrderingPolicy<S>> entry
+        : orderingPolicies.entrySet()) {
+      ret.addAllSchedulableEntities(entry.getValue().getSchedulableEntities());
+    }
+    return ret;
+  }
+
+  public void addSchedulableEntity(S s) {
+    getPartitionOrderingPolicy(s.getPartition()).addSchedulableEntity(s);
+  }
+
+  public boolean removeSchedulableEntity(S s) {
+    return getPartitionOrderingPolicy(s.getPartition())
+        .removeSchedulableEntity(s);
+  }
+
+  public void addAllSchedulableEntities(Collection<S> sc) {
+    for (S entity : sc) {
+      getPartitionOrderingPolicy(entity.getPartition())
+          .addSchedulableEntity(entity);
+    }
+  }
+
+  public int getNumSchedulableEntities() {
+    // Return total number of schedulable entities, to maintain parity with
+    // existing FifoOrderingPolicy e.g. when determining if queue has reached
+    // its max app limit
+    int ret = 0;
+    for (Map.Entry<String, OrderingPolicy<S>> entry
+        : orderingPolicies.entrySet()) {
+      ret += entry.getValue().getNumSchedulableEntities();
+    }
+    return ret;
+  }
+
+  public void containerAllocated(S schedulableEntity, RMContainer r) {
+    getPartitionOrderingPolicy(schedulableEntity.getPartition())
+        .containerAllocated(schedulableEntity, r);
+  }
+
+  public void containerReleased(S schedulableEntity, RMContainer r) {
+    getPartitionOrderingPolicy(schedulableEntity.getPartition())
+        .containerReleased(schedulableEntity, r);
+  }
+
+  public void demandUpdated(S schedulableEntity) {
+    getPartitionOrderingPolicy(schedulableEntity.getPartition())
+        .demandUpdated(schedulableEntity);
+  }
+
+  @Override
+  public void configure(Map<String, String> conf) {
+    if (conf == null) {
+      return;
+    }
+    String partitions =
+        conf.get(YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX);
+    if (partitions != null) {
+      for (String partition : partitions.split(",")) {
+        partition = partition.trim();
+        if (!partition.isEmpty()) {
+          this.orderingPolicies.put(partition, new FifoOrderingPolicy());
+        }
+      }
+    }
+  }
+
+  @Override
+  public String getInfo() {
+    return "FifoOrderingPolicyWithExclusivePartitions";
+  }
+
+  private OrderingPolicy<S> getPartitionOrderingPolicy(String partition) {
+    String keyPartition = orderingPolicies.containsKey(partition) ?
+        partition : DEFAULT_PARTITION;
+    return orderingPolicies.get(keyPartition);
+  }
+}
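
A rough usage sketch of the new policy. Mockito is used here purely to fabricate SchedulableEntity instances (a real entity also supplies an id, priority and resource usage used for FIFO ordering), so the exact stubbing a real test needs may differ:

    import java.util.Collections;
    import java.util.Iterator;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicyWithExclusivePartitions;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.IteratorSelector;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;

    final class ExclusivePartitionPolicySketch {
      public static void main(String[] args) {
        FifoOrderingPolicyWithExclusivePartitions<SchedulableEntity> policy =
            new FifoOrderingPolicyWithExclusivePartitions<>();
        // One extra FIFO bucket for the enforced partition "gpu".
        policy.configure(Collections.singletonMap(
            YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX, "gpu"));

        SchedulableEntity gpuApp = mock(SchedulableEntity.class);
        when(gpuApp.getPartition()).thenReturn("gpu");
        SchedulableEntity defaultApp = mock(SchedulableEntity.class);
        when(defaultApp.getPartition()).thenReturn("");

        policy.addSchedulableEntity(gpuApp);      // lands in the "gpu" bucket
        policy.addSchedulableEntity(defaultApp);  // lands in the DEFAULT bucket

        IteratorSelector sel = new IteratorSelector();
        sel.setPartition("gpu");
        Iterator<SchedulableEntity> it = policy.getAssignmentIterator(sel);
        int onGpu = 0;
        while (it.hasNext()) {
          it.next();
          onGpu++;
        }
        System.out.println(onGpu);                               // 1
        System.out.println(policy.getNumSchedulableEntities());  // 2 (both counted)
      }
    }
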
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/IteratorSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/IteratorSelector.java
new file mode 100644
index 0000000..0e9b55f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/IteratorSelector.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
+
+/**
+ * IteratorSelector contains information needed to tell an
+ * {@link OrderingPolicy} what to return in an iterator.
+ */
+public class IteratorSelector {
+
+  public static final IteratorSelector EMPTY_ITERATOR_SELECTOR =
+      new IteratorSelector();
+
+  private String partition;
+
+  /**
+   * The partition for this iterator selector.
+   * @return partition
+   */
+  public String getPartition() {
+    return this.partition;
+  }
+
+  /**
+   * Set partition for this iterator selector.
+   * @param p partition
+   */
+  public void setPartition(String p) {
+    this.partition = p;
+  }
+
+}
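
LeafQueue above now tells the ordering policy which partition is being scheduled by passing a selector like this one. A hedged sketch of that call pattern (the helper class and method are mine, not part of the patch):

    import java.util.Iterator;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.IteratorSelector;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;

    final class PartitionAssignmentSketch {
      static void assignOnPartition(OrderingPolicy<FiCaSchedulerApp> policy,
          String partition) {
        IteratorSelector sel = new IteratorSelector();
        sel.setPartition(partition);
        for (Iterator<FiCaSchedulerApp> it = policy.getAssignmentIterator(sel);
             it.hasNext();) {
          FiCaSchedulerApp app = it.next();
          // ... attempt container assignment for 'app' on this partition ...
          System.out.println("would schedule " + app.getApplicationId());
        }
        // Call sites that are not partition-aware pass the shared empty selector:
        policy.getAssignmentIterator(IteratorSelector.EMPTY_ITERATOR_SELECTOR);
      }
    }
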
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
index 9aacc7e..66b6a59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
@@ -45,10 +45,11 @@
   /**
    * Return an iterator over the collection of {@link SchedulableEntity}
    * objects which orders them for container assignment.
+   * @param sel the {@link IteratorSelector} to filter with
    * @return an iterator over the collection of {@link SchedulableEntity}
    * objects
    */
-  public Iterator<S> getAssignmentIterator();
+  Iterator<S> getAssignmentIterator(IteratorSelector sel);
 
   /**
    * Return an iterator over the collection of {@link SchedulableEntity}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
index 41b83ce..be83556 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/SchedulableEntity.java
@@ -55,4 +55,9 @@
    */
   public boolean isRecovering();
 
+  /**
+   * Get partition corresponding to this entity.
+   * @return partition
+   */
+  String getPartition();
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index f2d2b82..6cc1e29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -48,6 +48,9 @@
   /** Path for {@code RMWebServices#updateSchedulerConfiguration}. */
   public static final String SCHEDULER_CONF = "/scheduler-conf";
 
+  /** Path for {@code RMWebServices#formatSchedulerConfiguration}. */
+  public static final String FORMAT_SCHEDULER_CONF = "/scheduler-conf/format";
+
   /** Path for {@code RMWebServiceProtocol#dumpSchedulerLogs}. */
   public static final String SCHEDULER_LOGS = "/scheduler/logs";
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 6a413d3..d1e04fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -2564,6 +2564,37 @@
     return rm.getClientRMService().getContainers(request).getContainerList();
   }
 
+  @GET
+  @Path(RMWSConsts.FORMAT_SCHEDULER_CONF)
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  public Response formatSchedulerConfiguration(@Context HttpServletRequest hsr)
+      throws AuthorizationException {
+    // Only admin user allowed to format scheduler conf in configuration store
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    initForWritableEndpoints(callerUGI, true);
+
+    ResourceScheduler scheduler = rm.getResourceScheduler();
+    if (scheduler instanceof MutableConfScheduler
+        && ((MutableConfScheduler) scheduler).isConfigurationMutable()) {
+      try {
+        MutableConfigurationProvider mutableConfigurationProvider =
+            ((MutableConfScheduler) scheduler).getMutableConfProvider();
+        mutableConfigurationProvider.formatConfigurationInStore(conf);
+        return Response.status(Status.OK).entity("Configuration under " +
+            "store successfully formatted.").build();
+      } catch (Exception e) {
+        LOG.error("Exception thrown when formating configuration", e);
+        return Response.status(Status.BAD_REQUEST).entity(e.getMessage())
+            .build();
+      }
+    } else {
+      return Response.status(Status.BAD_REQUEST)
+          .entity("Configuration change only supported by " +
+          "MutableConfScheduler.").build();
+    }
+  }
+
   @PUT
   @Path(RMWSConsts.SCHEDULER_CONF)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
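
The new endpoint is an admin-only GET under the RM web services root, i.e. /ws/v1/cluster/scheduler-conf/format. A hedged sketch of calling it from Java; the RM address is assumed, and a secured cluster would additionally need admin (e.g. SPNEGO) credentials:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public final class FormatSchedulerConfSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL(
            "http://rm-host:8088/ws/v1/cluster/scheduler-conf/format");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        int code = conn.getResponseCode();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
            code < 400 ? conn.getInputStream() : conn.getErrorStream(),
            StandardCharsets.UTF_8))) {
          // 200 with a short confirmation message on success,
          // 400 if the scheduler's configuration is not mutable.
          System.out.println(code + ": " + in.readLine());
        }
      }
    }
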
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
index 35c77ab..8ac6615 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnServerResourceManagerRecoveryProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
index 7d8eeab..84ea2d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAuditLogger.java
@@ -293,16 +293,17 @@
       ApplicationAttemptId attemptId, ContainerId containerId,
       CallerContext callerContext, Resource resource) {
     testFailureLogFormatHelper(checkIP, appId, attemptId, containerId,
-        callerContext, resource, null);
+        callerContext, resource, null, null, null);
   }
 
   private void testFailureLogFormatHelper(boolean checkIP, ApplicationId appId,
         ApplicationAttemptId attemptId, ContainerId containerId,
         CallerContext callerContext, Resource resource,
-        RMAuditLogger.ArgsBuilder args) {
+        String queueName, String partition, RMAuditLogger.ArgsBuilder args) {
     String fLog = args == null ?
       RMAuditLogger.createFailureLog(USER, OPERATION, PERM, TARGET, DESC,
-          appId, attemptId, containerId, resource, callerContext, null) :
+          appId, attemptId, containerId, resource, callerContext,
+          queueName, partition) :
         RMAuditLogger.createFailureLog(USER, OPERATION, PERM, TARGET, DESC,
             args);
     StringBuilder expLog = new StringBuilder();
@@ -334,6 +335,12 @@
         expLog.append("\tCALLERSIGNATURE=signature");
       }
     }
+    if (queueName != null) {
+      expLog.append("\tQUEUENAME=" + QUEUE);
+    }
+    if (partition != null) {
+      expLog.append("\tNODELABEL=" + PARTITION);
+    }
     if (args != null) {
       expLog.append("\tQUEUENAME=root");
       expLog.append("\tRECURSIVE=true");
@@ -364,10 +371,16 @@
     testFailureLogFormatHelper(checkIP, APPID, ATTEMPTID, CONTAINERID,
         new CallerContext.Builder(CALLER_CONTEXT).setSignature(CALLER_SIGNATURE)
             .build(), RESOURCE);
+    testFailureLogFormatHelper(checkIP, APPID, ATTEMPTID, CONTAINERID,
+        new CallerContext.Builder(CALLER_CONTEXT).setSignature(CALLER_SIGNATURE)
+            .build(), RESOURCE, QUEUE, null, null);
+    testFailureLogFormatHelper(checkIP, APPID, ATTEMPTID, CONTAINERID,
+        new CallerContext.Builder(CALLER_CONTEXT).setSignature(CALLER_SIGNATURE)
+            .build(), RESOURCE, QUEUE, PARTITION, null);
     RMAuditLogger.ArgsBuilder args = new RMAuditLogger.ArgsBuilder()
         .append(Keys.QUEUENAME, QUEUE).append(Keys.RECURSIVE, "true");
     testFailureLogFormatHelper(checkIP, null, null, null, null, null,
-        args);
+        null, null, args);
   }
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 623cdfb..645d663 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -38,6 +38,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -978,6 +979,147 @@
     System.err.println("Failed to wait scheduler application attempt stopped.");
   }
 
+  @Test
+  public void testEnforcePartitionExclusivity() {
+    String enforcedExclusiveLabel = "x";
+    Set<String> enforcedExclusiveLabelSet =
+        Collections.singleton(enforcedExclusiveLabel);
+    String dummyLabel = "y";
+    String appLabel = "appLabel";
+    ResourceRequest rr = BuilderUtils.newResourceRequest(
+        mock(Priority.class), ResourceRequest.ANY, mock(Resource.class), 1);
+
+    // RR label unset and app label does not match. Nothing should happen.
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertNull(rr.getNodeLabelExpression());
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertNull(rr.getNodeLabelExpression());
+
+    // RR label and app label do not match. Nothing should happen.
+    rr.setNodeLabelExpression(dummyLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertEquals(dummyLabel, rr.getNodeLabelExpression());
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertEquals(dummyLabel, rr.getNodeLabelExpression());
+
+    // RR label matches but app label does not. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(enforcedExclusiveLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertNull(rr.getNodeLabelExpression());
+    rr.setNodeLabelExpression(enforcedExclusiveLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertEquals(appLabel, rr.getNodeLabelExpression());
+
+    // RR label unset and app label matches. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(null);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedExclusiveLabel);
+    Assert.assertEquals(enforcedExclusiveLabel, rr.getNodeLabelExpression());
+
+    // RR label does not match and app label matches. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(dummyLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedExclusiveLabel);
+    Assert.assertEquals(enforcedExclusiveLabel, rr.getNodeLabelExpression());
+
+    // RR label and app label matches. Nothing should happen.
+    rr.setNodeLabelExpression(enforcedExclusiveLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedExclusiveLabel);
+    Assert.assertEquals(enforcedExclusiveLabel, rr.getNodeLabelExpression());
+
+    // Unconfigured label: nothing should happen.
+    rr.setNodeLabelExpression(null);
+    SchedulerUtils.enforcePartitionExclusivity(rr, null,
+        appLabel);
+    Assert.assertNull(rr.getNodeLabelExpression());
+    rr.setNodeLabelExpression(dummyLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, null,
+        appLabel);
+    Assert.assertEquals(dummyLabel, rr.getNodeLabelExpression());
+    rr.setNodeLabelExpression(enforcedExclusiveLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, null,
+        appLabel);
+    Assert.assertEquals(enforcedExclusiveLabel, rr.getNodeLabelExpression());
+  }
+
+  @Test
+  public void testEnforcePartitionExclusivityMultipleLabels() {
+    String enforcedLabel1 = "x";
+    String enforcedLabel2 = "y";
+    Set<String> enforcedExclusiveLabelSet = new HashSet<>();
+    enforcedExclusiveLabelSet.add(enforcedLabel1);
+    enforcedExclusiveLabelSet.add(enforcedLabel2);
+    String dummyLabel = "dummyLabel";
+    String appLabel = "appLabel";
+    ResourceRequest rr = BuilderUtils.newResourceRequest(
+        mock(Priority.class), ResourceRequest.ANY, mock(Resource.class), 1);
+
+    // RR label unset and app label does not match. Nothing should happen.
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertNull(rr.getNodeLabelExpression());
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertNull(rr.getNodeLabelExpression());
+
+    // RR label and app label do not match. Nothing should happen.
+    rr.setNodeLabelExpression(dummyLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertEquals(dummyLabel, rr.getNodeLabelExpression());
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertEquals(dummyLabel, rr.getNodeLabelExpression());
+
+    // RR label matches but app label does not. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(enforcedLabel1);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        null);
+    Assert.assertNull(rr.getNodeLabelExpression());
+    rr.setNodeLabelExpression(enforcedLabel2);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        appLabel);
+    Assert.assertEquals(appLabel, rr.getNodeLabelExpression());
+
+    // RR label unset and app label matches. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(null);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedLabel1);
+    Assert.assertEquals(enforcedLabel1, rr.getNodeLabelExpression());
+
+    // RR label does not match and app label matches. RR label should be set
+    // to app label
+    rr.setNodeLabelExpression(dummyLabel);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedLabel2);
+    Assert.assertEquals(enforcedLabel2, rr.getNodeLabelExpression());
+
+    // RR label and app label matches. Nothing should happen.
+    rr.setNodeLabelExpression(enforcedLabel1);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedLabel1);
+    Assert.assertEquals(enforcedLabel1, rr.getNodeLabelExpression());
+
+    // RR label and app label don't match, but they're both enforced labels.
+    // RR label should be set to app label.
+    rr.setNodeLabelExpression(enforcedLabel2);
+    SchedulerUtils.enforcePartitionExclusivity(rr, enforcedExclusiveLabelSet,
+        enforcedLabel1);
+    Assert.assertEquals(enforcedLabel1, rr.getNodeLabelExpression());
+  }
+
   public static SchedulerApplication<SchedulerApplicationAttempt>
       verifyAppAddedAndRemovedFromScheduler(
           Map<ApplicationId, SchedulerApplication<SchedulerApplicationAttempt>>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index fb323c5..01be51c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -162,6 +162,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimpleCandidateNodeSet;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.IteratorSelector;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
@@ -1301,8 +1302,9 @@
     //This happens because app2 has no demand/a magnitude of NaN, which
     //results in app1 and app2 being equal in the fairness comparison and
     //falling back to fifo (start) ordering
-    assertEquals(q.getOrderingPolicy().getAssignmentIterator().next().getId(),
-      appId1.toString());
+    assertEquals(q.getOrderingPolicy().getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR).next().getId(),
+        appId1.toString());
 
     //Now, allocate for app2 (this would be the first/AM allocation)
     ResourceRequest r2 = TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true, priority, recordFactory);
@@ -1314,8 +1316,9 @@
     //verify re-ordering based on the allocation alone
 
     //Now, the first app for assignment is app2
-    assertEquals(q.getOrderingPolicy().getAssignmentIterator().next().getId(),
-      appId2.toString());
+    assertEquals(q.getOrderingPolicy().getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR).next().getId(),
+        appId2.toString());
 
     rm.stop();
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 53c8d65..1c8d84c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -87,6 +87,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicyWithExclusivePartitions;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
@@ -133,6 +134,8 @@
   final static int GB = 1024;
   final static String DEFAULT_RACK = "/default";
 
+  private final static String LABEL = "test";
+
   private final ResourceCalculator resourceCalculator =
       new DefaultResourceCalculator();
   
@@ -141,14 +144,19 @@
 
   @Before
   public void setUp() throws Exception {
-    setUpInternal(resourceCalculator);
+    setUpInternal(resourceCalculator, false);
   }
 
   private void setUpWithDominantResourceCalculator() throws Exception {
-    setUpInternal(dominantResourceCalculator);
+    setUpInternal(dominantResourceCalculator, false);
   }
 
-  private void setUpInternal(ResourceCalculator rC) throws Exception {
+  private void setUpWithNodeLabels() throws Exception {
+    setUpInternal(resourceCalculator, true);
+  }
+
+  private void setUpInternal(ResourceCalculator rC, boolean withNodeLabels)
+      throws Exception {
     CapacityScheduler spyCs = new CapacityScheduler();
     queues = new HashMap<String, CSQueue>();
     cs = spy(spyCs);
@@ -174,7 +182,7 @@
     csConf.setBoolean(CapacitySchedulerConfiguration.RESERVE_CONT_LOOK_ALL_NODES,
         false);
     final String newRoot = "root" + System.currentTimeMillis();
-    setupQueueConfiguration(csConf, newRoot);
+    setupQueueConfiguration(csConf, newRoot, withNodeLabels);
     YarnConfiguration conf = new YarnConfiguration();
     cs.setConf(conf);
     when(spyRMContext.getYarnConfiguration()).thenReturn(conf);
@@ -231,24 +239,39 @@
   private static final String E = "e";
   private void setupQueueConfiguration(
       CapacitySchedulerConfiguration conf, 
-      final String newRoot) {
+      final String newRoot, boolean withNodeLabels) {
     
     // Define top-level queues
     conf.setQueues(ROOT, new String[] {newRoot});
     conf.setMaximumCapacity(ROOT, 100);
     conf.setAcl(ROOT,
       QueueACL.SUBMIT_APPLICATIONS, " ");
+    if (withNodeLabels) {
+      conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, LABEL, 100);
+      conf.setMaximumCapacityByLabel(CapacitySchedulerConfiguration.ROOT,
+          LABEL, 100);
+    }
     
     final String Q_newRoot = ROOT + "." + newRoot;
     conf.setQueues(Q_newRoot, new String[] {A, B, C, D, E});
     conf.setCapacity(Q_newRoot, 100);
     conf.setMaximumCapacity(Q_newRoot, 100);
     conf.setAcl(Q_newRoot, QueueACL.SUBMIT_APPLICATIONS, " ");
+    if (withNodeLabels) {
+      conf.setAccessibleNodeLabels(Q_newRoot, Collections.singleton(LABEL));
+      conf.setCapacityByLabel(Q_newRoot, LABEL, 100);
+      conf.setMaximumCapacityByLabel(Q_newRoot, LABEL, 100);
+    }
 
     final String Q_A = Q_newRoot + "." + A;
     conf.setCapacity(Q_A, 8.5f);
     conf.setMaximumCapacity(Q_A, 20);
     conf.setAcl(Q_A, QueueACL.SUBMIT_APPLICATIONS, "*");
+    if (withNodeLabels) {
+      conf.setAccessibleNodeLabels(Q_A, Collections.singleton(LABEL));
+      conf.setCapacityByLabel(Q_A, LABEL, 100);
+      conf.setMaximumCapacityByLabel(Q_A, LABEL, 100);
+    }
     
     final String Q_B = Q_newRoot + "." + B;
     conf.setCapacity(Q_B, 80);
@@ -3100,7 +3123,7 @@
     Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
     CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
     final String newRootName = "root" + System.currentTimeMillis();
-    setupQueueConfiguration(csConf, newRootName);
+    setupQueueConfiguration(csConf, newRootName, false);
 
     Resource clusterResource = Resources.createResource(100 * 16 * GB,
         100 * 32);
@@ -3293,6 +3316,116 @@
   }
 
   @Test
+  public void testFifoWithPartitionsAssignment() throws Exception {
+    setUpWithNodeLabels();
+
+    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
+    OrderingPolicy<FiCaSchedulerApp> policy =
+        new FifoOrderingPolicyWithExclusivePartitions<>();
+    policy.configure(Collections.singletonMap(
+        YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX, LABEL));
+    a.setOrderingPolicy(policy);
+    String host00 = "127.0.0.1";
+    String rack0 = "rack_0";
+    FiCaSchedulerNode node00 = TestUtils.getMockNode(host00, rack0, 0,
+        16 * GB);
+    when(node00.getPartition()).thenReturn(LABEL);
+    String host01 = "127.0.0.2";
+    FiCaSchedulerNode node01 = TestUtils.getMockNode(host01, rack0, 0,
+        16 * GB);
+    when(node01.getPartition()).thenReturn("");
+
+    final int numNodes = 4;
+    Resource clusterResource = Resources.createResource(numNodes * (16 * GB),
+        numNodes * 16);
+    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+    String user0 = "user_0";
+
+    final ApplicationAttemptId appAttemptId0 =
+        TestUtils.getMockApplicationAttemptId(0, 0);
+    FiCaSchedulerApp app0 = spy(new FiCaSchedulerApp(appAttemptId0, user0, a,
+        mock(ActiveUsersManager.class), spyRMContext, Priority.newInstance(5),
+        false));
+    a.submitApplicationAttempt(app0, user0);
+
+    final ApplicationAttemptId appAttemptId1 =
+        TestUtils.getMockApplicationAttemptId(1, 0);
+    FiCaSchedulerApp app1 = spy(new FiCaSchedulerApp(appAttemptId1, user0, a,
+        mock(ActiveUsersManager.class), spyRMContext, Priority.newInstance(3),
+        false));
+    when(app1.getPartition()).thenReturn(LABEL);
+    a.submitApplicationAttempt(app1, user0);
+
+    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+        app0.getApplicationAttemptId(), app0, app1.getApplicationAttemptId(),
+        app1);
+    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node00.getNodeID(),
+        node00, node01.getNodeID(), node01);
+
+    Priority priority = TestUtils.createMockPriority(1);
+    List<ResourceRequest> app0Requests = new ArrayList<>();
+    List<ResourceRequest> app1Requests = new ArrayList<>();
+
+    app0Requests.clear();
+    app0Requests.add(TestUtils
+        .createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority,
+            recordFactory));
+    app0.updateResourceRequests(app0Requests);
+
+    app1Requests.clear();
+    app1Requests.add(TestUtils
+        .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority,
+            recordFactory, LABEL));
+    app1.updateResourceRequests(app1Requests);
+
+    // app_1 will get containers since it is exclusive-enforced
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node00,
+            new ResourceLimits(clusterResource),
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    Assert.assertEquals(1 * GB, app1.getSchedulingResourceUsage()
+        .getUsed(LABEL).getMemorySize());
+    // app_0 should not get resources from node_0_0 since the labels
+    // don't match
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node00,
+            new ResourceLimits(clusterResource),
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    Assert.assertEquals(0 * GB, app0.getCurrentConsumption().getMemorySize());
+
+    app1Requests.clear();
+    app1Requests.add(TestUtils
+        .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority,
+            recordFactory, LABEL));
+    app1.updateResourceRequests(app1Requests);
+
+    // When node_0_1 heartbeats, app_0 should get containers
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node01,
+            new ResourceLimits(clusterResource),
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    Assert.assertEquals(2 * GB, app0.getCurrentConsumption().getMemorySize());
+    Assert.assertEquals(1 * GB, app1.getSchedulingResourceUsage()
+        .getUsed(LABEL).getMemorySize());
+
+    app0Requests.clear();
+    app0Requests.add(TestUtils
+        .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority,
+            recordFactory));
+    app0.updateResourceRequests(app0Requests);
+
+    // When node_0_0 heartbeats, app_1 should get containers again
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node00,
+            new ResourceLimits(clusterResource),
+            SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    Assert.assertEquals(2 * GB, app0.getCurrentConsumption().getMemorySize());
+    Assert.assertEquals(2 * GB, app1.getSchedulingResourceUsage()
+        .getUsed(LABEL).getMemorySize());
+  }
+
+  @Test
   public void testConcurrentAccess() throws Exception {
     YarnConfiguration conf = new YarnConfiguration();
     MockRM rm = new MockRM();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
index 65314be..f3d5e74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestFSSchedulerConfigurationStore.java
@@ -36,6 +36,9 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
 
 /**
  * Tests {@link FSSchedulerConfigurationStore}.
@@ -136,6 +139,19 @@
   }
 
   @Test
+  public void testFormatConfiguration() throws Exception {
+    assertTrue(testSchedulerConfigurationDir.exists());
+    Configuration schedulerConf = new Configuration();
+    schedulerConf.set("a", "a");
+    writeConf(schedulerConf);
+    configurationStore.initialize(conf, conf, null);
+    Configuration storedConfig = configurationStore.retrieve();
+    assertEquals("a", storedConfig.get("a"));
+    configurationStore.format();
+    assertFalse(testSchedulerConfigurationDir.exists());
+  }
+
+  @Test
   public void retrieve() throws Exception {
     Configuration schedulerConf = new Configuration();
     schedulerConf.set("a", "a");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
index 64fc80c..cb416e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestMutableCSConfigurationProvider.java
@@ -104,6 +104,10 @@
     confProvider.confirmPendingMutation(false);
     assertNull(confProvider.loadConfiguration(conf).get(
         "yarn.scheduler.capacity.root.a.badKey"));
+
+    confProvider.formatConfigurationInStore(conf);
+    assertNull(confProvider.loadConfiguration(conf)
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
   }
 
   @Test
@@ -174,6 +178,10 @@
     assertNull(confProvider.loadConfiguration(conf).get(
         "yarn.scheduler.capacity.root.a.badKey"));
 
+    confProvider.formatConfigurationInStore(conf);
+    assertNull(confProvider.loadConfiguration(conf)
+        .get("yarn.scheduler.capacity.root.a.goodKey"));
+
   }
 
   private void writeConf(Configuration conf, String storePath)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
index c09ae28..f71c4e7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
@@ -129,6 +129,15 @@
 
 
   @Test
+  public void testFormatConfiguration() throws Exception {
+    schedConf.set("key", "val");
+    confStore.initialize(conf, schedConf, rmContext);
+    assertEquals("val", confStore.retrieve().get("key"));
+    confStore.format();
+    assertNull(confStore.retrieve());
+  }
+
+  @Test
   public void testPersistUpdatedConfiguration() throws Exception {
     confStore.initialize(conf, schedConf, rmContext);
     assertNull(confStore.retrieve().get("key"));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
index 4f251bf..62f7a49 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/MockSchedulableEntity.java
@@ -18,21 +18,19 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
 
-import java.util.*;
-
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 
 
 public class MockSchedulableEntity implements SchedulableEntity {
-  
+
   private String id;
   private long serial = 0;
   private Priority priority;
   private boolean isRecovering;
+  private String partition = "";
 
   public MockSchedulableEntity() { }
   
@@ -101,4 +99,13 @@
   protected void setRecovering(boolean entityRecovering) {
     this.isRecovering = entityRecovering;
   }
+
+  @Override
+  public String getPartition() {
+    return partition;
+  }
+
+  public void setPartition(String partition) {
+    this.partition = partition;
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFairOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFairOrderingPolicy.java
index 683173a..e023e01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFairOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFairOrderingPolicy.java
@@ -126,19 +126,25 @@
 
 
     //Assignment, least to greatest consumption
-    checkIds(schedOrder.getAssignmentIterator(), new String[]{"3", "2", "1"});
+    checkIds(schedOrder.getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR),
+        new String[]{"3", "2", "1"});
 
     //Preemption, greatest to least
     checkIds(schedOrder.getPreemptionIterator(), new String[]{"1", "2", "3"});
 
     //Change value without inform, should see no change
     msp2.setUsed(Resources.createResource(6));
-    checkIds(schedOrder.getAssignmentIterator(), new String[]{"3", "2", "1"});
+    checkIds(schedOrder.getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR),
+        new String[]{"3", "2", "1"});
     checkIds(schedOrder.getPreemptionIterator(), new String[]{"1", "2", "3"});
 
     //Do inform, will reorder
     schedOrder.containerAllocated(msp2, null);
-    checkIds(schedOrder.getAssignmentIterator(), new String[]{"3", "1", "2"});
+    checkIds(schedOrder.getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR),
+        new String[]{"3", "1", "2"});
     checkIds(schedOrder.getPreemptionIterator(), new String[]{"2", "1", "3"});
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
index 776f6c6..7ec2c01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
@@ -63,7 +63,7 @@
     schedOrder.addSchedulableEntity(msp3);
     
     //Assignment, oldest to youngest
-    checkSerials(schedOrder.getAssignmentIterator(), new long[]{1, 2, 3});
+    checkSerials(schedOrder.getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR), new long[]{1, 2, 3});
     
     //Preemption, youngest to oldest
     checkSerials(schedOrder.getPreemptionIterator(), new long[]{3, 2, 1});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyForPendingApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyForPendingApps.java
index 5fa9a1d..56fccc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyForPendingApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyForPendingApps.java
@@ -74,8 +74,9 @@
     schedOrder.addSchedulableEntity(msp7);
 
     // Assignment with serial id's are 3,2,4,1,6,5,7
-    checkSerials(schedOrder.getAssignmentIterator(), new long[] { 3, 2, 4, 1,
-        6, 5, 7 });
+    checkSerials(schedOrder.getAssignmentIterator(
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR), new long[] {3, 2, 4, 1,
+        6, 5, 7});
 
     //Preemption, youngest to oldest
     checkSerials(schedOrder.getPreemptionIterator(), new long[] { 7, 5, 6, 1,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyWithExclusivePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyWithExclusivePartitions.java
new file mode 100644
index 0000000..499a70a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicyWithExclusivePartitions.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+/**
+ * Tests {@link FifoOrderingPolicyWithExclusivePartitions} ordering policy.
+ */
+public class TestFifoOrderingPolicyWithExclusivePartitions {
+
+  private static final String PARTITION = "test";
+  private static final String PARTITION2 = "test2";
+
+  @Test
+  public void testNoConfiguredExclusiveEnforcedPartitions() {
+    FifoOrderingPolicyWithExclusivePartitions<MockSchedulableEntity> policy =
+        new FifoOrderingPolicyWithExclusivePartitions<>();
+    policy.configure(Collections.EMPTY_MAP);
+
+    MockSchedulableEntity p1 = new MockSchedulableEntity(4, 0, false);
+    p1.setPartition(PARTITION);
+    p1.setId("p1");
+    MockSchedulableEntity p2 = new MockSchedulableEntity(3, 1, false);
+    p2.setPartition(PARTITION);
+    p2.setId("p2");
+
+    MockSchedulableEntity r1 = new MockSchedulableEntity(2, 0, false);
+    r1.setId("r1");
+    MockSchedulableEntity r2 = new MockSchedulableEntity(1, 0, false);
+    r2.setId("r2");
+
+    policy.addSchedulableEntity(p1);
+    policy.addAllSchedulableEntities(Arrays.asList(p2, r1, r2));
+    Assert.assertEquals(4, policy.getNumSchedulableEntities());
+    Assert.assertEquals(4, policy.getSchedulableEntities().size());
+    IteratorSelector sel = new IteratorSelector();
+    // Should behave like FifoOrderingPolicy, regardless of partition
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "p2", "r2", "r1", "p1");
+    verifyPreemptionIteratorOrder(policy, "p1", "r1", "r2", "p2");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "p2", "r2", "r1", "p1");
+    verifyPreemptionIteratorOrder(policy, "p1", "r1", "r2", "p2");
+
+    policy.removeSchedulableEntity(p2);
+    policy.removeSchedulableEntity(r2);
+    Assert.assertEquals(2, policy.getNumSchedulableEntities());
+    Assert.assertEquals(2, policy.getSchedulableEntities().size());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "r1", "p1");
+    verifyPreemptionIteratorOrder(policy, "p1", "r1");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "r1", "p1");
+    verifyPreemptionIteratorOrder(policy, "p1", "r1");
+  }
+
+  @Test
+  public void testSingleExclusiveEnforcedPartition() {
+    FifoOrderingPolicyWithExclusivePartitions<MockSchedulableEntity> policy =
+        new FifoOrderingPolicyWithExclusivePartitions<>();
+    policy.configure(Collections.singletonMap(
+        YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX, PARTITION));
+
+    // PARTITION iterator should return p2, p1, p3
+    MockSchedulableEntity p1 = new MockSchedulableEntity(1, 0, false);
+    p1.setPartition(PARTITION);
+    p1.setId("p1");
+    MockSchedulableEntity p2 = new MockSchedulableEntity(5, 1, false);
+    p2.setPartition(PARTITION);
+    p2.setId("p2");
+    MockSchedulableEntity p3 = new MockSchedulableEntity(3, 0, false);
+    p3.setPartition(PARTITION);
+    p3.setId("p3");
+
+    // non-PARTITION iterator should return r3, r2, r1
+    MockSchedulableEntity r1 = new MockSchedulableEntity(6, 0, false);
+    r1.setId("r1");
+    MockSchedulableEntity r2 = new MockSchedulableEntity(4, 0, false);
+    r2.setId("r2");
+    MockSchedulableEntity r3 = new MockSchedulableEntity(2, 1, false);
+    r3.setId("r3");
+
+    policy.addSchedulableEntity(r1);
+    Assert.assertEquals(1, policy.getNumSchedulableEntities());
+    Assert.assertEquals("r1", policy.getSchedulableEntities()
+        .iterator().next().getId());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "r1");
+    verifyPreemptionIteratorOrder(policy, "r1");
+
+    List<MockSchedulableEntity> entities = Arrays.asList(r2, r3, p1, p2);
+    policy.addAllSchedulableEntities(entities);
+    policy.addSchedulableEntity(p3);
+    Assert.assertEquals(6, policy.getNumSchedulableEntities());
+    Assert.assertEquals(6, policy.getSchedulableEntities().size());
+    // Assignment iterator should return non-PARTITION entities,
+    // in order based on FifoOrderingPolicy
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "r3", "r2", "r1");
+    // Preemption iterator should return all entities, in global order
+    verifyPreemptionIteratorOrder(policy, "r1", "r2", "p3", "p1", "p2", "r3");
+    // Same thing as above, but with a non-empty partition
+    IteratorSelector sel = new IteratorSelector();
+    sel.setPartition("dummy");
+    verifyAssignmentIteratorOrder(policy, sel, "r3", "r2", "r1");
+    verifyPreemptionIteratorOrder(policy, "r1", "r2", "p3", "p1", "p2", "r3");
+    // Should return PARTITION entities, in order based on FifoOrderingPolicy
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "p2", "p1", "p3");
+    verifyPreemptionIteratorOrder(policy, "r1", "r2", "p3", "p1", "p2", "r3");
+
+    policy.removeSchedulableEntity(p2);
+    policy.removeSchedulableEntity(r2);
+    Assert.assertEquals(4, policy.getNumSchedulableEntities());
+    Assert.assertEquals(4, policy.getSchedulableEntities().size());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "r3", "r1");
+    verifyPreemptionIteratorOrder(policy, "r1", "p3", "p1", "r3");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "p1", "p3");
+    verifyPreemptionIteratorOrder(policy, "r1", "p3", "p1", "r3");
+
+    policy.removeSchedulableEntity(p1);
+    policy.removeSchedulableEntity(p3);
+    Assert.assertEquals(2, policy.getNumSchedulableEntities());
+    Assert.assertEquals(2, policy.getSchedulableEntities().size());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "r3", "r1");
+    verifyPreemptionIteratorOrder(policy, "r1", "r3");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel);
+    verifyPreemptionIteratorOrder(policy, "r1", "r3");
+  }
+
+  @Test
+  public void testMultipleExclusiveEnforcedPartitions() {
+    FifoOrderingPolicyWithExclusivePartitions<MockSchedulableEntity> policy =
+        new FifoOrderingPolicyWithExclusivePartitions<>();
+    policy.configure(Collections.singletonMap(
+        YarnConfiguration.EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX,
+        PARTITION + "," + PARTITION2));
+
+    // PARTITION iterator should return p2, p1
+    MockSchedulableEntity p1 = new MockSchedulableEntity(1, 0, false);
+    p1.setPartition(PARTITION);
+    p1.setId("p1");
+    MockSchedulableEntity p2 = new MockSchedulableEntity(5, 1, false);
+    p2.setPartition(PARTITION);
+    p2.setId("p2");
+
+    // PARTITION2 iterator should return r1, r2
+    MockSchedulableEntity r1 = new MockSchedulableEntity(3, 0, false);
+    r1.setPartition(PARTITION2);
+    r1.setId("r1");
+    MockSchedulableEntity r2 = new MockSchedulableEntity(4, 0, false);
+    r2.setPartition(PARTITION2);
+    r2.setId("r2");
+
+    // default iterator should return s2, s1
+    MockSchedulableEntity s1 = new MockSchedulableEntity(6, 0, false);
+    s1.setId("s1");
+    MockSchedulableEntity s2 = new MockSchedulableEntity(2, 0, false);
+    s2.setId("s2");
+
+    policy.addAllSchedulableEntities(Arrays.asList(s1, s2, r1));
+    Assert.assertEquals(3, policy.getNumSchedulableEntities());
+    Assert.assertEquals(3, policy.getSchedulableEntities().size());
+    IteratorSelector sel = new IteratorSelector();
+    // assignment iterator returns only default (non-partitioned) entities
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "s2", "s1");
+    verifyPreemptionIteratorOrder(policy, "s1", "r1", "s2");
+    sel.setPartition(PARTITION2);
+    verifyAssignmentIteratorOrder(policy, sel, "r1");
+
+    policy.addAllSchedulableEntities(Arrays.asList(r2, p1, p2));
+    Assert.assertEquals(6, policy.getNumSchedulableEntities());
+    Assert.assertEquals(6, policy.getSchedulableEntities().size());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "s2", "s1");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "p2", "p1");
+    sel.setPartition(PARTITION2);
+    verifyAssignmentIteratorOrder(policy, sel, "r1", "r2");
+    verifyPreemptionIteratorOrder(policy, "s1", "r2", "r1", "s2", "p1", "p2");
+
+    policy.removeSchedulableEntity(p2);
+    policy.removeSchedulableEntity(r1);
+    policy.removeSchedulableEntity(r2);
+    Assert.assertEquals(3, policy.getNumSchedulableEntities());
+    Assert.assertEquals(3, policy.getSchedulableEntities().size());
+    verifyAssignmentIteratorOrder(policy,
+        IteratorSelector.EMPTY_ITERATOR_SELECTOR, "s2", "s1");
+    sel.setPartition(PARTITION);
+    verifyAssignmentIteratorOrder(policy, sel, "p1");
+    sel.setPartition(PARTITION2);
+    verifyAssignmentIteratorOrder(policy, sel);
+    verifyPreemptionIteratorOrder(policy, "s1", "s2", "p1");
+  }
+
+  private void verifyAssignmentIteratorOrder(
+      FifoOrderingPolicyWithExclusivePartitions<MockSchedulableEntity> policy,
+      IteratorSelector sel, String... ids) {
+    verifyIteratorOrder(policy.getAssignmentIterator(sel), ids);
+  }
+
+  private void verifyPreemptionIteratorOrder(
+      FifoOrderingPolicyWithExclusivePartitions<MockSchedulableEntity> policy,
+      String... ids) {
+    verifyIteratorOrder(policy.getPreemptionIterator(), ids);
+  }
+
+  private void verifyIteratorOrder(Iterator<MockSchedulableEntity> itr,
+      String... ids) {
+    for (String id : ids) {
+      Assert.assertEquals(id, itr.next().getId());
+    }
+    Assert.assertFalse(itr.hasNext());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index d973dca..ee104b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -778,6 +778,7 @@
     RMContext mockContext = mock(RMContext.class);
     when(mockContext.getSystemCredentialsForApps()).thenReturn(
         new ConcurrentHashMap<ApplicationId, SystemCredentialsForAppsProto>());
+    when(mockContext.getDispatcher()).thenReturn(dispatcher);
     ClientRMService mockClientRMService = mock(ClientRMService.class);
     when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
     InetSocketAddress sockAddr =
@@ -837,6 +838,7 @@
     RMContext mockContext = mock(RMContext.class);
     when(mockContext.getSystemCredentialsForApps()).thenReturn(
         new ConcurrentHashMap<ApplicationId, SystemCredentialsForAppsProto>());
+    when(mockContext.getDispatcher()).thenReturn(dispatcher);
     ClientRMService mockClientRMService = mock(ClientRMService.class);         
     when(mockContext.getClientRMService()).thenReturn(mockClientRMService);    
     InetSocketAddress sockAddr =                                               
@@ -1460,6 +1462,7 @@
     RMContext mockContext = mock(RMContext.class);
     when(mockContext.getSystemCredentialsForApps()).thenReturn(
         new ConcurrentHashMap<ApplicationId, SystemCredentialsForAppsProto>());
+    when(mockContext.getDispatcher()).thenReturn(dispatcher);
     ClientRMService mockClientRMService = mock(ClientRMService.class);
     when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
     InetSocketAddress sockAddr =
@@ -1502,6 +1505,7 @@
     RMContext mockContext = mock(RMContext.class);
     when(mockContext.getSystemCredentialsForApps()).thenReturn(
         new ConcurrentHashMap<ApplicationId, SystemCredentialsForAppsProto>());
+    when(mockContext.getDispatcher()).thenReturn(dispatcher);
     ClientRMService mockClientRMService = mock(ClientRMService.class);
     when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
     InetSocketAddress sockAddr =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 3e2542c..67f83c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -190,6 +190,19 @@
   }
 
   @Test
+  public void testFormatSchedulerConf() throws Exception {
+    testAddNestedQueue();
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .queryParam("user.name", userName)
+        .path(RMWSConsts.FORMAT_SCHEDULER_CONF)
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(Status.OK.getStatusCode(), response.getStatus());
+    CapacitySchedulerConfiguration orgConf = getSchedulerConf();
+    assertEquals(3, orgConf.getQueues("root").length);
+  }
+
+  @Test
   public void testAddNestedQueue() throws Exception {
     CapacitySchedulerConfiguration orgConf = getSchedulerConf();
     assertNotNull(orgConf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/proto/test_client_tokens.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/proto/test_client_tokens.proto
index 7ae83897..4ee512a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/proto/test_client_tokens.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/proto/test_client_tokens.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnSecurityTestClientAMTokenProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index 1403cb1..c65301f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -137,28 +137,21 @@
   <build>
     <plugins>
       <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
+        <groupId>org.xolstice.maven.plugins</groupId>
+        <artifactId>protobuf-maven-plugin</artifactId>
         <executions>
           <execution>
-            <id>compile-test-protoc</id>
-            <goals>
-              <goal>test-protoc</goal>
-            </goals>
+            <id>src-test-compile-protoc</id>
             <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>${basedir}/src/test/proto</param>
-                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
-                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/test/proto</directory>
-                <includes>
-                  <include>test_token.proto</include>
-                </includes>
-              </source>
+              <skip>false</skip>
+              <additionalProtoPathElements>
+                <additionalProtoPathElement>
+                  ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+                </additionalProtoPathElement>
+                <additionalProtoPathElement>
+                  ${basedir}/../../hadoop-yarn-api/src/main/proto
+                </additionalProtoPathElement>
+              </additionalProtoPathElements>
             </configuration>
           </execution>
         </executions>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
index c111462..687ed2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+syntax = "proto2";
 option java_package = "org.apache.hadoop.yarn.proto";
 option java_outer_classname = "YarnSecurityTestTokenProtos";
 option java_generic_services = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
index 5b9f0ef..e4ed57f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManager.md
@@ -44,7 +44,13 @@
 
 ###External Health Script
 
-Users may specify their own health checker script that will be invoked by the health checker service. Users may specify a timeout as well as options to be passed to the script. If the script exits with a non-zero exit code, times out or results in an exception being thrown, the node is marked as unhealthy. Please note that if the script cannot be executed due to permissions or an incorrect path, etc, then it counts as a failure and the node will be reported as unhealthy. Please note that speifying a health check script is not mandatory. If no script is specified, only the disk checker status will be used to determine the health of the node.
+Users may specify their own health checker script that will be invoked by the health checker service. Users may also specify a timeout as well as options to be passed to the script. If the script times out, throws an exception, or outputs a line that begins with the string ERROR, the node is marked as unhealthy. Please note that:
+
+  * An exit code other than 0 is **not** considered a failure, because it might have been caused by a syntax error. In that case the node will **not** be marked as unhealthy.
+
+  * If the script cannot be executed due to permissions, an incorrect path, etc., then it counts as a failure and the node will be reported as unhealthy.
+
+  * Specifying a health check script is not mandatory. If no script is specified, only the disk checker status will be used to determine the health of the node.
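+
+For example, a minimal health script might look like the following sketch. The monitored path and the 90% threshold are purely illustrative; only the convention of printing a line that starts with ERROR is significant to the health checker.
+
+```bash
+#!/usr/bin/env bash
+# Illustrative check: report the node unhealthy when /var/log is more than 90% full.
+usage=$(df --output=pcent /var/log | tail -n 1 | tr -dc '0-9')
+if [ "${usage:-0}" -gt 90 ]; then
+  # A line beginning with ERROR marks the node as unhealthy.
+  echo "ERROR: /var/log is ${usage}% full"
+fi
+# Exit 0 in all cases; a non-zero exit code is not treated as a failure.
+exit 0
+```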
 
 The following configuration parameters can be used to set the health script:
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index a6d7971..8f0b464 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -5292,6 +5292,87 @@
       Content-Type: application/xml
       Transfer-Encoding: chunked
 
+**Adding Node Labels to a queue**
+
+Assuming we are using the capacity scheduler and the current queue configuration consists of two queues, root.default and root.a, this example adds a Node Label x to the queue root.a. Create the Node Label x and assign it to nodes with the commands below.
+
+```yarn rmadmin -addToClusterNodeLabels "x(exclusive=true)"```
+
+```yarn rmadmin -replaceLabelsOnNode "<nodeId>=x"```
+
+HTTP Request:
+
+```xml
+      Accept: application/xml
+      PUT http://rm-http-address:port/ws/v1/cluster/scheduler-conf
+      Content-Type: application/xml
+      <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+      <sched-conf>
+        <update-queue>
+          <queue-name>root.a</queue-name>
+          <params>
+            <entry>
+              <key>accessible-node-labels</key>
+              <value>x</value>
+            </entry>
+            <entry>
+              <key>accessible-node-labels.x.capacity</key>
+              <value>100</value>
+            </entry>
+          </params>
+        </update-queue>
+        <update-queue>
+          <queue-name>root</queue-name>
+          <params>
+            <entry>
+              <key>accessible-node-labels.x.capacity</key>
+              <value>100</value>
+            </entry>
+          </params>
+        </update-queue>
+      </sched-conf>
+```
+
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/xml
+      Transfer-Encoding: chunked
+
+**Removing Node Labels from a queue**
+
+Assuming we are using the capacity scheduler, the current queue configuration consists of two queues, root.default and root.a, and the Node Label x is assigned to the queue root.a, this example unsets the Node Label x from the queue root.a and then removes it from the cluster.
+
+HTTP Request:
+
+```xml
+      Accept: application/xml
+      PUT http://rm-http-address:port/ws/v1/cluster/scheduler-conf
+      Content-Type: application/xml
+      <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+      <sched-conf>
+        <update-queue>
+          <queue-name>root.a</queue-name>
+          <params>
+            <entry>
+              <key>accessible-node-labels</key>
+              <value></value>
+            </entry>
+          </params>
+        </update-queue>
+      </sched-conf>
+```
+
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/xml
+      Transfer-Encoding: chunked
+
+Once no queue references the Node Label x, remove it from the cluster:
+
+```yarn rmadmin -removeFromClusterNodeLabels x```
+
 
 Cluster Container Signal API
 --------------------------------
diff --git a/pom.ozone.xml b/pom.ozone.xml
index 55c0205..f83c55c 100644
--- a/pom.ozone.xml
+++ b/pom.ozone.xml
@@ -144,7 +144,7 @@
 
     <curator.version>2.12.0</curator.version>
     <findbugs.version>3.0.0</findbugs.version>
-    <spotbugs.version>3.1.0-RC1</spotbugs.version>
+    <spotbugs.version>3.1.12</spotbugs.version>
     <dnsjava.version>2.1.7</dnsjava.version>
 
     <guava.version>11.0.2</guava.version>
@@ -1211,6 +1211,12 @@
         <version>${hadoop.version}</version>
       </dependency>
       <dependency>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs</artifactId>
+        <version>${spotbugs.version}</version>
+        <scope>provided</scope>
+      </dependency>
+      <dependency>
         <groupId>com.google.code.findbugs</groupId>
         <artifactId>jsr305</artifactId>
         <version>${findbugs.version}</version>
@@ -1571,16 +1577,13 @@
           <version>${maven-war-plugin.version}</version>
         </plugin>
         <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>findbugs-maven-plugin</artifactId>
-          <version>${findbugs.version}</version>
-          <dependencies>
-            <dependency>
-              <groupId>com.github.spotbugs</groupId>
-              <artifactId>spotbugs</artifactId>
-              <version>${spotbugs.version}</version>
-            </dependency>
-          </dependencies>
+          <groupId>com.github.spotbugs</groupId>
+          <artifactId>spotbugs-maven-plugin</artifactId>
+          <version>${spotbugs.version}</version>
+          <configuration>
+            <maxHeap>1024</maxHeap>
+            <xmlOutput>true</xmlOutput>
+          </configuration>
         </plugin>
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
@@ -1675,10 +1678,6 @@
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-      </plugin>
-      <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-remote-resources-plugin</artifactId>
         <version>${maven-remote-resources-plugin.version}</version>