Merge remote-tracking branch 'origin/master' into HDDS-6030
diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml
index 6d4a229..caaa303 100644
--- a/.github/workflows/post-commit.yml
+++ b/.github/workflows/post-commit.yml
@@ -15,7 +15,7 @@
 name: build-branch
 on:
   pull_request:
-    types: [opened, reopened, ready_for_review, synchronize]
+    types: [opened, ready_for_review, synchronize]
   push:
   schedule:
     - cron: 30 0,12 * * *
@@ -125,6 +125,12 @@
     steps:
       - name: Checkout project
         uses: actions/checkout@v2
+        if: matrix.check != 'bats'
+      - name: Checkout project with history
+        uses: actions/checkout@v2
+        with:
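+          # full history is needed so the bats tests can examine past commits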
+          fetch-depth: 0
+        if: matrix.check == 'bats'
       - name: Cache for maven dependencies
         uses: actions/cache@v2
         with:
diff --git a/.gitignore b/.gitignore
index a302cc0..1ec550b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,7 @@
 .mvn
 
 .dev-tools
-
+dev-support/ci/bats-assert
+dev-support/ci/bats-support
 
 hadoop-ozone/dist/src/main/license/current.txt
diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats
index 7e2dff3..e5788fe 100644
--- a/dev-support/ci/selective_ci_checks.bats
+++ b/dev-support/ci/selective_ci_checks.bats
@@ -66,6 +66,17 @@
   assert_output -p needs-kubernetes-tests=true
 }
 
+@test "runner image update" {
+  run dev-support/ci/selective_ci_checks.sh b95eeba82a
+
+  assert_output -p 'basic-checks=["rat"]'
+  assert_output -p needs-build=true
+  assert_output -p needs-compose-tests=true
+  assert_output -p needs-dependency-check=true
+  assert_output -p needs-integration-tests=false
+  assert_output -p needs-kubernetes-tests=true
+}
+
 @test "check script" {
   run dev-support/ci/selective_ci_checks.sh 316899152
 
@@ -77,10 +88,54 @@
   assert_output -p needs-kubernetes-tests=true
 }
 
+@test "integration and unit: java change" {
+  run dev-support/ci/selective_ci_checks.sh 9aebf6e25
+
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
+  assert_output -p needs-build=false
+  assert_output -p needs-compose-tests=false
+  assert_output -p needs-dependency-check=false
+  assert_output -p needs-integration-tests=true
+  assert_output -p needs-kubernetes-tests=false
+}
+
+@test "integration and unit: script change" {
+  run dev-support/ci/selective_ci_checks.sh c6850484f
+
+  assert_output -p 'basic-checks=["rat","bats","unit"]'
+  assert_output -p needs-build=false
+  assert_output -p needs-compose-tests=false
+  assert_output -p needs-dependency-check=false
+  assert_output -p needs-integration-tests=true
+  assert_output -p needs-kubernetes-tests=false
+}
+
+@test "unit only" {
+  run dev-support/ci/selective_ci_checks.sh 1dd1d0ba3
+
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
+  assert_output -p needs-build=false
+  assert_output -p needs-compose-tests=false
+  assert_output -p needs-dependency-check=false
+  assert_output -p needs-integration-tests=false
+  assert_output -p needs-kubernetes-tests=false
+}
+
+@test "unit helper" {
+  run dev-support/ci/selective_ci_checks.sh 88383d1d5
+
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
+  assert_output -p needs-build=false
+  assert_output -p needs-compose-tests=false
+  assert_output -p needs-dependency-check=false
+  assert_output -p needs-integration-tests=true
+  assert_output -p needs-kubernetes-tests=false
+}
+
 @test "integration only" {
   run dev-support/ci/selective_ci_checks.sh 61396ba9f
 
-  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]'
   assert_output -p needs-build=false
   assert_output -p needs-compose-tests=false
   assert_output -p needs-dependency-check=false
@@ -110,7 +165,18 @@
   assert_output -p needs-kubernetes-tests=false
 }
 
-@test "java-only change" {
+@test "main/java change" {
+  run dev-support/ci/selective_ci_checks.sh 86a771dfe
+
+  assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
+  assert_output -p needs-build=true
+  assert_output -p needs-compose-tests=true
+  assert_output -p needs-dependency-check=false
+  assert_output -p needs-integration-tests=true
+  assert_output -p needs-kubernetes-tests=true
+}
+
+@test "..../java change" {
   run dev-support/ci/selective_ci_checks.sh 01c616536
 
   assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]'
diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh
index 4a490cd..fd9ab5b 100755
--- a/dev-support/ci/selective_ci_checks.sh
+++ b/dev-support/ci/selective_ci_checks.sh
@@ -219,7 +219,10 @@
     start_end::group_start "Count compose files"
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/acceptance.sh"
-        "^hadoop-ozone/dist/src/main/compose"
+        "^hadoop-ozone/dist"
+    )
+    local ignore_array=(
+        "^hadoop-ozone/dist/src/main/k8s"
     )
     filter_changed_files true
     COUNT_COMPOSE_CHANGED_FILES=${match_count}
@@ -239,18 +242,30 @@
     start_end::group_end
 }
 
-function get_count_junit_files() {
-    start_end::group_start "Count junit test files"
+function get_count_integration_files() {
+    start_end::group_start "Count integration test files"
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh"
         "^hadoop-ozone/dev-support/checks/integration.sh"
-        "^hadoop-ozone/dev-support/checks/unit.sh"
+        "^hadoop-ozone/integration-test"
+        "^hadoop-ozone/fault-injection-test/mini-chaos-tests"
         "src/test/java"
-        "src/test/resources"
+    )
+    # Ozone's unit test naming convention: Test*.java
+    # The following makes this filter ignore all tests except those in
+    # integration-test and fault-injection-test.
+    # Directories starting with `i` under hadoop-ozone need to be listed
+    # explicitly; other subdirectories are captured by the second pattern.
+    local ignore_array=(
+        "^hadoop-hdds/.*/src/test/java/.*/Test.*.java"
+        "^hadoop-ozone/[a-eghj-z].*/src/test/java/.*/Test.*.java"
+        "^hadoop-ozone/insight/src/test/java/.*/Test.*.java"
+        "^hadoop-ozone/interface-client/src/test/java/.*/Test.*.java"
+        "^hadoop-ozone/interface-storage/src/test/java/.*/Test.*.java"
     )
     filter_changed_files true
-    COUNT_JUNIT_CHANGED_FILES=${match_count}
-    readonly COUNT_JUNIT_CHANGED_FILES
+    COUNT_INTEGRATION_CHANGED_FILES=${match_count}
+    readonly COUNT_INTEGRATION_CHANGED_FILES
     start_end::group_end
 }
 
@@ -258,7 +273,10 @@
     start_end::group_start "Count kubernetes files"
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/kubernetes.sh"
-        "^hadoop-ozone/dist/src/main/k8s"
+        "^hadoop-ozone/dist"
+    )
+    local ignore_array=(
+        "^hadoop-ozone/dist/src/main/compose"
     )
     filter_changed_files true
     COUNT_KUBERNETES_CHANGED_FILES=${match_count}
@@ -302,7 +320,7 @@
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} author"
+        add_basic_check author
     fi
 
     start_end::group_end
@@ -318,7 +336,7 @@
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} bats"
+        add_basic_check bats
     fi
 
     start_end::group_end
@@ -332,10 +350,13 @@
         "pom.xml"
         "src/..../java"
     )
+    local ignore_array=(
+        "^hadoop-ozone/dist"
+    )
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} checkstyle"
+        add_basic_check checkstyle
     fi
 
     start_end::group_end
@@ -343,7 +364,7 @@
 
 function check_needs_docs() {
     if [[ ${COUNT_DOC_CHANGED_FILES} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} docs"
+        add_basic_check docs
     fi
 }
 
@@ -373,10 +394,13 @@
         "pom.xml"
         "src/..../java"
     )
+    local ignore_array=(
+        "^hadoop-ozone/dist"
+    )
     filter_changed_files
 
     if [[ ${match_count} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} findbugs"
+        add_basic_check findbugs
     fi
 
     start_end::group_end
@@ -387,14 +411,18 @@
     local pattern_array=(
         "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh"
         "^hadoop-ozone/dev-support/checks/unit.sh"
-        "pom.xml"
-        "src/..../java"
-        "src/..../resources"
+        "src/test/java"
+        "src/test/resources"
     )
-    filter_changed_files
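+    # ignored paths are covered by compose, kubernetes and integration checks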
+    local ignore_array=(
+        "^hadoop-ozone/dist"
+        "^hadoop-ozone/fault-injection-test/mini-chaos-tests"
+        "^hadoop-ozone/integration-test"
+    )
+    filter_changed_files true
 
     if [[ ${match_count} != "0" ]]; then
-        BASIC_CHECKS="${BASIC_CHECKS} unit"
+        add_basic_check unit
     fi
 
     start_end::group_end
@@ -421,6 +449,13 @@
     start_end::group_end
 }
 
+function add_basic_check() {
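+    # append the given check to BASIC_CHECKS unless it is already present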
+    local check="$1"
+    if [[ "$BASIC_CHECKS" != *${check}* ]]; then
+        BASIC_CHECKS="${BASIC_CHECKS} ${check}"
+    fi
+}
+
 function calculate_test_types_to_run() {
     start_end::group_start "Count core/other files"
     verbosity::store_exit_on_error_status
@@ -440,13 +475,14 @@
         compose_tests_needed=true
         integration_tests_needed=true
         kubernetes_tests_needed=true
+        add_basic_check unit
     else
         echo "All ${COUNT_ALL_CHANGED_FILES} changed files are known to be handled by specific checks."
         echo
         if [[ ${COUNT_COMPOSE_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then
             compose_tests_needed="true"
         fi
-        if [[ ${COUNT_JUNIT_CHANGED_FILES} != "0" ]]; then
+        if [[ ${COUNT_INTEGRATION_CHANGED_FILES} != "0" ]]; then
             integration_tests_needed="true"
         fi
         if [[ ${COUNT_KUBERNETES_CHANGED_FILES} != "0" ]] || [[ ${COUNT_ROBOT_CHANGED_FILES} != "0" ]]; then
@@ -502,7 +538,7 @@
 get_count_all_files
 get_count_compose_files
 get_count_doc_files
-get_count_junit_files
+get_count_integration_files
 get_count_kubernetes_files
 get_count_robot_files
 get_count_misc_files
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index f39ec86..064ce6e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -38,6 +38,18 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(OzoneClientConfig.class);
 
+  /**
+   * Enum for indicating what mode to use when combining chunk and block
+   * checksums to define an aggregate FileChecksum. This should be considered
+   * a client-side runtime option rather than a persistent property of any
+   * stored metadata; for that reason it is not part of ChecksumOpt, which
+   * deals with properties of files at rest.
+   */
+  public enum ChecksumCombineMode {
+    MD5MD5CRC,  // MD5 of block checksums, which are MD5 over chunk CRCs
+    COMPOSITE_CRC  // Block/chunk-independent composite CRC
+  }
+
   @Config(key = "stream.buffer.flush.size",
       defaultValue = "16MB",
       type = ConfigType.SIZE,
@@ -124,6 +136,20 @@
       tags = ConfigTag.CLIENT)
   private boolean checksumVerify = true;
 
+  @Config(key = "checksum.combine.mode",
+      defaultValue = "COMPOSITE_CRC",
+      description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] "
+          + "determines which algorithm is used to compute the file checksum. "
+          + "COMPOSITE_CRC calculates the combined CRC of the whole file, "
+          + "where the lower-level chunk/block checksums are combined into a "
+          + "file-level checksum. "
+          + "MD5MD5CRC calculates the MD5 of the MD5s of the individual "
+          + "chunk checksums. "
+          + "The default checksum type is COMPOSITE_CRC.",
+      tags = ConfigTag.CLIENT)
+  private String checksumCombineMode =
+      ChecksumCombineMode.COMPOSITE_CRC.name();
+
   @PostConstruct
   private void validate() {
     Preconditions.checkState(streamBufferSize > 0);
@@ -227,4 +253,16 @@
   public int getBufferIncrement() {
     return bufferIncrement;
   }
+
+  public ChecksumCombineMode getChecksumCombineMode() {
+    try {
+      return ChecksumCombineMode.valueOf(checksumCombineMode);
+    } catch (IllegalArgumentException iae) {
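+      // fall back to the default mode if the configured value is invalid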
+      LOG.warn("Bad checksum combine mode: {}. Using default {}",
+          checksumCombineMode,
+          ChecksumCombineMode.COMPOSITE_CRC.name());
+      return ChecksumCombineMode.COMPOSITE_CRC;
+    }
+  }
 }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index f19853c..a268495 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -159,7 +159,7 @@
 
   private synchronized void connectToDatanode(DatanodeDetails dn)
       throws IOException {
-    if (isConnected(dn)){
+    if (isConnected(dn)) {
       return;
     }
     // read port from the data node, on failure use default configured
@@ -269,10 +269,10 @@
         Thread.currentThread().interrupt();
       }
     }
-    try{
+    try {
       for (Map.Entry<DatanodeDetails,
               CompletableFuture<ContainerCommandResponseProto> >
-              entry : futureHashMap.entrySet()){
+              entry : futureHashMap.entrySet()) {
         responseProtoHashMap.put(entry.getKey(), entry.getValue().get());
       }
     } catch (InterruptedException e) {
@@ -538,7 +538,7 @@
   }
 
   private synchronized void checkOpen(DatanodeDetails dn)
-      throws IOException{
+      throws IOException {
     if (closed) {
       throw new IOException("This channel is not connected.");
     }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 6b74adb..07fd0a8 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -206,8 +206,8 @@
     if (keyName == null) {
       throw new IllegalArgumentException("Key name is null");
     }
-    if(!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
-            .matcher(keyName).matches()){
+    if (!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
+            .matcher(keyName).matches()) {
       throw new IllegalArgumentException("Invalid key name: " + keyName);
     }
   }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
index e4500bc..bd97cf2 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
@@ -152,7 +152,7 @@
         // retry according to retry policy.
         chunks = getChunkInfos();
         break;
-      } catch(SCMSecurityException ex) {
+      } catch (SCMSecurityException ex) {
         throw ex;
       } catch (StorageContainerException ex) {
         refreshPipeline(ex);
@@ -340,9 +340,9 @@
         } else {
           throw e;
         }
-      } catch(SCMSecurityException ex) {
+      } catch (SCMSecurityException ex) {
         throw ex;
-      } catch(IOException ex) {
+      } catch (IOException ex) {
         // We got a IOException which might be due
         // to DN down or connectivity issue.
         if (shouldRetryRead(ex)) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 150c418..8b3f817 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -708,7 +708,7 @@
       boolean processExecutionException)
       throws IOException {
     LOG.error("Command execution was interrupted.");
-    if(processExecutionException) {
+    if (processExecutionException) {
       handleExecutionException(ex);
     } else {
       throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index 94fa87a..a520f8a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -46,7 +46,7 @@
   }
 
   public BufferPool(int bufferSize, int capacity,
-      Function<ByteBuffer, ByteString> byteStringConversion){
+      Function<ByteBuffer, ByteString> byteStringConversion) {
     this.capacity = capacity;
     this.bufferSize = bufferSize;
     bufferList = new ArrayList<>(capacity);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
index 7238f2a..802adc1 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
@@ -97,7 +97,7 @@
 
   @Override
   XceiverClientReply sendWatchForCommit(boolean bufferFull) throws IOException {
-    return bufferFull? commitWatcher.watchOnFirstIndex()
+    return bufferFull ? commitWatcher.watchOnFirstIndex()
         : commitWatcher.watchOnLastIndex();
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index f8a1f43..ea79acc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -168,7 +168,7 @@
    */
   public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration";
   // Limit Certificate duration to a max value of 5 years.
-  public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D";
+  public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D";
   public static final String HDDS_X509_SIGNATURE_ALGO =
       "hdds.x509.signature.algorithm";
   public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA";
@@ -286,5 +286,4 @@
           "hdds.container.checksum.verification.enabled";
   public static final boolean
           HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT = true;
-
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 5abe8fb..ffbb3e3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -632,7 +632,7 @@
    * Utility method to round up bytes into the nearest MB.
    */
   public static int roundupMb(long bytes) {
-    return (int)Math.ceil((double) bytes/(double) ONE_MB);
+    return (int)Math.ceil((double) bytes / (double) ONE_MB);
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
index 77d1930..792a9d0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
@@ -151,6 +151,6 @@
 
   public static String appendIfNotPresent(String str, char c) {
     Preconditions.checkNotNull(str, "Input string is null");
-    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c: str;
+    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c : str;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
index 37da0a3..03dc005 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
@@ -50,7 +50,7 @@
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Public {};
+  public @interface Public { };
   
   /**
    * Intended only for the project(s) specified in the annotation.
@@ -67,7 +67,7 @@
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Private {};
+  public @interface Private { };
 
-  private InterfaceAudience() {} // Audience can't exist on its own
+  private InterfaceAudience() { } // Audience can't exist on its own
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
index 9945690..794ebd2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
@@ -43,7 +43,7 @@
   public static final String OZONE_QUOTA_TB = "TB";
 
   /** Quota Units.*/
-  public enum Units {B, KB, MB, GB, TB}
+  public enum Units { B, KB, MB, GB, TB }
 
   // Quota to decide how many buckets can be created.
   private long quotaInNamespace;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
index 205cca1..5403469 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
@@ -28,13 +28,14 @@
   private ArrayList<OzoneQuota.Units> unitQuota;
   private ArrayList<Long> sizeQuota;
 
-  public QuotaList(){
+  public QuotaList() {
     ozoneQuota = new ArrayList<String>();
     unitQuota = new ArrayList<OzoneQuota.Units>();
     sizeQuota = new ArrayList<Long>();
   }
 
-  public void addQuotaList(String oQuota, OzoneQuota.Units uQuota, Long sQuota){
+  public void addQuotaList(
+      String oQuota, OzoneQuota.Units uQuota, Long sQuota) {
     ozoneQuota.add(oQuota);
     unitQuota.add(uQuota);
     sizeQuota.add(sQuota);
@@ -52,15 +53,15 @@
     return this.unitQuota;
   }
 
-  public OzoneQuota.Units getUnits(String oQuota){
+  public OzoneQuota.Units getUnits(String oQuota) {
     return unitQuota.get(ozoneQuota.indexOf(oQuota));
   }
 
-  public Long getQuotaSize(OzoneQuota.Units uQuota){
+  public Long getQuotaSize(OzoneQuota.Units uQuota) {
     return sizeQuota.get(unitQuota.indexOf(uQuota));
   }
 
-  public OzoneQuota.Units getQuotaUnit(Long sQuota){
+  public OzoneQuota.Units getQuotaUnit(Long sQuota) {
     return unitQuota.get(sizeQuota.indexOf(sQuota));
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
index 044bd6f..8623a0e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
@@ -46,7 +46,7 @@
    * @return ReplicationFactor
    */
   public static ReplicationFactor valueOf(int value) {
-    if(value == 1) {
+    if (value == 1) {
       return ONE;
     }
     if (value == 3) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index c259464..6b7e7c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -296,7 +296,7 @@
     return configMap;
   }
 
-  private static void addDeprecatedKeys(){
+  private static void addDeprecatedKeys() {
     Configuration.addDeprecations(new DeprecationDelta[]{
         new DeprecationDelta("ozone.datanode.pipeline.limit",
             ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT),
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
index 782a3e1..319fefd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -127,7 +127,7 @@
 
   private void refresh() {
     //only one `refresh` can be running at a certain moment
-    if(isRefreshRunning.compareAndSet(false, true)) {
+    if (isRefreshRunning.compareAndSet(false, true)) {
       try {
         cachedValue.set(source.getUsedSpace());
       } catch (RuntimeException e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index aef3c29..01bd0f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -713,7 +713,7 @@
      *
      * @return DatanodeDetails.Builder
      */
-    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){
+    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state) {
       this.persistedOpState = state;
       return this;
     }
@@ -726,7 +726,7 @@
      *
      * @return DatanodeDetails.Builder
      */
-    public Builder setPersistedOpStateExpiry(long expiry){
+    public Builder setPersistedOpStateExpiry(long expiry) {
       this.persistedOpStateExpiryEpochSec = expiry;
       return this;
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 50480c1..c1cd865 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -136,7 +136,7 @@
   }
 
   private static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
-    return peers.isEmpty()? emptyRaftGroup()
+    return peers.isEmpty() ? emptyRaftGroup()
         : RaftGroup.valueOf(DUMMY_GROUP_ID, peers);
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java
index 35d3a20..f8cda81 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/retrypolicy/RequestTypeDependentRetryPolicyCreator.java
@@ -27,7 +27,11 @@
 import org.apache.ratis.protocol.exceptions.StateMachineException;
 import org.apache.ratis.protocol.exceptions.TimeoutIOException;
 import org.apache.ratis.protocol.exceptions.ResourceUnavailableException;
-import org.apache.ratis.retry.*;
+import org.apache.ratis.retry.ExceptionDependentRetry;
+import org.apache.ratis.retry.ExponentialBackoffRetry;
+import org.apache.ratis.retry.MultipleLinearRandomRetry;
+import org.apache.ratis.retry.RetryPolicies;
+import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.util.TimeDuration;
 
 import java.time.Duration;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
index c91a186..d72e27a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
@@ -40,7 +40,7 @@
       type = ConfigType.STRING,
       defaultValue = "",
       tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
-      description = "The keytab file used by Recon daemon to login as "+
+      description = "The keytab file used by Recon daemon to login as " +
           "its service principal."
   )
   private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
index b5f6e48..14a229b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
@@ -32,7 +32,7 @@
  * Ozone configuration.
  */
 public final class ByteStringConversion {
-  private ByteStringConversion(){} // no instantiation.
+  private ByteStringConversion() { } // no instantiation.
 
   /**
    * Creates the conversion function to be used to convert ByteBuffers to
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index baee038..ce79ec2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -43,7 +43,7 @@
       type = ConfigType.STRING,
       defaultValue = "",
       tags = { ConfigTag.SECURITY, ConfigTag.OZONE },
-      description = "The keytab file used by SCM daemon to login as "+
+      description = "The keytab file used by SCM daemon to login as " +
           "its service principal."
   )
   private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index d4e5634..c1f43c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -525,7 +525,7 @@
 
   public static final String OZONE_SCM_HA_RAFT_LOG_PURGE_GAP =
           "ozone.scm.ha.ratis.log.purge.gap";
-  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT =1000000;
+  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT = 1000000;
 
   public static final String OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD =
           "ozone.scm.ha.ratis.snapshot.threshold";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index ae16784..0071924 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -25,6 +25,7 @@
 import java.util.Arrays;
 import java.util.Comparator;
 
+import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@@ -56,7 +57,12 @@
   // The wall-clock ms since the epoch at which the current state enters.
   private Instant stateEnterTime;
   private String owner;
-  private long containerID;
+  // The field is marked @JsonIgnore because this class originally held a long
+  // instead of a ContainerID object; emitting the object would change the JSON
+  // output. getContainerID() is therefore annotated with @JsonProperty so the
+  // original long value is still emitted and the output remains unchanged.
+  @JsonIgnore
+  private ContainerID containerID;
   // Delete Transaction Id is updated when new transaction for a container
   // is stored in SCM delete Table.
   // TODO: Replication Manager should consider deleteTransactionId so that
@@ -86,7 +92,7 @@
       long deleteTransactionId,
       long sequenceId,
       ReplicationConfig repConfig) {
-    this.containerID = containerID;
+    this.containerID = ContainerID.valueOf(containerID);
     this.pipelineID = pipelineID;
     this.usedBytes = usedBytes;
     this.numberOfKeys = numberOfKeys;
@@ -129,12 +135,12 @@
   }
 
   /**
-   * This method is depricated, use {@code containerID()} which returns
-   * {@link ContainerID} object.
+   * Unless the long value of the ContainerID is needed, use the containerID()
+   * method to obtain the {@link ContainerID} object.
    */
-  @Deprecated
+  @JsonProperty
   public long getContainerID() {
-    return containerID;
+    return containerID.getId();
   }
 
   public HddsProtos.LifeCycleState getState() {
@@ -199,7 +205,7 @@
   }
 
   public ContainerID containerID() {
-    return ContainerID.valueOf(containerID);
+    return containerID;
   }
 
   /**
@@ -219,7 +225,6 @@
   public HddsProtos.ContainerInfoProto getProtobuf() {
     HddsProtos.ContainerInfoProto.Builder builder =
         HddsProtos.ContainerInfoProto.newBuilder();
-    Preconditions.checkState(containerID > 0);
     builder.setContainerID(getContainerID())
         .setUsedBytes(getUsedBytes())
         .setNumberOfKeys(getNumberOfKeys()).setState(getState())
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
index 6f6caf3..2f2a7bf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
@@ -238,7 +238,7 @@
     increment(stat);
     List<ContainerID> list = containerSample
         .computeIfAbsent(stat, k -> new ArrayList<>());
-    synchronized(list) {
+    synchronized (list) {
       if (list.size() < SAMPLE_LIMIT) {
         list.add(container);
       }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
index 60f5aa2..5514708 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
@@ -36,7 +36,12 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.ratis.protocol.exceptions.*;
+import org.apache.ratis.protocol.exceptions.LeaderNotReadyException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.ReconfigurationInProgressException;
+import org.apache.ratis.protocol.exceptions.ReconfigurationTimeoutException;
+import org.apache.ratis.protocol.exceptions.ResourceUnavailableException;
+import org.apache.ratis.protocol.exceptions.StateMachineException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -235,7 +240,7 @@
   public static boolean isNonRetriableException(Exception e) {
     Throwable t =
         getExceptionForClass(e, StateMachineException.class);
-    return t == null ? false : true;
+    return t != null;
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index 6446595..bb4a164 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -38,7 +38,7 @@
  */
 public class InnerNodeImpl extends NodeImpl implements InnerNode {
   protected static class Factory implements InnerNode.Factory<InnerNodeImpl> {
-    protected Factory() {}
+    protected Factory() { }
 
     @Override
     public InnerNodeImpl newInnerNode(String name, String location,
@@ -93,7 +93,7 @@
     } else {
       for (Node node: childrenMap.values()) {
         if (node instanceof InnerNode) {
-          count += ((InnerNode)node).getNumOfNodes(level -1);
+          count += ((InnerNode)node).getNumOfNodes(level - 1);
         } else {
           throw new RuntimeException("Cannot support Level:" + level +
               " on this node " + this.toString());
@@ -119,7 +119,7 @@
     } else {
       for (Node node: childrenMap.values()) {
         if (node instanceof InnerNode) {
-          result.addAll(((InnerNode)node).getNodes(level -1));
+          result.addAll(((InnerNode)node).getNodes(level - 1));
         } else {
           throw new RuntimeException("Cannot support Level:" + level +
               " on this node " + this.toString());
@@ -138,7 +138,7 @@
       return true;
     }
     Node child = childrenMap.values().iterator().next();
-    return child instanceof InnerNode ? false : true;
+    return !(child instanceof InnerNode);
   }
 
   /**
@@ -265,7 +265,7 @@
     if (child == null) {
       return null;
     }
-    if (path.length == 1){
+    if (path.length == 1) {
       return child;
     }
     if (child instanceof InnerNode) {
@@ -292,7 +292,7 @@
       }
       return getChildNode(leafIndex);
     } else {
-      for(Node node : childrenMap.values()) {
+      for (Node node : childrenMap.values()) {
         InnerNodeImpl child = (InnerNodeImpl)node;
         int leafCount = child.getNumOfLeaves();
         if (leafIndex < leafCount) {
@@ -468,7 +468,7 @@
     if (leafIndex >= getNumOfChildren()) {
       return null;
     }
-    for(Node node : childrenMap.values()) {
+    for (Node node : childrenMap.values()) {
       if (excludedNodes != null && excludedNodes.contains(node)) {
         continue;
       }
@@ -519,7 +519,7 @@
   private Node getChildNode(int index) {
     Iterator iterator = childrenMap.values().iterator();
     Node node = null;
-    while(index >= 0 && iterator.hasNext()) {
+    while (index >= 0 && iterator.hasNext()) {
       node = (Node)iterator.next();
       index--;
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 43765a6..206a0fd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -45,7 +45,7 @@
  * (computers) and inner nodes represent datacenter/core-switches/routers that
  * manages traffic in/out of data centers or racks.
  */
-public class NetworkTopologyImpl implements NetworkTopology{
+public class NetworkTopologyImpl implements NetworkTopology {
   public static final Logger LOG =
       LoggerFactory.getLogger(NetworkTopologyImpl.class);
 
@@ -91,7 +91,7 @@
     Preconditions.checkArgument(node != null, "node cannot be null");
     if (node instanceof InnerNode) {
       throw new IllegalArgumentException(
-          "Not allowed to add an inner node: "+ node.getNetworkFullPath());
+          "Not allowed to add an inner node: " + node.getNetworkFullPath());
     }
     int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1;
 
@@ -104,7 +104,7 @@
     boolean add;
     try {
       add = clusterTree.add(node);
-    }finally {
+    } finally {
       netlock.writeLock().unlock();
     }
 
@@ -126,12 +126,12 @@
     Preconditions.checkArgument(node != null, "node cannot be null");
     if (node instanceof InnerNode) {
       throw new IllegalArgumentException(
-          "Not allowed to remove an inner node: "+ node.getNetworkFullPath());
+          "Not allowed to remove an inner node: " + node.getNetworkFullPath());
     }
     netlock.writeLock().lock();
     try {
       clusterTree.remove(node);
-    }finally {
+    } finally {
       netlock.writeLock().unlock();
     }
     LOG.info("Removed a node: {}", node.getNetworkFullPath());
@@ -534,7 +534,7 @@
             " generation  " + ancestorGen);
       }
       // affinity ancestor should has overlap with scope
-      if (affinityAncestor.getNetworkFullPath().startsWith(scope)){
+      if (affinityAncestor.getNetworkFullPath().startsWith(scope)) {
         finalScope = affinityAncestor.getNetworkFullPath();
       } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) {
         return null;
@@ -655,21 +655,21 @@
       if (level1 > maxLevel || level2 > maxLevel) {
         return Integer.MAX_VALUE;
       }
-      while(level1 > level2 && node1 != null) {
+      while (level1 > level2 && node1 != null) {
         node1 = node1.getParent();
         level1--;
-        cost += node1 == null? 0 : node1.getCost();
+        cost += node1 == null ? 0 : node1.getCost();
       }
-      while(level2 > level1 && node2 != null) {
+      while (level2 > level1 && node2 != null) {
         node2 = node2.getParent();
         level2--;
-        cost += node2 == null? 0 : node2.getCost();
+        cost += node2 == null ? 0 : node2.getCost();
       }
-      while(node1 != null && node2 != null && node1 != node2) {
+      while (node1 != null && node2 != null && node1 != node2) {
         node1 = node1.getParent();
         node2 = node2.getParent();
-        cost += node1 == null? 0 : node1.getCost();
-        cost += node2 == null? 0 : node2.getCost();
+        cost += node1 == null ? 0 : node1.getCost();
+        cost += node2 == null ? 0 : node2.getCost();
       }
       return cost;
     } finally {
@@ -752,7 +752,7 @@
     List<Node> excludedAncestorList =
         NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen);
     for (Node ancestor : excludedAncestorList) {
-      if (scope.startsWith(ancestor.getNetworkFullPath())){
+      if (scope.startsWith(ancestor.getNetworkFullPath())) {
         return 0;
       }
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
index 47e5de8..fc8e23b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
@@ -28,7 +28,7 @@
   /**
    * Network topology layer type enum definition.
    */
-  public enum LayerType{
+  public enum LayerType {
     ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT),
     INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT),
     LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT);
@@ -47,7 +47,7 @@
       return description;
     }
 
-    public int getCost(){
+    public int getCost() {
       return cost;
     }
     public static LayerType getType(String typeStr) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index cb9690f..289f7e6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -68,7 +68,7 @@
 
   private static final int LAYOUT_VERSION = 1;
   private static volatile NodeSchemaLoader instance = null;
-  private NodeSchemaLoader() {}
+  private NodeSchemaLoader() { }
 
   public static NodeSchemaLoader getInstance() {
     if (instance == null) {
@@ -324,7 +324,7 @@
     // Integrity check, only one ROOT and one LEAF is allowed
     boolean foundRoot = false;
     boolean foundLeaf = false;
-    for(NodeSchema schema: schemas.values()) {
+    for (NodeSchema schema: schemas.values()) {
       if (schema.getType() == LayerType.ROOT) {
         if (foundRoot) {
           throw new IllegalArgumentException("Multiple ROOT layers are found" +
@@ -385,7 +385,7 @@
               + "> is null");
         }
         if (TOPOLOGY_PATH.equals(tagName)) {
-          if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
+          if (value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
             value = value.substring(1);
           }
           String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
@@ -403,7 +403,7 @@
             throw new IllegalArgumentException("Topology path doesn't start "
                 + "with ROOT layer");
           }
-          if (schemas.get(layerIDs[layerIDs.length -1]).getType() !=
+          if (schemas.get(layerIDs[layerIDs.length - 1]).getType() !=
               LayerType.LEAF_NODE) {
             throw new IllegalArgumentException("Topology path doesn't end "
                 + "with LEAF layer");
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 044f151..f5c0b62 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -415,7 +415,7 @@
     private Instant creationTimestamp = null;
     private UUID suggestedLeaderId = null;
 
-    public Builder() {}
+    public Builder() { }
 
     public Builder(Pipeline pipeline) {
       this.id = pipeline.id;
@@ -486,10 +486,10 @@
       if (nodeOrder != null && !nodeOrder.isEmpty()) {
         // This branch is for build from ProtoBuf
         List<DatanodeDetails> nodesWithOrder = new ArrayList<>();
-        for(int i = 0; i < nodeOrder.size(); i++) {
+        for (int i = 0; i < nodeOrder.size(); i++) {
           int nodeIndex = nodeOrder.get(i);
           Iterator<DatanodeDetails> it = nodeStatus.keySet().iterator();
-          while(it.hasNext() && nodeIndex >= 0) {
+          while (it.hasNext() && nodeIndex >= 0) {
             DatanodeDetails node = it.next();
             if (nodeIndex == 0) {
               nodesWithOrder.add(node);
@@ -503,7 +503,7 @@
               nodesWithOrder, id);
         }
         pipeline.setNodesInOrder(nodesWithOrder);
-      } else if (nodesInOrder != null){
+      } else if (nodesInOrder != null) {
         // This branch is for pipeline clone
         pipeline.setNodesInOrder(nodesInOrder);
       }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index fcf3f13..7f2d2a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -434,7 +434,7 @@
     request.setContainerID(containerID);
     request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(id);
-    if(encodedToken != null) {
+    if (encodedToken != null) {
       request.setEncodedToken(encodedToken);
     }
     client.sendCommand(request.build(), getValidatorList());
@@ -458,7 +458,7 @@
     request.setContainerID(containerID);
     request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(id);
-    if(encodedToken != null) {
+    if (encodedToken != null) {
       request.setEncodedToken(encodedToken);
     }
     ContainerCommandResponseProto response =
@@ -560,8 +560,8 @@
     ContainerCommandRequestProto request = builder.build();
     Map<DatanodeDetails, ContainerCommandResponseProto> responses =
             xceiverClient.sendCommandOnAllNodes(request);
-    for(Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry:
-           responses.entrySet()){
+    for (Map.Entry<DatanodeDetails, ContainerCommandResponseProto> entry:
+           responses.entrySet()) {
       datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock());
     }
     return datanodeToResponseMap;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
index 3195e00..8cd68a0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
@@ -37,7 +37,7 @@
   public static final VersionInfo HDDS_VERSION_INFO =
       new VersionInfo("hdds");
 
-  private HddsVersionInfo() {}
+  private HddsVersionInfo() { }
 
   public static void main(String[] args) {
     System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
index 96d5996..e1e9598 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
@@ -90,7 +90,7 @@
 
   @Override
   public String toString() {
-    return (isClosed()? "closed/": availablePermits() + "/") + limit;
+    return (isClosed() ? "closed/" : availablePermits() + "/") + limit;
   }
 
   /**
@@ -101,7 +101,7 @@
 
     public Group(int... limits) {
       final List<ResourceSemaphore> list = new ArrayList<>(limits.length);
-      for(int limit : limits) {
+      for (int limit : limits) {
         list.add(new ResourceSemaphore(limit));
       }
       this.resources = Collections.unmodifiableList(list);
@@ -131,7 +131,7 @@
       }
 
       // failed at i, releasing all previous resources
-      for(i--; i >= 0; i--) {
+      for (i--; i >= 0; i--) {
         resources.get(i).release(permits[i]);
       }
       return false;
@@ -147,13 +147,13 @@
     }
 
     protected void release(int... permits) {
-      for(int i = resources.size() - 1; i >= 0; i--) {
+      for (int i = resources.size() - 1; i >= 0; i--) {
         resources.get(i).release(permits[i]);
       }
     }
 
     public void close() {
-      for(int i = resources.size() - 1; i >= 0; i--) {
+      for (int i = resources.size() - 1; i >= 0; i--) {
         resources.get(i).close();
       }
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
index ba062bc..6fff80f6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
@@ -51,7 +51,7 @@
   /**
    * Private constructor so that no one can instantiate this class.
    */
-  private UniqueId() {}
+  private UniqueId() { }
 
   /**
    * Calculate and returns next unique id based on System#currentTimeMillis.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 9c7d769..627c432 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -194,6 +194,11 @@
   public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
       = "300s"; // 300s for default
 
+  public static final String OZONE_BLOCK_DELETING_SERVICE_WORKERS =
+      "ozone.block.deleting.service.workers";
+  public static final int OZONE_BLOCK_DELETING_SERVICE_WORKERS_DEFAULT
+      = 10;
+
   public static final String OZONE_KEY_PREALLOCATION_BLOCKS_MAX =
       "ozone.key.preallocation.max.blocks";
   public static final int OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 133f39d..bdc8789 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -128,8 +128,8 @@
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
   public static final String CRL_DB_SUFFIX = "crl.db";
-  public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String DN_CRL_DB = "dn-"+ CRL_DB_SUFFIX;
+  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
+  public static final String DN_CRL_DB = "dn-" + CRL_DB_SUFFIX;
   public static final String CRL_DB_DIRECTORY_NAME = "crl";
   public static final String OM_DB_NAME = "om.db";
   public static final String SCM_DB_NAME = "scm.db";
@@ -187,7 +187,7 @@
 
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
-  public static final String OM_S3_PREFIX ="S3:";
+  public static final String OM_S3_PREFIX = "S3:";
   public static final String OM_S3_VOLUME_PREFIX = "s3";
   public static final String OM_S3_SECRET = "S3Secret:";
   public static final String OM_PREFIX = "Prefix:";
@@ -212,7 +212,7 @@
   /**
    * Quota Units.
    */
-  public enum Units {TB, GB, MB, KB, B}
+  public enum Units { TB, GB, MB, KB, B }
 
   /**
    * Max number of keys returned per list buckets operation.
@@ -333,6 +333,7 @@
   public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList";
   public static final String SOURCE_VOLUME = "sourceVolume";
   public static final String SOURCE_BUCKET = "sourceBucket";
+  public static final String BUCKET_LAYOUT = "bucketLayout";
 
 
 
@@ -389,7 +390,7 @@
   public static final Pattern KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX  =
           Pattern.compile("^[^^{}<>^?%~#`\\[\\]\\|\\\\(\\x80-\\xff)]+$");
 
-  public static final String FS_FILE_COPYING_TEMP_SUFFIX= "._COPYING_";
+  public static final String FS_FILE_COPYING_TEMP_SUFFIX = "._COPYING_";
 
   // Transaction Info
   public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
index 098ab6b..6c20968 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
@@ -26,7 +26,7 @@
 
   private String status;
 
-  AuditEventStatus(String status){
+  AuditEventStatus(String status) {
     this.status = status;
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index ee6f45d..9f1f5f0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -38,7 +38,7 @@
    * Parametrized Constructor to initialize logger.
    * @param type Audit Logger Type
    */
-  public AuditLogger(AuditLoggerType type){
+  public AuditLogger(AuditLoggerType type) {
     initializeLogger(type);
   }
 
@@ -46,7 +46,7 @@
    * Initializes the logger with specific type.
    * @param loggerType specified one of the values from enum AuditLoggerType.
    */
-  private void initializeLogger(AuditLoggerType loggerType){
+  private void initializeLogger(AuditLoggerType loggerType) {
     this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
index 18241c7..dbfde9f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
@@ -31,7 +31,7 @@
     return type;
   }
 
-  AuditLoggerType(String type){
+  AuditLoggerType(String type) {
     this.type = type;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
index 505b958..3414aa4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
@@ -28,11 +28,11 @@
 
   private Marker marker;
 
-  AuditMarker(Marker marker){
+  AuditMarker(Marker marker) {
     this.marker = marker;
   }
 
-  public Marker getMarker(){
+  public Marker getMarker() {
     return marker;
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
index 6f3bbad..9d28c9f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
@@ -64,12 +64,12 @@
     private Map<String, String> params;
     private String ret;
 
-    public Builder setUser(String usr){
+    public Builder setUser(String usr) {
       this.user = usr;
       return this;
     }
 
-    public Builder atIp(String ipAddr){
+    public Builder atIp(String ipAddr) {
       this.ip = ipAddr;
       return this;
     }
@@ -79,7 +79,7 @@
       return this;
     }
 
-    public Builder withParams(Map<String, String> args){
+    public Builder withParams(Map<String, String> args) {
       this.params = args;
       return this;
     }
@@ -89,12 +89,12 @@
       return this;
     }
 
-    public Builder withException(Throwable ex){
+    public Builder withException(Throwable ex) {
       this.throwable = ex;
       return this;
     }
 
-    public AuditMessage build(){
+    public AuditMessage build() {
       String message = "user=" + this.user + " | ip=" + this.ip + " | " +
           "op=" + this.op + " " + this.params + " | " + "ret=" + this.ret;
       return new AuditMessage(message, throwable);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
index 7ce643d..6187d6b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
@@ -80,7 +80,7 @@
     }
 
     private static int update(int crc, ByteBuffer b, int[] table) {
-      for(; b.remaining() > 7;) {
+      for (; b.remaining() > 7;) {
         final int c0 = (b.get() ^ crc) & 0xff;
         final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff;
         final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
index 7622ffc..5a63c09 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
@@ -57,8 +57,9 @@
     Preconditions.checkArgument(increment > 0);
     this.limit = limit;
     this.increment = increment;
-    this.limitIndex = limit/increment;
-    this.buffers = new ArrayList<>(limitIndex + (limit%increment == 0? 0: 1));
+    this.limitIndex = limit / increment;
+    this.buffers = new ArrayList<>(
+        limitIndex + (limit % increment == 0 ? 0 : 1));
     this.isDuplicated = isDuplicated;
   }
 
@@ -66,7 +67,7 @@
   private int getBufferCapacityAtIndex(int i) {
     Preconditions.checkArgument(i >= 0);
     Preconditions.checkArgument(i <= limitIndex);
-    return i < limitIndex? increment: limit%increment;
+    return i < limitIndex ? increment : limit % increment;
   }
 
   private void assertInt(int expected, int computed, String name, int i) {
@@ -126,7 +127,7 @@
     Preconditions.checkArgument(position < limit);
     final int i = position / increment;
     final ByteBuffer ith = getAndAllocateAtIndex(i);
-    assertInt(position%increment, ith.position(), "position", i);
+    assertInt(position % increment, ith.position(), "position", i);
     return ith;
   }
 
@@ -207,7 +208,7 @@
     }
 
     final int thatLimit = that.limit();
-    for(int p = position(); that.position() < thatLimit;) {
+    for (int p = position(); that.position() < thatLimit;) {
       final ByteBuffer b = getAndAllocateAtPosition(p);
       final int min = Math.min(b.remaining(), thatLimit - that.position());
       that.limit(that.position() + min);
@@ -229,7 +230,7 @@
     final int pr = newPosition % increment;
     final int li = newLimit / increment;
     final int lr = newLimit % increment;
-    final int newSize = lr == 0? li: li + 1;
+    final int newSize = lr == 0 ? li : li + 1;
 
     for (int i = 0; i < newSize; i++) {
       final int pos = i < pi ? increment : i == pi ? pr : 0;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index e6e1df5..6ba4384 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -109,7 +109,7 @@
 
   public Long  getCreationTime() {
     String creationTime = properties.getProperty(CREATION_TIME);
-    if(creationTime != null) {
+    if (creationTime != null) {
       return Long.parseLong(creationTime);
     }
     return null;
@@ -117,7 +117,7 @@
 
   public int getLayoutVersion() {
     String layout = properties.getProperty(LAYOUT_VERSION);
-    if(layout != null) {
+    if (layout != null) {
       return Integer.parseInt(layout);
     }
     return 0;
@@ -166,7 +166,7 @@
       throws InconsistentStorageStateException {
     NodeType nodeType = getNodeType();
     Preconditions.checkNotNull(nodeType);
-    if(type != nodeType) {
+    if (type != nodeType) {
       throw new InconsistentStorageStateException("Expected NodeType: " + type +
           ", but found: " + nodeType);
     }
@@ -176,7 +176,7 @@
       throws InconsistentStorageStateException {
     String clusterId = getClusterID();
     Preconditions.checkNotNull(clusterId);
-    if(clusterId.isEmpty()) {
+    if (clusterId.isEmpty()) {
       throw new InconsistentStorageStateException("Cluster ID not found");
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
index a9de892..ebc4bba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
@@ -50,7 +50,7 @@
     this.snapshotIndex = newIndex;
   }
 
-  public RatisSnapshotInfo() {}
+  public RatisSnapshotInfo() { }
 
   public RatisSnapshotInfo(long term, long index) {
     this.term = term;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index 8ea1689..434e497 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -161,8 +161,8 @@
    * @return list of chunkinfo.
    */
   public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunkList == null? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo?
+    return chunkList == null ? Collections.emptyList()
+        : chunkList instanceof ContainerProtos.ChunkInfo ?
             Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
         : Collections.unmodifiableList(castChunkList());
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
index 7773828..a13f164 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
@@ -45,7 +45,7 @@
     Map<String, String> auditParams = new TreeMap<>();
     Type cmdType = msg.getCmdType();
     String containerID = String.valueOf(msg.getContainerID());
-    switch(cmdType) {
+    switch (cmdType) {
     case CreateContainer:
       auditParams.put("containerID", containerID);
       auditParams.put("containerType",
@@ -75,11 +75,11 @@
       return auditParams;
 
     case PutBlock:
-      try{
+      try {
         auditParams.put("blockData",
             BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData())
                 .toString());
-      } catch (IOException ex){
+      } catch (IOException ex) {
         if (LOG.isTraceEnabled()) {
           LOG.trace("Encountered error parsing BlockData from protobuf: "
               + ex.getMessage());
@@ -132,11 +132,11 @@
     case CompactChunk: return null; //CompactChunk operation
 
     case PutSmallFile:
-      try{
+      try {
         auditParams.put("blockData",
             BlockData.getFromProtoBuf(msg.getPutSmallFile()
                 .getBlock().getBlockData()).toString());
-      } catch (IOException ex){
+      } catch (IOException ex) {
         if (LOG.isTraceEnabled()) {
           LOG.trace("Encountered error parsing BlockData from protobuf: "
               + ex.getMessage());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
index e95105b..2fd7a9d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
@@ -90,7 +90,7 @@
    */
   public void registerCallBack(Callable<Void> callback)
       throws LeaseExpiredException {
-    if(hasExpired()) {
+    if (hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     callbacks.add(callback);
@@ -104,7 +104,7 @@
    *         If the lease has already timed out
    */
   public long getElapsedTime() throws LeaseExpiredException {
-    if(hasExpired()) {
+    if (hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     return Time.monotonicNow() - creationTime;
@@ -129,7 +129,7 @@
    *         If the lease has already timed out
    */
   public long getLeaseLifeTime() throws LeaseExpiredException {
-    if(hasExpired()) {
+    if (hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     return leaseTimeout.get();
@@ -144,7 +144,7 @@
    *         If the lease has already timed out
    */
   public void renew(long timeout) throws LeaseExpiredException {
-    if(hasExpired()) {
+    if (hasExpired()) {
       throw new LeaseExpiredException(messageForResource(resource));
     }
     leaseTimeout.addAndGet(timeout);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
index a79d517..3f2d5fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
@@ -53,7 +53,7 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("Executing callbacks for lease on {}", resource);
     }
-    for(Callable<Void> callback : callbacks) {
+    for (Callable<Void> callback : callbacks) {
       try {
         callback.call();
       } catch (Exception e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
index 45f0638..b3ffe59 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
@@ -104,7 +104,7 @@
               long ended = System.currentTimeMillis();
               LOG.debug(String.format(
                   "Completed shutdown in %.3f seconds; Timeouts: %d",
-                  (ended-started)/1000.0, timeoutCount));
+                  (ended - started) / 1000.0, timeoutCount));
               // each of the hooks have executed; now shut down the
               // executor itself.
               shutdownExecutor(new OzoneConfiguration());
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 982b853..269f9d9 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -399,6 +399,14 @@
     </description>
   </property>
   <property>
+    <name>ozone.block.deleting.service.workers</name>
+    <value>10</value>
+    <tag>OZONE, PERFORMANCE, SCM</tag>
+    <description>Number of worker threads used by the block deletion service.
+      This configuration should be set to a value greater than 0.
+    </description>
+  </property>
+  <property>
     <name>ozone.UnsafeByteOperations.enabled</name>
     <value>true</value>
     <tag>OZONE, PERFORMANCE, CLIENT</tag>
@@ -2570,6 +2578,24 @@
     </description>
   </property>
   <property>
+    <name>recon.om.delta.update.limit</name>
+    <value>2000</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      Recon fetches a limited number of delta updates from OM in each request.
+      The actual amount of fetched data might be larger than this limit.
+    </description>
+  </property>
+  <property>
+    <name>recon.om.delta.update.loop.limit</name>
+    <value>10</value>
+    <tag>OZONE, RECON</tag>
+    <description>
+      A sync between Recon and OM consists of several small fetch loops;
+      this limits the number of loops in a single sync.
+    </description>
+  </property>
+  <property>
     <name>ozone.recon.scm.container.threshold</name>
     <value>100</value>
     <tag>OZONE, RECON, SCM</tag>
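Note: the recon.om.delta.update properties added above bound how much work a single Recon-to-OM sync performs. As a rough, non-authoritative sketch of how such keys are typically consumed, the snippet below reads them through the Configuration API that OzoneConfiguration inherits; the class name, field names, and hard-coded defaults are illustrative only and not part of this patch.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/**
 * Illustrative only: shows how the new ozone-default.xml keys above could be
 * read. Names and defaults here are hypothetical, not part of the patch.
 */
public final class ReconSyncLimits {
  private final int deltaUpdateLimit;
  private final int deltaUpdateLoopLimit;

  public ReconSyncLimits(OzoneConfiguration conf) {
    // Defaults mirror the values declared in ozone-default.xml above.
    this.deltaUpdateLimit = conf.getInt("recon.om.delta.update.limit", 2000);
    this.deltaUpdateLoopLimit =
        conf.getInt("recon.om.delta.update.loop.limit", 10);
  }

  public int getDeltaUpdateLimit() {
    return deltaUpdateLimit;
  }

  public int getDeltaUpdateLoopLimit() {
    return deltaUpdateLoopLimit;
  }
}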
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index ef93927..fd8aa28 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -122,7 +122,7 @@
     assertThat(addresses.size(), is(3));
     it = addresses.iterator();
     HashMap<String, Integer> expected1 = new HashMap<>(hostsAndPorts);
-    while(it.hasNext()) {
+    while (it.hasNext()) {
       InetSocketAddress current = it.next();
       assertTrue(expected1.remove(current.getHostName(),
           current.getPort()));
@@ -136,7 +136,7 @@
     assertThat(addresses.size(), is(3));
     it = addresses.iterator();
     HashMap<String, Integer> expected2 = new HashMap<>(hostsAndPorts);
-    while(it.hasNext()) {
+    while (it.hasNext()) {
       InetSocketAddress current = it.next();
       assertTrue(expected2.remove(current.getHostName(),
           current.getPort()));
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index 9adf8f7..1315ad5 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -167,7 +167,7 @@
   @Test
   public void testValidationBasedOnConfig() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_REPLICATION+".allowed-configs",
+    conf.set(OZONE_REPLICATION + ".allowed-configs",
         "^STANDALONE/ONE|RATIS/THREE$");
     conf.set(OZONE_REPLICATION, factor);
     conf.set(OZONE_REPLICATION_TYPE, type);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index b057349..c9ed258 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -96,7 +96,7 @@
 
     long usedSpace = du.getUsedSpace();
 
-    assertFileSize(4*KB, usedSpace);
+    assertFileSize(4 * KB, usedSpace);
   }
 
   private static void assertFileSize(long expected, long actual) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
index 469faac..d3ddbe0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java
@@ -45,7 +45,7 @@
 
   static ByteString newData(int length) {
     final ByteString.Output out = ByteString.newOutput();
-    for(int i = 0; i < length; i++) {
+    for (int i = 0; i < length; i++) {
       out.write(RANDOM.nextInt());
     }
     return out.toByteString();
@@ -128,10 +128,10 @@
   static void runTest(
       BiFunction<BlockID, ByteString, ContainerCommandRequestProto> method)
       throws Exception {
-    for(int i = 0; i < 2; i++) {
+    for (int i = 0; i < 2; i++) {
       runTest(i, method);
     }
-    for(int i = 2; i < 1 << 10;) {
+    for (int i = 2; i < 1 << 10;) {
       runTest(i + 1 + RANDOM.nextInt(i - 1), method);
       i <<= 1;
       runTest(i, method);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
index 12a0240..e561bb7 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
@@ -93,7 +93,7 @@
     int count = 1;
     for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
       Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
-      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId());
       Assert.assertEquals("localhost:" + ++port,
           scmNodeInfo.getBlockClientAddress());
       Assert.assertEquals("localhost:" + ++port,
@@ -117,7 +117,7 @@
     int count = 1;
     for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
       Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
-      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId());
       Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT,
           scmNodeInfo.getBlockClientAddress());
       Assert.assertEquals("localhost:" +
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index 0008e66..e50eca2 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -171,7 +171,7 @@
   @Test
   public void testContains() {
     Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4");
-    for (int i=0; i < dataNodes.length; i++) {
+    for (int i = 0; i < dataNodes.length; i++) {
       assertTrue(cluster.contains(dataNodes[i]));
     }
     assertFalse(cluster.contains(nodeNotInMap));
@@ -238,7 +238,7 @@
     assumeTrue(cluster.getMaxLevel() > 2);
     int maxLevel = cluster.getMaxLevel();
     assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1]));
-    while(maxLevel > 1) {
+    while (maxLevel > 1) {
       assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1],
           maxLevel - 1));
       maxLevel--;
@@ -262,17 +262,17 @@
 
   @Test
   public void testAddRemove() {
-    for(int i = 0; i < dataNodes.length; i++) {
+    for (int i = 0; i < dataNodes.length; i++) {
       cluster.remove(dataNodes[i]);
     }
-    for(int i = 0; i < dataNodes.length; i++) {
+    for (int i = 0; i < dataNodes.length; i++) {
       assertFalse(cluster.contains(dataNodes[i]));
     }
     // no leaf nodes
     assertEquals(0, cluster.getNumOfLeafNode(null));
     // no inner nodes
     assertEquals(0, cluster.getNumOfNodes(2));
-    for(int i = 0; i < dataNodes.length; i++) {
+    for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
     }
     // Inner nodes are created automatically
@@ -467,10 +467,10 @@
         }};
     int leafNum = cluster.getNumOfLeafNode(null);
     Map<Node, Integer> frequency;
-    for(Node[] list : excludedNodeLists) {
+    for (Node[] list : excludedNodeLists) {
       List<Node> excludedList = Arrays.asList(list);
       int ancestorGen = 0;
-      while(ancestorGen < cluster.getMaxLevel()) {
+      while (ancestorGen < cluster.getMaxLevel()) {
         frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
         List<Node> ancestorList = NetUtils.getAncestorList(cluster,
             excludedList, ancestorGen);
@@ -490,7 +490,7 @@
     // all nodes excluded, no node will be picked
     List<Node> excludedList = Arrays.asList(dataNodes);
     int ancestorGen = 0;
-    while(ancestorGen < cluster.getMaxLevel()) {
+    while (ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
       for (Node key : dataNodes) {
         assertTrue(frequency.get(key) == 0);
@@ -500,7 +500,7 @@
     // out scope excluded nodes, each node will be picked
     excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1"));
     ancestorGen = 0;
-    while(ancestorGen < cluster.getMaxLevel()) {
+    while (ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen);
       for (Node key : dataNodes) {
         assertTrue(frequency.get(key) != 0);
@@ -536,7 +536,7 @@
       while (!path.equals(ROOT)) {
         scope = "~" + path;
         int ancestorGen = 0;
-        while(ancestorGen < cluster.getMaxLevel()) {
+        while (ancestorGen < cluster.getMaxLevel()) {
           for (Node[] list : excludedNodeLists) {
             List<Node> excludedList = Arrays.asList(list);
             frequency =
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
index b7b3dc6..00124d9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java
@@ -51,7 +51,7 @@
   public static Pipeline createPipeline(int numNodes) throws IOException {
     Preconditions.checkArgument(numNodes >= 1);
     final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
-    for(int i = 0; i < numNodes; i++) {
+    for (int i = 0; i < numNodes; i++) {
       ids.add(MockDatanodeDetails.randomLocalDatanodeDetails());
     }
     return createPipeline(ids);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
index fe4ccc0..ce6f58d 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
@@ -43,7 +43,7 @@
       throws IOException {
 
     checkElementIndex(offset, srcs.length, "offset");
-    checkElementIndex(offset+length-1, srcs.length, "offset+length");
+    checkElementIndex(offset + length - 1, srcs.length, "offset+length");
 
     long bytes = 0;
     for (ByteBuffer b : srcs) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
index cbdd558..f9c194d 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
@@ -52,18 +52,18 @@
     try {
       g.release(1, 0);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e){
+    } catch (IllegalStateException e) {
     }
     try {
       g.release(0, 1);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e){
+    } catch (IllegalStateException e) {
     }
   }
 
   static void assertUsed(ResourceSemaphore.Group g, int... expected) {
     Assert.assertEquals(expected.length, g.resourceSize());
-    for(int i = 0; i < expected.length; i++) {
+    for (int i = 0; i < expected.length; i++) {
       Assert.assertEquals(expected[i], g.get(i).used());
     }
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
index 0c2d98f..9555225 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
@@ -27,7 +27,7 @@
   private String key1;
   private String key2;
 
-  public DummyEntity(){
+  public DummyEntity() {
     this.key1 = "value1";
     this.key2 = "value2";
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index 41dc4f5..01fceae 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -189,7 +189,7 @@
       lines = FileUtils.readLines(file, (String)null);
       try {
         Thread.sleep(500 * (i + 1));
-      } catch(InterruptedException ie) {
+      } catch (InterruptedException ie) {
         Thread.currentThread().interrupt();
         break;
       }
@@ -212,7 +212,7 @@
     assertEquals(0, lines.size());
   }
 
-  private static class TestException extends Exception{
+  private static class TestException extends Exception {
     TestException(String message) {
       super(message);
     }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
index 2e144e6..a61ff90 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
@@ -73,7 +73,7 @@
     // Change the data and check if new checksum matches the original checksum.
     // Modifying one byte of data should be enough for the checksum data to
     // mismatch
-    data[50] = (byte) (data[50]+1);
+    data[50] = (byte) (data[50] + 1);
     ChecksumData newChecksumData = checksum.computeChecksum(data);
     Assert.assertNotEquals("Checksums should not match for different data",
         originalChecksumData, newChecksumData);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
index 9b69fad..1e85099 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
@@ -46,7 +46,7 @@
   public void testImplWithByteBuffer() {
     runTestImplWithByteBuffer(1);
     runTestImplWithByteBuffer(1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       runTestImplWithByteBuffer(nextInt(100) + 1);
     }
   }
@@ -62,7 +62,7 @@
     runTestIncrementalChunkBuffer(1, 1);
     runTestIncrementalChunkBuffer(4, 8);
     runTestIncrementalChunkBuffer(16, 1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(100) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestIncrementalChunkBuffer(Math.min(a, b), Math.max(a, b));
@@ -80,7 +80,7 @@
   public void testImplWithList() {
     runTestImplWithList(4, 8);
     runTestImplWithList(16, 1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(10) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestImplWithList(Math.min(a, b), Math.max(a, b));
@@ -131,7 +131,7 @@
       assertIterate(expected, impl, bpc);
     } else if (bpc == 0) {
       for (int d = 1; d < 5; d++) {
-        final int bytesPerChecksum = n/d;
+        final int bytesPerChecksum = n / d;
         if (bytesPerChecksum > 0) {
           assertIterate(expected, impl, bytesPerChecksum);
         }
@@ -148,7 +148,7 @@
   private static void assertDuplicate(byte[] expected, ChunkBuffer impl) {
     final int n = expected.length;
     assertToByteString(expected, 0, n, impl);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int offset = nextInt(n);
       final int length = nextInt(n - offset + 1);
       assertToByteString(expected, offset, length, impl);
@@ -165,14 +165,14 @@
     final int numChecksums = (n + bpc - 1) / bpc;
     final Iterator<ByteBuffer> i = duplicated.iterate(bpc).iterator();
     int count = 0;
-    for(int j = 0; j < numChecksums; j++) {
+    for (int j = 0; j < numChecksums; j++) {
       final ByteBuffer b = i.next();
-      final int expectedRemaining = j < numChecksums - 1?
-          bpc : n - bpc *(numChecksums - 1);
+      final int expectedRemaining = j < numChecksums - 1 ?
+          bpc : n - bpc * (numChecksums - 1);
       Assert.assertEquals(expectedRemaining, b.remaining());
 
-      final int offset = j* bpc;
-      for(int k = 0; k < expectedRemaining; k++) {
+      final int offset = j * bpc;
+      for (int k = 0; k < expectedRemaining; k++) {
         Assert.assertEquals(expected[offset + k], b.get());
         count++;
       }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
index c1470bb..be0575d 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
@@ -45,12 +45,12 @@
   /**
    * STATES used by the test state machine.
    */
-  public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL};
+  public enum STATES { INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL };
 
   /**
    * EVENTS used by the test state machine.
    */
-  public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT};
+  public enum EVENTS { ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT };
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 95282d5..86198cd 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -268,41 +268,6 @@
   }
 
   /**
-   * Returns a delete Request.
-   *
-   * @param pipeline pipeline.
-   * @param writeRequest - write request
-   * @return request
-   */
-  public static ContainerCommandRequestProto getDeleteChunkRequest(
-      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest)
-      throws IOException {
-    return newDeleteChunkRequestBuilder(pipeline, writeRequest).build();
-  }
-
-  public static Builder newDeleteChunkRequestBuilder(Pipeline pipeline,
-      ContainerProtos.WriteChunkRequestProtoOrBuilder writeRequest)
-      throws IOException {
-    LOG.trace("deleteChunk blockID={} from pipeline={}",
-        writeRequest.getBlockID(), pipeline);
-
-    ContainerProtos.DeleteChunkRequestProto.Builder deleteRequest =
-        ContainerProtos.DeleteChunkRequestProto
-            .newBuilder();
-
-    deleteRequest.setChunkData(writeRequest.getChunkData());
-    deleteRequest.setBlockID(writeRequest.getBlockID());
-
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteChunk);
-    request.setContainerID(writeRequest.getBlockID().getContainerID());
-    request.setDeleteChunk(deleteRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request;
-  }
-
-  /**
    * Returns a create container command for test purposes. There are a bunch of
    * tests where we need to just send a request and get a reply.
    *
@@ -352,7 +317,7 @@
     LOG.trace("addContainer: {}", containerID);
 
     Builder request = getContainerCommandRequestBuilder(containerID, pipeline);
-    if(token != null){
+    if (token != null) {
       request.setEncodedToken(token.encodeToUrlString());
     }
     return request.build();
@@ -505,34 +470,6 @@
         response.getGetBlock().getBlockData().getChunksCount());
   }
 
-  /**
-   * @param pipeline - pipeline.
-   * @param putBlockRequest - putBlockRequest.
-   * @return - Request
-   */
-  public static ContainerCommandRequestProto getDeleteBlockRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest)
-      throws IOException {
-    return newDeleteBlockRequestBuilder(pipeline, putBlockRequest).build();
-  }
-
-  public static Builder newDeleteBlockRequestBuilder(Pipeline pipeline,
-      ContainerProtos.PutBlockRequestProtoOrBuilder putBlockRequest)
-      throws IOException {
-    DatanodeBlockID blockID = putBlockRequest.getBlockData().getBlockID();
-    LOG.trace("deleteBlock: name={}", blockID);
-    ContainerProtos.DeleteBlockRequestProto.Builder delRequest =
-        ContainerProtos.DeleteBlockRequestProto.newBuilder();
-    delRequest.setBlockID(blockID);
-    Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteBlock);
-    request.setContainerID(blockID.getContainerID());
-    request.setDeleteBlock(delRequest);
-    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
-    return request;
-  }
-
   public static Builder newGetCommittedBlockLengthBuilder(Pipeline pipeline,
       ContainerProtos.PutBlockRequestProtoOrBuilder putBlock)
       throws IOException {
@@ -680,11 +617,6 @@
           .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo)
           .setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1).build());
       break;
-    case DeleteChunk:
-      builder
-          .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder()
-              .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build());
-      break;
     case GetSmallFile:
       builder
           .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder()
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index a51be5f..c9b9bf1 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -51,7 +51,7 @@
 
     @Override
     public boolean equals(Object obj) {
-      if(obj instanceof DummyResource) {
+      if (obj instanceof DummyResource) {
         return name.equals(((DummyResource) obj).name);
       }
       return false;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestDefaultUpgradeFinalizationExecutor.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestDefaultUpgradeFinalizationExecutor.java
index 977bf3c..8e9eb3a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestDefaultUpgradeFinalizationExecutor.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestDefaultUpgradeFinalizationExecutor.java
@@ -38,8 +38,7 @@
         mock(AbstractLayoutVersionManager.class);
     when(mockLvm.needsFinalization()).thenReturn(true);
 
-    BasicUpgradeFinalizer uf =
-        new BasicUpgradeFinalizer(mockLvm) {
+    BasicUpgradeFinalizer uf = new BasicUpgradeFinalizer(mockLvm) {
       @Override
       protected void preFinalizeUpgrade(Object service) throws IOException {
         throw new IOException("Failure!");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 58c6e72..82a733a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
@@ -111,12 +110,12 @@
   private final Map<String, RatisDropwizardExports> ratisMetricsMap =
       new ConcurrentHashMap<>();
   private DNMXBeanImpl serviceRuntimeInfo =
-      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {};
+      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { };
   private ObjectName dnInfoBeanName;
   private DatanodeCRLStore dnCRLStore;
 
   //Constructor for DataNode PluginService
-  public HddsDatanodeService(){}
+  public HddsDatanodeService() { }
 
   public HddsDatanodeService(boolean printBanner, String[] args) {
     this.printBanner = printBanner;
@@ -376,7 +375,7 @@
               datanodeDetails.getProtoBufMessage(),
               getEncodedString(csr));
       // Persist certificates.
-      if(response.hasX509CACertificate()) {
+      if (response.hasX509CACertificate()) {
         String pemEncodedCert = response.getX509Certificate();
         dnCertClient.storeCertificate(pemEncodedCert, true);
         dnCertClient.storeCertificate(response.getX509CACertificate(), true,
@@ -453,16 +452,6 @@
   private DatanodeDetails initializeDatanodeDetails()
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-          " for details on configuring Ozone.");
-    }
-
     Preconditions.checkNotNull(idFilePath);
     File idFile = new File(idFilePath);
     if (idFile.exists()) {
@@ -487,16 +476,6 @@
   private void persistDatanodeDetails(DatanodeDetails dnDetails)
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-          " for details on configuring Ozone.");
-    }
-
     Preconditions.checkNotNull(idFilePath);
     File idFile = new File(idFilePath);
     ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 970251c..3d6cb3b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -44,7 +44,7 @@
  *
  */
 @InterfaceAudience.Private
-@Metrics(about="Storage Container DataNode Metrics", context="dfs")
+@Metrics(about = "Storage Container DataNode Metrics", context = "dfs")
 public class ContainerMetrics {
   public static final String STORAGE_CONTAINER_METRICS =
       "StorageContainerMetrics";
@@ -106,7 +106,7 @@
     numOpsArray[type.ordinal()].incr();
   }
 
-  public long getContainerOpsMetrics(ContainerProtos.Type type){
+  public long getContainerOpsMetrics(ContainerProtos.Type type) {
     return numOpsArray[type.ordinal()].value();
   }
 
@@ -122,7 +122,7 @@
     opsBytesArray[type.ordinal()].incr(bytes);
   }
 
-  public long getContainerBytesMetrics(ContainerProtos.Type type){
+  public long getContainerBytesMetrics(ContainerProtos.Type type) {
     return opsBytesArray[type.ordinal()].value();
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 032705d..2b63183 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -189,7 +189,7 @@
             HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED,
             HddsConfigKeys.
                     HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT);
-    if(enabled) {
+    if (enabled) {
       String storedChecksum = containerData.getChecksum();
 
       Yaml yaml = ContainerDataYaml.getYamlForContainerType(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index fa70819..bc6147e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -95,7 +95,7 @@
   private final AtomicLong readCount;
   private final AtomicLong writeCount;
   private final AtomicLong bytesUsed;
-  private final AtomicLong keyCount;
+  private final AtomicLong blockCount;
 
   private HddsVolume volume;
 
@@ -150,7 +150,7 @@
     this.writeCount =  new AtomicLong(0L);
     this.writeBytes =  new AtomicLong(0L);
     this.bytesUsed = new AtomicLong(0L);
-    this.keyCount = new AtomicLong(0L);
+    this.blockCount = new AtomicLong(0L);
     this.maxSize = size;
     this.originPipelineId = originPipelineId;
     this.originNodeId = originNodeId;
@@ -289,6 +289,14 @@
   }
 
   /**
+   * checks if the container is closing.
+   * @return - boolean
+   */
+  public synchronized boolean isClosing() {
+    return ContainerDataProto.State.CLOSING == state;
+  }
+
+  /**
    * checks if the container is invalid.
    * @return - boolean
    */
@@ -494,42 +502,42 @@
   }
 
   /**
-   * Increments the number of keys in the container.
+   * Increments the number of blocks in the container.
    */
-  public void incrKeyCount() {
-    this.keyCount.incrementAndGet();
+  public void incrBlockCount() {
+    this.blockCount.incrementAndGet();
   }
 
   /**
-   * Decrements number of keys in the container.
+   * Decrements number of blocks in the container.
    */
-  public void decrKeyCount() {
-    this.keyCount.decrementAndGet();
+  public void decrBlockCount() {
+    this.blockCount.decrementAndGet();
   }
 
   /**
-   * Decrease the count of keys in the container.
+   * Decrease the count of blocks in the container.
    *
-   * @param deletedKeyCount
+   * @param deletedBlockCount
    */
-  public void decrKeyCount(long deletedKeyCount) {
-    this.keyCount.addAndGet(-1 * deletedKeyCount);
+  public void decrBlockCount(long deletedBlockCount) {
+    this.blockCount.addAndGet(-1 * deletedBlockCount);
   }
 
   /**
-   * Returns number of keys in the container.
-   * @return key count
+   * Returns number of blocks in the container.
+   * @return block count
    */
-  public long getKeyCount() {
-    return this.keyCount.get();
+  public long getBlockCount() {
+    return this.blockCount.get();
   }
 
   /**
-   * Set's number of keys in the container.
+   * Sets the number of blocks in the container.
    * @param count
    */
-  public void setKeyCount(long count) {
-    this.keyCount.set(count);
+  public void setBlockCount(long count) {
+    this.blockCount.set(count);
   }
 
   public void setChecksumTo0ByteArray() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index b6a8c3e..e5325c9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -235,18 +235,20 @@
   public ContainerReportsProto getContainerReport() throws IOException {
     LOG.debug("Starting container report iteration.");
 
+    ContainerReportsProto.Builder crBuilder =
+        ContainerReportsProto.newBuilder();
     // No need for locking since containerMap is a ConcurrentSkipListMap
     // And we can never get the exact state since close might happen
     // after we iterate a point.
     List<Container<?>> containers = new ArrayList<>(containerMap.values());
-
-    ContainerReportsProto.Builder crBuilder =
-        ContainerReportsProto.newBuilder();
-
-    for (Container<?> container: containers) {
-      crBuilder.addReports(container.getContainerReport());
+    // Incremental Container reports can read stale container information
+    // This is to make sure FCR and ICR can be linearized and processed by
+    // consumers such as SCM.
+    synchronized (this) {
+      for (Container<?> container : containers) {
+        crBuilder.addReports(container.getContainerReport());
+      }
     }
-
     return crBuilder.build();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 5dbba2b..1edd046 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -581,16 +581,17 @@
   }
 
   private void audit(AuditAction action, EventType eventType,
-      Map<String, String> params, AuditEventStatus result, Throwable exception){
+      Map<String, String> params, AuditEventStatus result,
+      Throwable exception) {
     AuditMessage amsg;
     switch (result) {
     case SUCCESS:
-      if(isAllowed(action.getAction())) {
-        if(eventType == EventType.READ &&
+      if (isAllowed(action.getAction())) {
+        if (eventType == EventType.READ &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logReadSuccess(amsg);
-        } else if(eventType == EventType.WRITE &&
+        } else if (eventType == EventType.WRITE &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logWriteSuccess(amsg);
@@ -599,11 +600,11 @@
       break;
 
     case FAILURE:
-      if(eventType == EventType.READ &&
+      if (eventType == EventType.READ &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logReadFailure(amsg);
-      } else if(eventType == EventType.WRITE &&
+      } else if (eventType == EventType.WRITE &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logWriteFailure(amsg);
@@ -656,7 +657,7 @@
    * @return true or false accordingly.
    */
   private boolean isAllowed(String action) {
-    switch(action) {
+    switch (action) {
     case "CLOSE_CONTAINER":
     case "CREATE_CONTAINER":
     case "LIST_CONTAINER":
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index b736eb5..d6ca2d1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -130,7 +130,7 @@
   public void removeFromBlockMap(BlockID blockID) {
     Preconditions.checkNotNull(blockID);
     containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
+        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ? null : blocks);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerInspector.java
new file mode 100644
index 0000000..5c6cab2
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerInspector.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.interfaces;
+
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+
+/**
+ * A ContainerInspector is a tool used to log information about all
+ * containers as they are being processed during datanode startup. It could
+ * also be used to repair containers if necessary.
+ *
+ * These are primarily debug/developer utilities that will slow down datanode
+ * startup and are only meant to be run as needed.
+ */
+public interface ContainerInspector {
+  /**
+   * Loads necessary configurations to determine how/if to run the inspector
+   * when the process method is called.
+   *
+   * @return true if the inspector will operate when process is called,
+   * false otherwise.
+   */
+  boolean load();
+
+  /**
+   * Removes configurations to run the inspector, so that the process method
+   * becomes a no-op.
+   */
+  void unload();
+
+  /**
+   * Determines whether the inspector will be modifying containers as part of
+   * the process method.
+   *
+   * @return true if the inspector will only read the container, false if it
+   * will be making modifications/repairs.
+   */
+  boolean isReadOnly();
+
+  /**
+   * Operates on the container as the inspector is configured. This may
+   * involve logging information or fixing errors.
+   *
+   * Multiple containers may be processed in parallel by calling this method
+   * on the same inspector instance, but only one process call will be invoked
+   * per container at a time. Implementations must ensure that:
+   * 1. Information they log is batched so that log output from other
+   * inspectors working on other containers is not interleaved.
+   * 2. Multiple process calls to the same inspector instance with different
+   * containers are thread safe.
+   *
+   * @param data Container data for the container to process.
+   * @param store The metadata store for this container.
+   */
+  void process(ContainerData data, DatanodeStore store);
+}
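Note: to make the contract above concrete, here is a minimal, hypothetical read-only implementation of the new ContainerInspector interface. The class name and log message are illustrative only and not part of this patch; it only shows the load/unload gating and the single-log-call batching the Javadoc asks for.

import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerInspector;
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hypothetical inspector that only logs the containers it visits. */
public class LoggingContainerInspector implements ContainerInspector {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingContainerInspector.class);

  private volatile boolean enabled;

  @Override
  public boolean load() {
    // A real inspector would consult a system property or config here.
    enabled = true;
    return enabled;
  }

  @Override
  public void unload() {
    enabled = false;
  }

  @Override
  public boolean isReadOnly() {
    // Only reads container data; never repairs it.
    return true;
  }

  @Override
  public void process(ContainerData data, DatanodeStore store) {
    if (!enabled) {
      return;
    }
    // Emit a single log call per container so output from inspections
    // running in parallel on other containers is not interleaved.
    LOG.info("Inspected container: {}", data);
  }
}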
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 2f92434..1dbd588 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -21,18 +21,17 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.function.Consumer;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -51,12 +50,12 @@
   protected String clusterId;
   protected final ContainerMetrics metrics;
   protected String datanodeId;
-  private Consumer<ContainerReplicaProto> icrSender;
+  private IncrementalReportSender<Container> icrSender;
 
   protected Handler(ConfigurationSource config, String datanodeId,
       ContainerSet contSet, VolumeSet volumeSet,
       ContainerMetrics containerMetrics,
-      Consumer<ContainerReplicaProto> icrSender) {
+      IncrementalReportSender<Container> icrSender) {
     this.conf = config;
     this.containerSet = contSet;
     this.volumeSet = volumeSet;
@@ -69,7 +68,7 @@
       final ContainerType containerType, final ConfigurationSource config,
       final String datanodeId, final ContainerSet contSet,
       final VolumeSet volumeSet, final ContainerMetrics metrics,
-      Consumer<ContainerReplicaProto> icrSender) {
+      IncrementalReportSender<Container> icrSender) {
     switch (containerType) {
     case KeyValueContainer:
       return new KeyValueHandler(config,
@@ -98,8 +97,7 @@
    */
   protected void sendICR(final Container container)
       throws StorageContainerException {
-    ContainerReplicaProto containerReport = container.getContainerReport();
-    icrSender.accept(containerReport);
+    icrSender.send(container);
   }
 
   public abstract ContainerCommandResponseProto handle(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
index b84b445..99f4a4c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java
@@ -34,7 +34,7 @@
 
 /**
  * Publishes ContainerReport which will be sent to SCM as part of heartbeat.
- * ContainerReport consist of the following information about each containers:
+ * ContainerReport consists of the following information about each container:
  *   - containerID
  *   - size
  *   - used
@@ -80,7 +80,6 @@
 
   @Override
   protected ContainerReportsProto getReport() throws IOException {
-    return getContext().getParent().getContainer()
-        .getController().getContainerReport();
+    return getContext().getFullContainerReportDiscardPendingICR();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/IncrementalReportSender.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/IncrementalReportSender.java
new file mode 100644
index 0000000..2df9078
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/IncrementalReportSender.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.report;
+
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+
+/**
+ * IncrementalReportSender is an interface to send ICRs
+ * (incremental container reports).
+ * @param <T> the type of object the incremental report is built from.
+ */
+@FunctionalInterface
+public interface IncrementalReportSender<T> {
+  void send(T t) throws StorageContainerException;
+}
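
Note: since this is a @FunctionalInterface, callers can supply the sender as a lambda. The sketch below is hypothetical wiring for illustration only; a real datanode builds the sender from its report machinery rather than printing to stdout.

import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;

final class IcrSenderSketch {
  // Hypothetical sender: just prints the container ID to show the call shape.
  static IncrementalReportSender<Container> loggingSender() {
    return container -> System.out.println("Queueing ICR for container "
        + container.getContainerData().getContainerID());
  }
  // Handler implementations then invoke icrSender.send(container) from
  // sendICR(container), as shown in the Handler diff above.
}
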
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index be83d9b..24df9f5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -121,7 +121,7 @@
       type = ConfigType.INT,
       defaultValue = "1440",
       tags = {DATANODE},
-      description = "The maximum number of block delete commands queued on "+
+      description = "The maximum number of block delete commands queued on " +
           " a datanode"
   )
   private int blockDeleteQueueLimit = 60 * 24;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index e1fc297..ae3c3a9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -261,7 +261,7 @@
   /**
    * Runs the state machine at a fixed frequency.
    */
-  private void start() throws IOException {
+  private void startStateMachineThread() throws IOException {
     long now = 0;
 
     reportManager.init();
@@ -283,7 +283,7 @@
         context.execute(executorService, heartbeatFrequency,
             TimeUnit.MILLISECONDS);
       } catch (InterruptedException e) {
-        // Some one has sent interrupt signal, this could be because
+        // Someone has sent an interrupt signal; this could be because
         // 1. Trigger heartbeat immediately
         // 2. Shutdown has be initiated.
         Thread.currentThread().interrupt();
@@ -293,7 +293,7 @@
 
       now = Time.monotonicNow();
       if (now < nextHB.get()) {
-        if(!Thread.interrupted()) {
+        if (!Thread.interrupted()) {
           try {
             Thread.sleep(nextHB.get() - now);
           } catch (InterruptedException e) {
@@ -379,7 +379,7 @@
       connectionManager.close();
     }
 
-    if(container != null) {
+    if (container != null) {
       container.stop();
     }
 
@@ -468,7 +468,7 @@
     Runnable startStateMachineTask = () -> {
       try {
         LOG.info("Ozone container server started.");
-        start();
+        startStateMachineThread();
       } catch (Exception ex) {
         LOG.error("Unable to start the DatanodeState Machine", ex);
       }
@@ -637,12 +637,12 @@
   }
 
   public StatusAndMessages finalizeUpgrade()
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this);
   }
 
   public StatusAndMessages queryUpgradeStatus()
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.reportStatus(datanodeDetails.getUuidString(),
         true);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 9eea758..08c9405 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine;
 
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -101,7 +102,7 @@
   private final Queue<SCMCommand> commandQueue;
   private final Map<Long, CommandStatus> cmdStatusMap;
   private final Lock lock;
-  private final DatanodeStateMachine parent;
+  private final DatanodeStateMachine parentDatanodeStateMachine;
   private final AtomicLong stateExecutionCount;
   private final ConfigurationSource conf;
   private final Set<InetSocketAddress> endpoints;
@@ -123,7 +124,7 @@
   // Endpoint -> ReportType -> Boolean of whether the full report should be
   //  queued in getFullReports call.
   private final Map<InetSocketAddress,
-      Map<String, AtomicBoolean>> fullReportSendIndicator;
+      Map<String, AtomicBoolean>> isFullReportReadyToBeSent;
   // List of supported full report types.
   private final List<String> fullReportTypeList;
   // ReportType -> Report.
@@ -160,14 +161,14 @@
           state, DatanodeStateMachine parent) {
     this.conf = conf;
     this.state = state;
-    this.parent = parent;
+    this.parentDatanodeStateMachine = parent;
     commandQueue = new LinkedList<>();
     cmdStatusMap = new ConcurrentHashMap<>();
     incrementalReportsQueue = new HashMap<>();
     containerReports = new AtomicReference<>();
     nodeReport = new AtomicReference<>();
     pipelineReports = new AtomicReference<>();
-    crlStatusReport = new AtomicReference<>();
+    crlStatusReport = new AtomicReference<>(); // Certificate Revocation List
     endpoints = new HashSet<>();
     containerActions = new HashMap<>();
     pipelineActions = new HashMap<>();
@@ -175,7 +176,7 @@
     stateExecutionCount = new AtomicLong(0);
     threadPoolNotAvailableCount = new AtomicLong(0);
     lastHeartbeatSent = new AtomicLong(0);
-    fullReportSendIndicator = new HashMap<>();
+    isFullReportReadyToBeSent = new HashMap<>();
     fullReportTypeList = new ArrayList<>();
     type2Reports = new HashMap<>();
     initReportTypeCollection();
@@ -184,7 +185,7 @@
   /**
    * init related ReportType Collections.
    */
-  private void initReportTypeCollection(){
+  private void initReportTypeCollection() {
     fullReportTypeList.add(CONTAINER_REPORTS_PROTO_NAME);
     type2Reports.put(CONTAINER_REPORTS_PROTO_NAME, containerReports);
     fullReportTypeList.add(NODE_REPORT_PROTO_NAME);
@@ -196,12 +197,12 @@
   }
 
   /**
-   * Returns the ContainerStateMachine class that holds this state.
+   * Returns the DatanodeStateMachine class that holds this state.
    *
-   * @return ContainerStateMachine.
+   * @return DatanodeStateMachine.
    */
   public DatanodeStateMachine getParent() {
-    return parent;
+    return parentDatanodeStateMachine;
   }
 
   /**
@@ -221,7 +222,7 @@
    */
   boolean isExiting(DatanodeStateMachine.DatanodeStates newState) {
     boolean isExiting = state != newState && stateExecutionCount.get() > 0;
-    if(isExiting) {
+    if (isExiting) {
       stateExecutionCount.set(0);
     }
     return isExiting;
@@ -316,8 +317,8 @@
           "not full report message type: " + reportType);
     }
     type2Reports.get(reportType).set(report);
-    if (fullReportSendIndicator != null) {
-      for (Map<String, AtomicBoolean> mp : fullReportSendIndicator.values()) {
+    if (isFullReportReadyToBeSent != null) {
+      for (Map<String, AtomicBoolean> mp : isFullReportReadyToBeSent.values()) {
         mp.get(reportType).set(true);
       }
     }
@@ -344,7 +345,7 @@
       Preconditions.checkState(reportType != null);
     }
     synchronized (incrementalReportsQueue) {
-      if (incrementalReportsQueue.containsKey(endpoint)){
+      if (incrementalReportsQueue.containsKey(endpoint)) {
         incrementalReportsQueue.get(endpoint).addAll(0, reportsToPutBack);
       }
     }
@@ -357,8 +358,57 @@
    * @return List of reports
    */
   public List<GeneratedMessage> getAllAvailableReports(
-      InetSocketAddress endpoint) {
-    return getReports(endpoint, Integer.MAX_VALUE);
+      InetSocketAddress endpoint
+  ) {
+    int maxLimit = Integer.MAX_VALUE;
+    // TODO: It is highly unlikely that we will reach maxLimit for the number
+    //       of reports, especially as it does not apply to the number of
+    //       entries in a report. But if maxLimit is hit, should a heartbeat
+    //       be scheduled ASAP? Should full reports that were not included be
+    //       dropped? Currently this code keeps any full reports that were not
+    //       sent and includes them in the next heartbeat.
+    return getAllAvailableReportsUpToLimit(endpoint, maxLimit);
+  }
+
+  /**
+   * Gets a point-in-time snapshot of all containers. Any state described by
+   * pending incremental container reports (ICRs) is already reflected in this
+   * full report, so the call also drops those pending ICRs.
+   * @return Full Container Report
+   */
+  public ContainerReportsProto getFullContainerReportDiscardPendingICR()
+      throws IOException {
+
+    // Block ICRs from being generated
+    synchronized (parentDatanodeStateMachine
+        .getContainer()) {
+      synchronized (incrementalReportsQueue) {
+        for (Map.Entry<InetSocketAddress, List<GeneratedMessage>>
+            entry : incrementalReportsQueue.entrySet()) {
+          if (entry.getValue() != null) {
+            entry.getValue().removeIf(
+                generatedMessage ->
+                    generatedMessage instanceof
+                        IncrementalContainerReportProto);
+          }
+        }
+      }
+      return parentDatanodeStateMachine
+          .getContainer()
+          .getContainerSet()
+          .getContainerReport();
+    }
+  }
+
+  @VisibleForTesting
+  List<GeneratedMessage> getAllAvailableReportsUpToLimit(
+      InetSocketAddress endpoint,
+      int limit) {
+    List<GeneratedMessage> reports = getFullReports(endpoint, limit);
+    List<GeneratedMessage> incrementalReports = getIncrementalReports(endpoint,
+        limit - reports.size()); // fill any remaining capacity up to limit
+    reports.addAll(incrementalReports);
+    return reports;
   }
 
   List<GeneratedMessage> getIncrementalReports(
@@ -378,11 +428,15 @@
   }
 
   List<GeneratedMessage> getFullReports(
-      InetSocketAddress endpoint) {
-    Map<String, AtomicBoolean> mp = fullReportSendIndicator.get(endpoint);
-    List<GeneratedMessage> nonIncrementalReports = new LinkedList<>();
-    if (null != mp){
+      InetSocketAddress endpoint, int maxLimit) {
+    int count = 0;
+    Map<String, AtomicBoolean> mp = isFullReportReadyToBeSent.get(endpoint);
+    List<GeneratedMessage> fullReports = new LinkedList<>();
+    if (null != mp) {
       for (Map.Entry<String, AtomicBoolean> kv : mp.entrySet()) {
+        if (count == maxLimit) {
+          break;
+        }
         if (kv.getValue().get()) {
           String reportType = kv.getKey();
           final AtomicReference<GeneratedMessage> ref =
@@ -393,38 +447,18 @@
           }
           final GeneratedMessage msg = ref.get();
           if (msg != null) {
-            nonIncrementalReports.add(msg);
+            fullReports.add(msg);
+            // Mark the report as not ready to be sent, until another refresh.
             mp.get(reportType).set(false);
+            count++;
           }
         }
       }
     }
-    return nonIncrementalReports;
+    return fullReports;
   }
 
   /**
-   * Returns available reports from the report queue with a max limit on
-   * list size, or empty list if the queue is empty.
-   *
-   * @return List of reports
-   */
-  public List<GeneratedMessage> getReports(InetSocketAddress endpoint,
-                                           int maxLimit) {
-    if (maxLimit < 0) {
-      throw new IllegalArgumentException("Illegal maxLimit value: " + maxLimit);
-    }
-    List<GeneratedMessage> reports = getFullReports(endpoint);
-    if (maxLimit <= reports.size()) {
-      return reports.subList(0, maxLimit);
-    } else {
-      reports.addAll(getIncrementalReports(endpoint,
-          maxLimit - reports.size()));
-      return reports;
-    }
-  }
-
-
-  /**
    * Adds the ContainerAction to ContainerAction queue.
    *
    * @param containerAction ContainerAction to be added
@@ -453,17 +487,6 @@
   }
 
   /**
-   * Returns all the pending ContainerActions from the ContainerAction queue,
-   * or empty list if the queue is empty.
-   *
-   * @return {@literal List<ContainerAction>}
-   */
-  public List<ContainerAction> getAllPendingContainerActions(
-      InetSocketAddress endpoint) {
-    return getPendingContainerAction(endpoint, Integer.MAX_VALUE);
-  }
-
-  /**
    * Returns pending ContainerActions from the ContainerAction queue with a
    * max limit on list size, or empty list if the queue is empty.
    *
@@ -569,10 +592,12 @@
   public DatanodeState<DatanodeStateMachine.DatanodeStates> getTask() {
     switch (this.state) {
     case INIT:
-      return new InitDatanodeState(this.conf, parent.getConnectionManager(),
+      return new InitDatanodeState(this.conf,
+          parentDatanodeStateMachine.getConnectionManager(),
           this);
     case RUNNING:
-      return new RunningDatanodeState(this.conf, parent.getConnectionManager(),
+      return new RunningDatanodeState(this.conf,
+          parentDatanodeStateMachine.getConnectionManager(),
           this);
     case SHUTDOWN:
       return null;
@@ -817,14 +842,14 @@
    */
   public boolean updateCommandStatus(Long cmdId,
       Consumer<CommandStatus> cmdStatusUpdater) {
-    if(cmdStatusMap.containsKey(cmdId)) {
+    if (cmdStatusMap.containsKey(cmdId)) {
       cmdStatusUpdater.accept(cmdStatusMap.get(cmdId));
       return true;
     }
     return false;
   }
 
-  public void configureHeartbeatFrequency(){
+  public void configureHeartbeatFrequency() {
     heartbeatFrequency.set(getScmHeartbeatInterval(conf));
   }
 
@@ -845,7 +870,7 @@
       fullReportTypeList.forEach(e -> {
         mp.putIfAbsent(e, new AtomicBoolean(true));
       });
-      this.fullReportSendIndicator.putIfAbsent(endpoint, mp);
+      this.isFullReportReadyToBeSent.putIfAbsent(endpoint, mp);
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index a5044cb..7908e3d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -68,7 +68,7 @@
     this.connectionManager = connectionManager;
     handlerMap = new HashMap<>();
     for (CommandHandler h : handlers) {
-      if(handlerMap.containsKey(h.getCommandType())){
+      if (handlerMap.containsKey(h.getCommandType())) {
         LOG.error("Duplicate handler for the same command. Exiting. Handle " +
             "key : {}", h.getCommandType().getDescriptorForType().getName());
         throw new IllegalArgumentException("Duplicate handler for the same " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
index 4a46d5f..a7172ff 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
@@ -107,16 +106,6 @@
   private void persistDatanodeDetails(DatanodeDetails dnDetails)
       throws IOException {
     String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR +
-              " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration" +
-              " for details on configuring Ozone.");
-    }
-
     Preconditions.checkNotNull(idFilePath);
     File idFile = new File(idFilePath);
     ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index 217592d..ff53088 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -29,7 +29,6 @@
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -81,7 +80,7 @@
     try {
       addresses = getSCMAddressForDatanodes(conf);
     } catch (IllegalArgumentException e) {
-      if(!Strings.isNullOrEmpty(e.getMessage())) {
+      if (!Strings.isNullOrEmpty(e.getMessage())) {
         LOG.error("Failed to get SCM addresses: {}", e.getMessage());
       }
       return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
@@ -123,12 +122,6 @@
    */
   private void persistContainerDatanodeDetails() {
     String dataNodeIDPath = HddsServerUtil.getDatanodeIdFilePath(conf);
-    if (Strings.isNullOrEmpty(dataNodeIDPath)) {
-      LOG.error("A valid path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-      this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
-      return;
-    }
     File idPath = new File(dataNodeIDPath);
     DatanodeDetails datanodeDetails = this.context.getParent()
         .getDatanodeDetails();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
index 7366650..29d27f3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
@@ -97,7 +97,9 @@
           endPointTask = HeartbeatEndpointTask.newBuilder()
               .setConfig(conf)
               .setEndpointStateMachine(endpoint)
-              .setDatanodeDetails(context.getParent().getDatanodeDetails())
+              .setDatanodeDetails(context
+                  .getParent()
+                  .getDatanodeDetails())
               .setContext(context)
               .build();
           break;
@@ -150,8 +152,8 @@
       } else {
         // This can happen if a task is taking more time than the timeOut
         // specified for the task in await, and when it is completed the task
-        // has set the state to Shutdown, we may see the state as shutdown
-        // here. So, we need to Shutdown DatanodeStateMachine.
+        // has set the state to shut down, we may see the state as shutdown
+        // here. So, we need to shut down DatanodeStateMachine.
         LOG.error("State is Shutdown in RunningDatanodeState");
         context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
       }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 0de533d..28ee5dc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -54,6 +54,7 @@
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
+import org.apache.hadoop.ozone.protocol.commands.RefreshVolumeUsageCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 
 import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
@@ -284,8 +285,7 @@
             .equalsIgnoreCase(datanodeDetails.getUuid()),
         "Unexpected datanode ID in the response.");
     // Verify the response is indeed for this datanode.
-    for (SCMCommandProto commandResponseProto : response
-        .getCommandsList()) {
+    for (SCMCommandProto commandResponseProto : response.getCommandsList()) {
       switch (commandResponseProto.getCommandType()) {
       case reregisterCommand:
         if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
@@ -302,19 +302,19 @@
         }
         break;
       case deleteBlocksCommand:
-        DeleteBlocksCommand db = DeleteBlocksCommand
+        DeleteBlocksCommand deleteBlocksCommand = DeleteBlocksCommand
             .getFromProtobuf(
                 commandResponseProto.getDeleteBlocksCommandProto());
         if (commandResponseProto.hasTerm()) {
-          db.setTerm(commandResponseProto.getTerm());
+          deleteBlocksCommand.setTerm(commandResponseProto.getTerm());
         }
-        if (!db.blocksTobeDeleted().isEmpty()) {
+        if (!deleteBlocksCommand.blocksTobeDeleted().isEmpty()) {
           if (LOG.isDebugEnabled()) {
             LOG.debug(DeletedContainerBlocksSummary
-                .getFrom(db.blocksTobeDeleted())
+                .getFrom(deleteBlocksCommand.blocksTobeDeleted())
                 .toString());
           }
-          this.context.addCommand(db);
+          this.context.addCommand(deleteBlocksCommand);
         }
         break;
       case closeContainerCommand:
@@ -415,6 +415,15 @@
         }
         this.context.addCommand(finalizeNewLayoutVersionCommand);
         break;
+      case refreshVolumeUsageInfo:
+        RefreshVolumeUsageCommand refreshVolumeUsageCommand =
+            RefreshVolumeUsageCommand.getFromProtobuf(
+            commandResponseProto.getRefreshVolumeUsageCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          refreshVolumeUsageCommand.setTerm(commandResponseProto.getTerm());
+        }
+        this.context.addCommand(refreshVolumeUsageCommand);
+        break;
       default:
         throw new IllegalArgumentException("Unknown response : "
             + commandResponseProto.getCommandType().name());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index fa6c937..d80d1e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -122,7 +122,7 @@
       }
     } catch (DiskOutOfSpaceException ex) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    } catch(IOException ex) {
+    } catch (IOException ex) {
       rpcEndPoint.logIfNeeded(ex);
     } finally {
       rpcEndPoint.unlock();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index 4ecf278..557473b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -33,7 +33,7 @@
  * This class is for maintaining Container State Machine statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about="Container State Machine Metrics", context="dfs")
+@Metrics(about = "Container State Machine Metrics", context = "dfs")
 public class CSMMetrics {
   public static final String SOURCE_NAME =
       CSMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 301fc59..494cfe1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -38,6 +38,7 @@
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
@@ -197,7 +198,11 @@
     applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
     stateMachineHealthy = new AtomicBoolean(true);
 
-    this.executor = Executors.newFixedThreadPool(numContainerOpExecutors);
+    this.executor = Executors.newFixedThreadPool(numContainerOpExecutors,
+        new ThreadFactoryBuilder()
+            .setNameFormat("ContainerOp-" + gid.getUuid() + "-%d")
+            .build());
+
     this.containerTaskQueues = new ConcurrentHashMap<>();
     this.waitOnBothFollowers = conf.getObject(
         DatanodeConfiguration.class).waitOnAllFollowers();
@@ -689,7 +694,7 @@
   private synchronized void updateLastApplied() {
     Long appliedTerm = null;
     long appliedIndex = -1;
-    for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
+    for (long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
       final Long removed = applyTransactionCompletionMap.remove(i);
       if (removed == null) {
         break;
@@ -740,7 +745,7 @@
         = queue.submit(task, executor);
     // after the task is completed, remove the queue if the queue is empty.
     f.thenAccept(dummy -> containerTaskQueues.computeIfPresent(containerId,
-        (id, q) -> q.isEmpty()? null: q));
+        (id, q) -> q.isEmpty() ? null : q));
     return f;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index c04e5e9..ab67fbd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -273,7 +273,7 @@
     // Set the ratis storage directory
     Collection<String> storageDirPaths =
             HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
-    List<File> storageDirs= new ArrayList<>(storageDirPaths.size());
+    List<File> storageDirs = new ArrayList<>(storageDirPaths.size());
     storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d)));
 
     RaftServerConfigKeys.setStorageDir(properties, storageDirs);
@@ -306,13 +306,14 @@
     int logQueueNumElements =
         conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS,
             OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT);
-    final int logQueueByteLimit = (int) conf.getStorageSize(
+    final long logQueueByteLimit = (long) conf.getStorageSize(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT,
         StorageUnit.BYTES);
     RaftServerConfigKeys.Log.setQueueElementLimit(
         properties, logQueueNumElements);
-    RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit);
+    RaftServerConfigKeys.Log.setQueueByteLimit(properties,
+        SizeInBytes.valueOf(logQueueByteLimit));
 
     int numSyncRetries = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
@@ -693,7 +694,7 @@
     long bytesWritten = 0;
     Iterator<org.apache.hadoop.ozone.container.common.interfaces.Container<?>>
         containerIt = containerController.getContainers();
-    while(containerIt.hasNext()) {
+    while (containerIt.hasNext()) {
       ContainerData containerData = containerIt.next().getContainerData();
       if (containerData.getOriginPipelineId()
           .compareTo(pipelineID.getId()) == 0) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerInspectorUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerInspectorUtil.java
new file mode 100644
index 0000000..9c900bc
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerInspectorUtil.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.utils;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerInspector;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerMetadataInspector;
+
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.List;
+
+/**
+ * Utility class to manage container inspectors. New inspectors can be added
+ * here to have them loaded and process containers on startup.
+ */
+public final class ContainerInspectorUtil {
+
+  private static final EnumMap<ContainerProtos.ContainerType,
+          List<ContainerInspector>> INSPECTORS =
+      new EnumMap<>(ContainerProtos.ContainerType.class);
+
+  static {
+    for (ContainerProtos.ContainerType type:
+        ContainerProtos.ContainerType.values()) {
+      INSPECTORS.put(type, new ArrayList<>());
+    }
+
+    // If new inspectors need to be added, put them here mapped by the type
+    // of containers they can operate on.
+    INSPECTORS.get(ContainerProtos.ContainerType.KeyValueContainer)
+        .add(new KeyValueContainerMetadataInspector());
+  }
+
+  private ContainerInspectorUtil() { }
+
+  public static void load() {
+    for (List<ContainerInspector> inspectors: INSPECTORS.values()) {
+      for (ContainerInspector inspector: inspectors) {
+        inspector.load();
+      }
+    }
+  }
+
+  public static void unload() {
+    for (List<ContainerInspector> inspectors: INSPECTORS.values()) {
+      for (ContainerInspector inspector: inspectors) {
+        inspector.unload();
+      }
+    }
+  }
+
+  public static boolean isReadOnly(ContainerProtos.ContainerType type) {
+    boolean readOnly = true;
+    for (ContainerInspector inspector: INSPECTORS.get(type)) {
+      if (!inspector.isReadOnly()) {
+        readOnly = false;
+        break;
+      }
+    }
+    return readOnly;
+  }
+
+  public static void process(ContainerData data, DatanodeStore store) {
+    for (ContainerInspector inspector:
+        INSPECTORS.get(data.getContainerType())) {
+      inspector.process(data, store);
+    }
+  }
+}
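
Note: a hedged sketch of the intended lifecycle follows. The real call sites are in the datanode container loading path, which is not part of this excerpt; the helper below is hypothetical and exists only to show the load/process/unload ordering.

import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;

final class InspectorLifecycleSketch {
  // Hypothetical helper showing the load -> (isReadOnly/process) -> unload flow.
  static void inspect(ContainerData data, DatanodeStore store) {
    ContainerInspectorUtil.load();        // configure all registered inspectors
    try {
      // A caller could open the store read-only when no inspector registered
      // for this container type intends to repair anything.
      boolean readOnly =
          ContainerInspectorUtil.isReadOnly(data.getContainerType());
      System.out.println("Inspectors are read-only for this type: " + readOnly);
      ContainerInspectorUtil.process(data, store);
    } finally {
      ContainerInspectorUtil.unload();    // later process() calls become no-ops
    }
  }
}
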
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 83b8615..6a38080 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -145,7 +145,7 @@
     String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);
 
     int lv = Integer.parseInt(lvStr);
-    if(HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
+    if (HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
       throw new InconsistentStorageStateException("Invalid layOutVersion. " +
           "Version file has layOutVersion as " + lv + " and latest Datanode " +
           "layOutVersion is " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
index c0c719b..4f4bd06 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -284,13 +284,12 @@
   private static final class Failure {
     static final Failure FALLBACK_INSTANCE =
         new Failure(
-          new Throwable("Failure occurred while trying to finish a future" +
-              ".") {
-            @Override
-            public synchronized Throwable fillInStackTrace() {
-              return this; // no stack trace
-            }
-          });
+            new Throwable("Failure occurred while trying to finish a future.") {
+                @Override
+                public synchronized Throwable fillInStackTrace() {
+                  return this; // no stack trace
+                }
+            });
     final Throwable exception;
 
     Failure(Throwable exception) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 35ff05e..98e1629 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -290,7 +290,7 @@
   }
 
   public void refreshAllVolumeUsage() {
-    volumeMap.forEach((k, v)-> v.refreshVolumeInfo());
+    volumeMap.forEach((k, v) -> v.refreshVolumeInfo());
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 5f629ad..715cb84 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -161,7 +161,7 @@
   }
 
   public StorageType getStorageType() {
-    if(this.volumeInfo != null) {
+    if (this.volumeInfo != null) {
       return this.volumeInfo.getStorageType();
     }
     return StorageType.DEFAULT;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 255e7ea..1fcac83 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -87,7 +87,7 @@
     }
   }
 
-  public void refreshNow(){
+  public void refreshNow() {
     source.refreshNow();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 1284f6a..ba131ff 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -25,7 +25,10 @@
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;
 import java.time.Instant;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -89,15 +92,32 @@
   private final KeyValueContainerData containerData;
   private ConfigurationSource config;
 
+  // Cache of blocks (LocalIDs) awaiting the final PutBlock call after the
+  // stream is closed. When a block is added to the DB as part of putBlock,
+  // it is added to this cache. It is removed from the cache when putBlock is
+  // called on the block as part of stream.close() (with endOfBlock = true in
+  // BlockManagerImpl#putBlock). Alternatively, when the container is marked
+  // for close, the whole cache is cleared as there can be no more writes to
+  // this container.
+  // We do not need to explicitly synchronize this cache as writes to the
+  // container are synchronous.
+  private Set<Long> pendingPutBlockCache;
+
   public KeyValueContainer(KeyValueContainerData containerData,
-      ConfigurationSource
-      ozoneConfig) {
+      ConfigurationSource ozoneConfig) {
     Preconditions.checkNotNull(containerData,
             "KeyValueContainerData cannot be null");
     Preconditions.checkNotNull(ozoneConfig,
             "Ozone configuration cannot be null");
     this.config = ozoneConfig;
     this.containerData = containerData;
+    if (this.containerData.isOpen() || this.containerData.isClosing()) {
+      // Block writes are only possible while the container is in OPEN or
+      // CLOSING state, so only then is a real pendingPutBlockCache needed.
+      this.pendingPutBlockCache = new HashSet<>();
+    } else {
+      this.pendingPutBlockCache = Collections.emptySet();
+    }
   }
 
   @Override
@@ -286,6 +306,9 @@
       }
       updateContainerData(() ->
           containerData.setState(ContainerDataProto.State.CLOSING));
+      // Do not clear the pendingPutBlockCache here as a follower can still
+      // receive transactions from the leader in CLOSING state. Refer to
+      // KeyValueHandler#checkContainerOpen()
     } finally {
       writeUnlock();
     }
@@ -297,6 +320,7 @@
     try {
       updateContainerData(() ->
           containerData.setState(ContainerDataProto.State.UNHEALTHY));
+      clearPendingPutBlockCache();
     } finally {
       writeUnlock();
     }
@@ -316,6 +340,7 @@
       // been done outside the lock.
       flushAndSyncDB();
       updateContainerData(containerData::quasiCloseContainer);
+      clearPendingPutBlockCache();
     } finally {
       writeUnlock();
     }
@@ -332,6 +357,7 @@
       // been done outside the lock.
       flushAndSyncDB();
       updateContainerData(containerData::closeContainer);
+      clearPendingPutBlockCache();
     } finally {
       writeUnlock();
     }
@@ -382,7 +408,7 @@
 
   private void compactDB() throws StorageContainerException {
     try {
-      try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+      try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
         db.getStore().compactDB();
       }
     } catch (StorageContainerException ex) {
@@ -435,7 +461,7 @@
     // holding lock and writing data to disk. We can have async implementation
     // to flush the update container data to disk.
     long containerId = containerData.getContainerID();
-    if(!containerData.isValid()) {
+    if (!containerData.isValid()) {
       LOG.debug("Invalid container data. ContainerID: {}", containerId);
       throw new StorageContainerException("Invalid container data. " +
           "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
@@ -667,6 +693,50 @@
     return containerData.getBlockCommitSequenceId();
   }
 
+  /**
+   * Return whether the given localID of a block is present in the
+   * pendingPutBlockCache or not.
+   */
+  public boolean isBlockInPendingPutBlockCache(long localID) {
+    return pendingPutBlockCache.contains(localID);
+  }
+
+  /**
+   * Add the given localID of a block to the pendingPutBlockCache.
+   */
+  public void addToPendingPutBlockCache(long localID)
+      throws StorageContainerException {
+    try {
+      pendingPutBlockCache.add(localID);
+    } catch (UnsupportedOperationException e) {
+      // Getting an UnsupportedOperationException here implies that the
+      // pendingPutBlockCache is the immutable empty Set, which should not
+      // happen while the container is in OPEN or CLOSING state. Log the
+      // exception and throw a non-Runtime exception so that the putBlock
+      // request fails.
+      String msg = "Failed to add block " + localID + " to " +
+          "pendingPutBlockCache of container " + containerData.getContainerID()
+          + " (state: " + containerData.getState() + ")";
+      LOG.error(msg, e);
+      throw new StorageContainerException(msg,
+          ContainerProtos.Result.CONTAINER_INTERNAL_ERROR);
+    }
+  }
+
+  /**
+   * Remove the given localID of a block from the pendingPutBlockCache.
+   */
+  public void removeFromPendingPutBlockCache(long localID) {
+    pendingPutBlockCache.remove(localID);
+  }
+
+  /**
+   * When a container is closed, quasi-closed or marked unhealthy, clear the
+   * pendingPutBlockCache as there won't be any more writes to the container.
+   */
+  private void clearPendingPutBlockCache() {
+    pendingPutBlockCache.clear();
+    pendingPutBlockCache = Collections.emptySet();
+  }
 
   /**
    * Returns KeyValueContainerReport for the KeyValueContainer.
@@ -681,7 +751,7 @@
         .setWriteCount(containerData.getWriteCount())
         .setReadBytes(containerData.getReadBytes())
         .setWriteBytes(containerData.getWriteBytes())
-        .setKeyCount(containerData.getKeyCount())
+        .setKeyCount(containerData.getBlockCount())
         .setUsed(containerData.getBytesUsed())
         .setState(getHddsState())
         .setDeleteTransactionId(containerData.getDeleteTransactionId())
@@ -774,7 +844,7 @@
    * @return
    * @throws IOException
    */
-  private File createTempFile(File file) throws IOException{
+  private File createTempFile(File file) throws IOException {
     return File.createTempFile("tmp_" + System.currentTimeMillis() + "_",
         file.getName(), file.getParentFile());
   }
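
Note: a hedged sketch of how the write path is expected to drive the pendingPutBlockCache, per the field comment above. The flow below is hypothetical; the actual call sites are in BlockManagerImpl#putBlock and the KeyValueHandler, which are not part of this excerpt.

import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;

final class PendingPutBlockCacheSketch {
  // Hypothetical flow: track a block on its intermediate putBlock calls and
  // drop it once the final putBlock (endOfBlock) for the stream is persisted.
  static void onPutBlock(KeyValueContainer container, long localID,
      boolean endOfBlock) throws StorageContainerException {
    if (endOfBlock) {
      container.removeFromPendingPutBlockCache(localID);
    } else if (!container.isBlockInPendingPutBlockCache(localID)) {
      container.addToPendingPutBlockCache(localID);
    }
    // Close, quasi-close and markUnhealthy clear the whole cache inside
    // KeyValueContainer itself, so no extra cleanup is needed here.
  }
}
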
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index ab2f666..40d527d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -230,13 +230,13 @@
 
     ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion();
 
-    try(ReferenceCountedDB db =
+    try (ReferenceCountedDB db =
             BlockUtils.getDB(onDiskContainerData, checkConfig);
         BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator()) {
 
-      while(kvIter.hasNext()) {
+      while (kvIter.hasNext()) {
         BlockData block = kvIter.nextBlock();
-        for(ContainerProtos.ChunkInfo chunk : block.getChunks()) {
+        for (ContainerProtos.ChunkInfo chunk : block.getChunks()) {
           File chunkFile = layout.getChunkFile(onDiskContainerData,
               block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk));
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index f7eedf7..ccc252a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -270,7 +270,7 @@
       builder.setBytesUsed(this.getBytesUsed());
     }
 
-    if(this.getContainerType() != null) {
+    if (this.getContainerType() != null) {
       builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
     }
 
@@ -288,16 +288,16 @@
    * @param deletedBlockCount - Number of blocks deleted.
    * @throws IOException
    */
-  public void updateAndCommitDBCounters(
-      ReferenceCountedDB db, BatchOperation batchOperation,
-      int deletedBlockCount) throws IOException {
+  public void updateAndCommitDBCounters(ReferenceCountedDB db,
+      BatchOperation batchOperation, int deletedBlockCount,
+      long releasedBytes) throws IOException {
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
     // Set Bytes used and block count key.
     metadataTable.putWithBatch(batchOperation, CONTAINER_BYTES_USED,
-            getBytesUsed());
+            getBytesUsed() - releasedBytes);
     metadataTable.putWithBatch(batchOperation, BLOCK_COUNT,
-            getKeyCount() - deletedBlockCount);
+            getBlockCount() - deletedBlockCount);
     metadataTable.putWithBatch(batchOperation, PENDING_DELETE_BLOCK_COUNT,
             (long)(getNumPendingDeletionBlocks() - deletedBlockCount));
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
new file mode 100644
index 0000000..614b63d
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -0,0 +1,463 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerInspector;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.function.BooleanSupplier;
+import java.util.stream.Stream;
+
+/**
+ * Container inspector for key value container metadata. It is capable of
+ * logging metadata information about a container, and repairing the metadata
+ * database values of #BLOCKCOUNT and #BYTESUSED.
+ *
+ * To enable this inspector in inspect mode, pass the java system property
+ * -Dozone.datanode.container.metadata.inspector=inspect on datanode startup.
+ * On startup, this causes the inspector to log, at the ERROR level, metadata
+ * information for every container whose aggregate values of #BLOCKCOUNT and
+ * #BYTESUSED do not match the sum of their parts in the database, and, at the
+ * TRACE level, information about containers that are correct.
+ * Changing the `inspect` argument to `repair` will update these aggregate
+ * values to match the database.
+ *
+ * When run, the inspector will output json to the logger named in the
+ * {@link KeyValueContainerMetadataInspector#REPORT_LOG} variable. The log4j
+ * configuration can be modified to send this output to a separate file
+ * without log information prefixes interfering with the json. For example:
+ *
+ * log4j.logger.ContainerMetadataInspectorReport=INFO,inspectorAppender
+ * log4j.appender.inspectorAppender=org.apache.log4j.FileAppender
+ * log4j.appender.inspectorAppender.File=${hadoop.log.dir}/\
+ * containerMetadataInspector.log
+ * log4j.appender.inspectorAppender.layout=org.apache.log4j.PatternLayout
+ */
+public class KeyValueContainerMetadataInspector implements ContainerInspector {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(KeyValueContainerMetadataInspector.class);
+  public static final Logger REPORT_LOG = LoggerFactory.getLogger(
+      "ContainerMetadataInspectorReport");
+
+  /**
+   * The mode to run the inspector in.
+   */
+  public enum Mode {
+    REPAIR("repair"),
+    INSPECT("inspect"),
+    OFF("off");
+
+    private final String name;
+
+    Mode(String name) {
+      this.name = name;
+    }
+
+    public String toString() {
+      return name;
+    }
+  }
+
+  public static final String SYSTEM_PROPERTY = "ozone.datanode.container" +
+      ".metadata.inspector";
+
+  private Mode mode;
+
+  public KeyValueContainerMetadataInspector() {
+    mode = Mode.OFF;
+  }
+
+  /**
+   * Validate configuration here so that an invalid config value is only
+   * logged once, and not once per container.
+   */
+  @Override
+  public boolean load() {
+    String propertyValue = System.getProperty(SYSTEM_PROPERTY);
+    boolean propertyPresent =
+        (propertyValue != null && !propertyValue.isEmpty());
+    boolean propertyValid = false;
+
+    if (propertyPresent) {
+      if (propertyValue.equals(Mode.REPAIR.toString())) {
+        mode = Mode.REPAIR;
+        propertyValid = true;
+      } else if (propertyValue.equals(Mode.INSPECT.toString())) {
+        mode = Mode.INSPECT;
+        propertyValid = true;
+      }
+
+      if (propertyValid) {
+        LOG.info("Container metadata inspector enabled in {} mode. Report" +
+            "will be output to the {} log.", mode, REPORT_LOG.getName());
+      } else {
+        mode = Mode.OFF;
+        LOG.error("{} system property specified with invalid mode {}. " +
+                "Valid options are {} and {}. Container metadata inspection " +
+                "will not be run.", SYSTEM_PROPERTY, propertyValue,
+            Mode.REPAIR, Mode.INSPECT);
+      }
+    } else {
+      mode = Mode.OFF;
+    }
+
+    return propertyPresent && propertyValid;
+  }
+
+  @Override
+  public void unload() {
+    mode = Mode.OFF;
+  }
+
+  @Override
+  public boolean isReadOnly() {
+    return mode != Mode.REPAIR;
+  }
+
+  @Override
+  public void process(ContainerData containerData, DatanodeStore store) {
+    // If the system property to process container metadata was not
+    // specified, or the inspector is unloaded, this method is a no-op.
+    if (mode == Mode.OFF) {
+      return;
+    }
+
+    KeyValueContainerData kvData = null;
+    if (containerData instanceof KeyValueContainerData) {
+      kvData = (KeyValueContainerData) containerData;
+    } else {
+      LOG.error("This inspector only works on KeyValueContainers. Inspection " +
+          "will not be run for container {}", containerData.getContainerID());
+      return;
+    }
+
+    JsonObject containerJson = inspectContainer(kvData, store);
+    boolean correct = checkAndRepair(containerJson, kvData, store);
+
+    Gson gson = new GsonBuilder()
+        .setPrettyPrinting()
+        .serializeNulls()
+        .create();
+    String jsonReport = gson.toJson(containerJson);
+    if (correct) {
+      REPORT_LOG.trace(jsonReport);
+    } else {
+      REPORT_LOG.error(jsonReport);
+    }
+  }
+
+  private JsonObject inspectContainer(KeyValueContainerData containerData,
+      DatanodeStore store) {
+
+    JsonObject containerJson = new JsonObject();
+
+    try {
+      // Build top level container properties.
+      containerJson.addProperty("containerID", containerData.getContainerID());
+      String schemaVersion = containerData.getSchemaVersion();
+      containerJson.addProperty("schemaVersion", schemaVersion);
+      containerJson.addProperty("containerState",
+          containerData.getState().toString());
+      containerJson.addProperty("currentDatanodeID",
+          containerData.getVolume().getDatanodeUuid());
+      containerJson.addProperty("originDatanodeID",
+          containerData.getOriginNodeId());
+
+      // Build DB metadata values.
+      Table<String, Long> metadataTable = store.getMetadataTable();
+      JsonObject dBMetadata = getDBMetadataJson(metadataTable);
+      containerJson.add("dBMetadata", dBMetadata);
+
+      // Build aggregate values.
+      JsonObject aggregates = getAggregateValues(store, schemaVersion);
+      containerJson.add("aggregates", aggregates);
+
+      // Build info about chunks directory.
+      JsonObject chunksDirectory =
+          getChunksDirectoryJson(new File(containerData.getChunksPath()));
+      containerJson.add("chunksDirectory", chunksDirectory);
+    } catch (IOException ex) {
+      LOG.error("Inspecting container {} failed",
+          containerData.getContainerID(), ex);
+    }
+
+    return containerJson;
+  }
+
+  private JsonObject getDBMetadataJson(Table<String, Long> metadataTable)
+      throws IOException {
+    JsonObject dBMetadata = new JsonObject();
+
+    dBMetadata.addProperty(OzoneConsts.BLOCK_COUNT,
+        metadataTable.get(OzoneConsts.BLOCK_COUNT));
+    dBMetadata.addProperty(OzoneConsts.CONTAINER_BYTES_USED,
+        metadataTable.get(OzoneConsts.CONTAINER_BYTES_USED));
+    dBMetadata.addProperty(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
+        metadataTable.get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT));
+    dBMetadata.addProperty(OzoneConsts.DELETE_TRANSACTION_KEY,
+        metadataTable.get(OzoneConsts.DELETE_TRANSACTION_KEY));
+    dBMetadata.addProperty(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID,
+        metadataTable.get(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID));
+
+    return dBMetadata;
+  }
+
+  private JsonObject getAggregateValues(DatanodeStore store,
+      String schemaVersion) throws IOException {
+    JsonObject aggregates = new JsonObject();
+
+    long usedBytesTotal = 0;
+    long blockCountTotal = 0;
+    long pendingDeleteBlockCountTotal = 0;
+    // Count normal blocks.
+    try (BlockIterator<BlockData> blockIter =
+             store.getBlockIterator(
+                 MetadataKeyFilters.getUnprefixedKeyFilter())) {
+
+      while (blockIter.hasNext()) {
+        blockCountTotal++;
+        usedBytesTotal += getBlockLength(blockIter.nextBlock());
+      }
+    }
+
+    // Count pending delete blocks.
+    if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
+      try (BlockIterator<BlockData> blockIter =
+               store.getBlockIterator(
+                   MetadataKeyFilters.getDeletingKeyFilter())) {
+
+        while (blockIter.hasNext()) {
+          blockCountTotal++;
+          pendingDeleteBlockCountTotal++;
+          usedBytesTotal += getBlockLength(blockIter.nextBlock());
+        }
+      }
+    } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
+      DatanodeStoreSchemaTwoImpl schemaTwoStore =
+          (DatanodeStoreSchemaTwoImpl) store;
+      pendingDeleteBlockCountTotal =
+          countPendingDeletesSchemaV2(schemaTwoStore);
+    } else {
+      throw new IOException("Failed to process deleted blocks for unknown " +
+              "container schema " + schemaVersion);
+    }
+
+    aggregates.addProperty("blockCount", blockCountTotal);
+    aggregates.addProperty("usedBytes", usedBytesTotal);
+    aggregates.addProperty("pendingDeleteBlocks",
+        pendingDeleteBlockCountTotal);
+
+    return aggregates;
+  }
+
+  private JsonObject getChunksDirectoryJson(File chunksDir) throws IOException {
+    JsonObject chunksDirectory = new JsonObject();
+
+    chunksDirectory.addProperty("path", chunksDir.getAbsolutePath());
+    boolean chunksDirPresent = FileUtils.isDirectory(chunksDir);
+    chunksDirectory.addProperty("present", chunksDirPresent);
+
+    long fileCount = 0;
+    if (chunksDirPresent) {
+      try (Stream<Path> stream = Files.list(chunksDir.toPath())) {
+        fileCount = stream.count();
+      }
+    }
+    chunksDirectory.addProperty("fileCount", fileCount);
+
+    return chunksDirectory;
+  }
+
+  private boolean checkAndRepair(JsonObject parent, ContainerData containerData,
+      DatanodeStore store) {
+    JsonArray errors = new JsonArray();
+    boolean passed = true;
+
+    Table<String, Long> metadataTable = store.getMetadataTable();
+
+    // Check and repair block count.
+    JsonElement blockCountDB = parent.getAsJsonObject("dBMetadata")
+        .get(OzoneConsts.BLOCK_COUNT);
+
+    JsonElement blockCountAggregate = parent.getAsJsonObject("aggregates")
+        .get("blockCount");
+
+    // If the block count is absent from the DB, it is only an error if the
+    // DB contains a non-zero number of block keys.
+    long blockCountDBLong = 0;
+    if (!blockCountDB.isJsonNull()) {
+      blockCountDBLong = blockCountDB.getAsLong();
+    }
+
+    if (blockCountDBLong != blockCountAggregate.getAsLong()) {
+      passed = false;
+
+      BooleanSupplier keyRepairAction = () -> {
+        boolean repaired = false;
+        try {
+          metadataTable.put(OzoneConsts.BLOCK_COUNT,
+              blockCountAggregate.getAsLong());
+          repaired = true;
+        } catch (IOException ex) {
+          LOG.error("Error repairing block count for container {}.",
+              containerData.getContainerID(), ex);
+        }
+        return repaired;
+      };
+
+      JsonObject blockCountError = buildErrorAndRepair("dBMetadata." +
+              OzoneConsts.BLOCK_COUNT, blockCountAggregate, blockCountDB,
+          keyRepairAction);
+      errors.add(blockCountError);
+    }
+
+    // Check and repair used bytes.
+    JsonElement usedBytesDB = parent.getAsJsonObject("dBMetadata")
+        .get(OzoneConsts.CONTAINER_BYTES_USED);
+    JsonElement usedBytesAggregate = parent.getAsJsonObject("aggregates")
+        .get("usedBytes");
+
+    // If the used-bytes value is absent from the DB, it is only an error if
+    // the blocks in the DB add up to a non-zero number of used bytes.
+    long usedBytesDBLong = 0;
+    if (!usedBytesDB.isJsonNull()) {
+      usedBytesDBLong = usedBytesDB.getAsLong();
+    }
+
+    if (usedBytesDBLong != usedBytesAggregate.getAsLong()) {
+      passed = false;
+
+      BooleanSupplier keyRepairAction = () -> {
+        boolean repaired = false;
+        try {
+          metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED,
+              usedBytesAggregate.getAsLong());
+          repaired = true;
+        } catch (IOException ex) {
+          LOG.error("Error repairing used bytes for container {}.",
+              containerData.getContainerID(), ex);
+        }
+        return repaired;
+      };
+
+      JsonObject usedBytesError = buildErrorAndRepair("dBMetadata." +
+              OzoneConsts.CONTAINER_BYTES_USED, usedBytesAggregate, usedBytesDB,
+          keyRepairAction);
+      errors.add(usedBytesError);
+    }
+
+    // check and repair chunks dir.
+    JsonElement chunksDirPresent = parent.getAsJsonObject("chunksDirectory")
+        .get("present");
+    if (!chunksDirPresent.getAsBoolean()) {
+      passed = false;
+
+      BooleanSupplier dirRepairAction = () -> {
+        boolean repaired = false;
+        try {
+          File chunksDir = new File(containerData.getChunksPath());
+          Files.createDirectories(chunksDir.toPath());
+          repaired = true;
+        } catch (IOException ex) {
+          LOG.error("Error recreating empty chunks directory for container {}.",
+              containerData.getContainerID(), ex);
+        }
+        return repaired;
+      };
+
+      JsonObject chunksDirError = buildErrorAndRepair("chunksDirectory.present",
+          new JsonPrimitive(true), chunksDirPresent, dirRepairAction);
+      errors.add(chunksDirError);
+    }
+
+    parent.addProperty("correct", passed);
+    parent.add("errors", errors);
+    return passed;
+  }
+
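+  /**
+   * Builds a single entry of the report's "errors" array. Sketch of the
+   * resulting JSON (values illustrative):
+   * {"property": "dBMetadata.#BLOCKCOUNT", "expected": 10, "actual": 7,
+   * "repaired": true}. The repair action is only attempted when the
+   * inspector runs in repair mode.
+   */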
+  private JsonObject buildErrorAndRepair(String property, JsonElement expected,
+      JsonElement actual, BooleanSupplier repairAction) {
+    JsonObject error = new JsonObject();
+    error.addProperty("property", property);
+    error.add("expected", expected);
+    error.add("actual", actual);
+
+    boolean repaired = false;
+    if (mode == Mode.REPAIR) {
+      repaired = repairAction.getAsBoolean();
+    }
+    error.addProperty("repaired", repaired);
+
+    return error;
+  }
+
+  private long countPendingDeletesSchemaV2(DatanodeStoreSchemaTwoImpl
+      schemaTwoStore) throws IOException {
+    long pendingDeleteBlockCountTotal = 0;
+    Table<Long, DeletedBlocksTransaction> delTxTable =
+        schemaTwoStore.getDeleteTransactionTable();
+    try (TableIterator<Long, ? extends Table.KeyValue<Long,
+        DeletedBlocksTransaction>> iterator = delTxTable.iterator()) {
+      while (iterator.hasNext()) {
+        DeletedBlocksTransaction txn = iterator.next().getValue();
+        // In schema 2, pending delete blocks are stored in the
+        // transaction object. Since the actual blocks still exist in the
+        // block data table with no prefix, they have already been
+        // counted towards bytes used and total block count above.
+        pendingDeleteBlockCountTotal += txn.getLocalIDList().size();
+      }
+    }
+
+    return pendingDeleteBlockCountTotal;
+  }
+
+  private static long getBlockLength(BlockData block) {
+    long blockLen = 0;
+    List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
+
+    for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
+      blockLen += chunk.getLen();
+    }
+
+    return blockLen;
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 4760078..73df5e4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -28,7 +28,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.locks.Lock;
-import java.util.function.Consumer;
 import java.util.function.Function;
 
 import com.google.common.util.concurrent.Striped;
@@ -45,7 +44,6 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.ByteStringConversion;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -65,6 +63,7 @@
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
@@ -93,7 +92,6 @@
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockDataResponse;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockLengthResponse;
-import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockResponseSuccess;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getGetSmallFileResponseSuccess;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getPutFileResponseSuccess;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse;
@@ -127,9 +125,12 @@
   // A striped lock that is held during container creation.
   private final Striped<Lock> containerCreationLocks;
 
-  public KeyValueHandler(ConfigurationSource config, String datanodeId,
-      ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics,
-      Consumer<ContainerReplicaProto> icrSender) {
+  public KeyValueHandler(ConfigurationSource config,
+                         String datanodeId,
+                         ContainerSet contSet,
+                         VolumeSet volSet,
+                         ContainerMetrics metrics,
+                         IncrementalReportSender<Container> icrSender) {
     super(config, datanodeId, contSet, volSet, metrics, icrSender);
     containerType = ContainerType.KeyValueContainer;
     blockManager = new BlockManagerImpl(config);
@@ -194,7 +195,7 @@
       DispatcherContext dispatcherContext) {
     Type cmdType = request.getCmdType();
 
-    switch(cmdType) {
+    switch (cmdType) {
     case CreateContainer:
       return handler.handleCreateContainer(request, kvContainer);
     case ReadContainer:
@@ -451,16 +452,16 @@
       BlockData blockData = BlockData.getFromProtoBuf(data);
       Preconditions.checkNotNull(blockData);
 
-      boolean incrKeyCount = false;
+      boolean endOfBlock = false;
       if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) {
         chunkManager.finishWriteChunks(kvContainer, blockData);
-        incrKeyCount = true;
+        endOfBlock = true;
       }
 
       long bcsId =
           dispatcherContext == null ? 0 : dispatcherContext.getLogIndex();
       blockData.setBlockCommitSequenceId(bcsId);
-      blockManager.putBlock(kvContainer, blockData, incrKeyCount);
+      blockManager.putBlock(kvContainer, blockData, endOfBlock);
 
       blockDataProto = blockData.getProtoBufMessage();
 
@@ -548,33 +549,14 @@
   /**
    * Handle Delete Block operation. Calls BlockManager to process the request.
    */
+  @Deprecated
   ContainerCommandResponseProto handleDeleteBlock(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasDeleteBlock()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Delete Key request. trace ID: {}",
-            request.getTraceID());
-      }
-      return malformedRequest(request);
-    }
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getDeleteBlock().getBlockID());
-
-      blockManager.deleteBlock(kvContainer, blockID);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Delete Key failed", ex, IO_EXCEPTION),
-          request);
-    }
-
-    return getBlockResponseSuccess(request);
+    // Block/ Chunk Deletion is handled by BlockDeletingService.
+    // SCM sends Block Deletion commands directly to Datanodes and not
+    // through a Pipeline.
+    throw new UnsupportedOperationException("Datanode handles block deletion " +
+        "using BlockDeletingService");
   }
 
   /**
@@ -660,37 +642,14 @@
   /**
    * Handle Delete Chunk operation. Calls ChunkManager to process the request.
    */
+  @Deprecated
   ContainerCommandResponseProto handleDeleteChunk(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
-
-    if (!request.hasDeleteChunk()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Delete Chunk request. trace ID: {}",
-            request.getTraceID());
-      }
-      return malformedRequest(request);
-    }
-
-    try {
-      checkContainerOpen(kvContainer);
-
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getDeleteChunk().getBlockID());
-      ContainerProtos.ChunkInfo chunkInfoProto = request.getDeleteChunk()
-          .getChunkData();
-      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
-      Preconditions.checkNotNull(chunkInfo);
-
-      chunkManager.deleteChunk(kvContainer, blockID, chunkInfo);
-    } catch (StorageContainerException ex) {
-      return ContainerUtils.logAndReturnError(LOG, ex, request);
-    } catch (IOException ex) {
-      return ContainerUtils.logAndReturnError(LOG,
-          new StorageContainerException("Delete Chunk failed", ex,
-              IO_EXCEPTION), request);
-    }
-
-    return getSuccessResponse(request);
+    // Block/ Chunk Deletion is handled by BlockDeletingService.
+    // SCM sends Block Deletion commands directly to Datanodes and not
+    // through a Pipeline.
+    throw new UnsupportedOperationException("Datanode handles chunk deletion " +
+        "using BlockDeletingService");
   }
 
   private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info)
@@ -744,7 +703,7 @@
           .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
 
       // We should increment stats after writeChunk
-      if (stage == WriteChunkStage.WRITE_DATA||
+      if (stage == WriteChunkStage.WRITE_DATA ||
           stage == WriteChunkStage.COMBINED) {
         metrics.incContainerBytesStats(Type.WriteChunk, writeChunk
             .getChunkData().getLen());
@@ -959,7 +918,7 @@
   public void exportContainer(final Container container,
       final OutputStream outputStream,
       final TarContainerPacker packer)
-      throws IOException{
+      throws IOException {
     final KeyValueContainer kvc = (KeyValueContainer) container;
     kvc.exportContainerData(outputStream, packer);
   }
@@ -1066,13 +1025,17 @@
     deleteInternal(container, force);
   }
 
+  /**
+   * Called by BlockDeletingService to delete all the chunks in a block
+   * before proceeding to delete the block info from DB.
+   */
   @Override
   public void deleteBlock(Container container, BlockData blockData)
       throws IOException {
     chunkManager.deleteChunks(container, blockData);
-    for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
-      ChunkInfo info = ChunkInfo.getFromProtoBuf(chunkInfo);
-      if (LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
+      for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
+        ChunkInfo info = ChunkInfo.getFromProtoBuf(chunkInfo);
         LOG.debug("block {} chunk {} deleted", blockData.getBlockID(), info);
       }
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
index 349a15d..5b9d2f7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
@@ -84,11 +84,13 @@
         if (name.startsWith(DB_DIR_NAME + "/")) {
           Path destinationPath = dbRoot
               .resolve(name.substring(DB_DIR_NAME.length() + 1));
-          extractEntry(archiveInput, size, dbRoot, destinationPath);
+          extractEntry(entry, archiveInput, size, dbRoot,
+              destinationPath);
         } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) {
           Path destinationPath = chunksRoot
               .resolve(name.substring(CHUNKS_DIR_NAME.length() + 1));
-          extractEntry(archiveInput, size, chunksRoot, destinationPath);
+          extractEntry(entry, archiveInput, size, chunksRoot,
+              destinationPath);
         } else if (CONTAINER_FILE_NAME.equals(name)) {
           //Don't do anything. Container file should be unpacked in a
           //separated step by unpackContainerDescriptor call.
@@ -109,27 +111,32 @@
     }
   }
 
-  private void extractEntry(InputStream input, long size,
-                            Path ancestor, Path path) throws IOException {
+  private void extractEntry(ArchiveEntry entry, InputStream input, long size,
+      Path ancestor, Path path) throws IOException {
     HddsUtils.validatePath(path, ancestor);
-    Path parent = path.getParent();
-    if (parent != null) {
-      Files.createDirectories(parent);
-    }
 
-    try (OutputStream fileOutput = new FileOutputStream(path.toFile());
-         OutputStream output = new BufferedOutputStream(fileOutput)) {
-      int bufferSize = 1024;
-      byte[] buffer = new byte[bufferSize + 1];
-      long remaining = size;
-      while (remaining > 0) {
-        int len = (int) Math.min(remaining, bufferSize);
-        int read = input.read(buffer, 0, len);
-        if (read >= 0) {
-          remaining -= read;
-          output.write(buffer, 0, read);
-        } else {
-          remaining = 0;
+    if (entry.isDirectory()) {
+      Files.createDirectories(path);
+    } else {
+      Path parent = path.getParent();
+      if (parent != null) {
+        Files.createDirectories(parent);
+      }
+
+      try (OutputStream fileOutput = new FileOutputStream(path.toFile());
+           OutputStream output = new BufferedOutputStream(fileOutput)) {
+        int bufferSize = 1024;
+        byte[] buffer = new byte[bufferSize + 1];
+        long remaining = size;
+        while (remaining > 0) {
+          int len = (int) Math.min(remaining, bufferSize);
+          int read = input.read(buffer, 0, len);
+          if (read >= 0) {
+            remaining -= read;
+            output.write(buffer, 0, read);
+          } else {
+            remaining = 0;
+          }
         }
       }
     }
@@ -209,6 +216,12 @@
   private void includePath(Path dir, String subdir,
       ArchiveOutputStream archiveOutput) throws IOException {
 
+    // Add a directory entry before adding files, in case the directory is
+    // empty.
+    ArchiveEntry entry = archiveOutput.createArchiveEntry(dir.toFile(), subdir);
+    archiveOutput.putArchiveEntry(entry);
+
+    // Add files in the directory.
     try (Stream<Path> dirEntries = Files.list(dir)) {
       for (Path path : dirEntries.collect(toList())) {
         String entryName = subdir + "/" + path.getFileName();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 353dbcd..306350c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -242,6 +242,10 @@
       }
       return true;
     }
+
+    // TODO: when overwriting a chunk, we should ensure that the new chunk
+    //  size is same as the old chunk size
+
     return false;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index ad1673a..dde3e2e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -97,7 +97,7 @@
    * @param containerId
    * @return container sub directory
    */
-  private static String getContainerSubDirectory(long containerId){
+  private static String getContainerSubDirectory(long containerId) {
     int directory = (int) ((containerId >> 9) & 0xFF);
     return Storage.CONTAINER_DIR + directory;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index f984b20..476eeef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -19,6 +19,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
@@ -32,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
+import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 
 import com.google.common.base.Preconditions;
@@ -189,8 +191,10 @@
     DatanodeStore store = null;
     try {
       try {
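+        // Open the store read-only unless a loaded container inspector
+        // needs write access (i.e. it is running in repair mode).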
+        boolean readOnly = ContainerInspectorUtil.isReadOnly(
+            ContainerProtos.ContainerType.KeyValueContainer);
         store = BlockUtils.getUncachedDatanodeStore(
-            kvContainerData, config, true);
+            kvContainerData, config, readOnly);
       } catch (IOException e) {
         // If an exception is thrown, then it may indicate the RocksDB is
         // already open in the container cache. As this code is only executed at
@@ -249,11 +253,22 @@
       Long blockCount = metadataTable.get(OzoneConsts.BLOCK_COUNT);
       if (blockCount != null) {
         isBlockMetadataSet = true;
-        kvContainerData.setKeyCount(blockCount);
+        kvContainerData.setBlockCount(blockCount);
       }
       if (!isBlockMetadataSet) {
         initializeUsedBytesAndBlockCount(store, kvContainerData);
       }
+
+      // If the container is missing a chunks directory, possibly due to the
+      // bug fixed by HDDS-6235, create it here.
+      File chunksDir = new File(kvContainerData.getChunksPath());
+      if (!chunksDir.exists()) {
+        Files.createDirectories(chunksDir.toPath());
+      }
+      // Run advanced container inspection/repair operations if specified on
+      // startup. If this method is called outside of startup, the inspectors
+      // will be unloaded and this will be a no-op.
+      ContainerInspectorUtil.process(kvContainerData, store);
     } finally {
       if (cachedDB != null) {
         // If we get a cached instance, calling close simply decrements the
@@ -315,10 +330,10 @@
       }
     }
     kvData.setBytesUsed(usedBytes);
-    kvData.setKeyCount(blockCount);
+    kvData.setBlockCount(blockCount);
   }
 
-  private static long getBlockLength(BlockData block) throws IOException {
+  public static long getBlockLength(BlockData block) throws IOException {
     long blockLen = 0;
     List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 58a0dcd..ee13ffc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -94,24 +94,24 @@
    * Puts or overwrites a block.
    *
    * @param container - Container for which block need to be added.
-   * @param data     - BlockData.
-   * @param incrKeyCount - for FilePerBlockStrategy, increase key count only
-   *                     when the whole block file is written.
+   * @param data - BlockData.
+   * @param endOfBlock - true if this is the last putBlock call for the
+   *                   block (all chunks are written and the stream is closed)
    * @return length of the block.
    * @throws IOException
    */
   @Override
   public long putBlock(Container container, BlockData data,
-      boolean incrKeyCount) throws IOException {
+      boolean endOfBlock) throws IOException {
     return persistPutBlock(
         (KeyValueContainer) container,
         data,
         config,
-        incrKeyCount);
+        endOfBlock);
   }
 
   public static long persistPutBlock(KeyValueContainer container,
-      BlockData data, ConfigurationSource config, boolean incrKeyCount)
+      BlockData data, ConfigurationSource config, boolean endOfBlock)
       throws IOException {
     Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
         "operation.");
@@ -119,7 +119,7 @@
         "cannot be negative");
     // We are not locking the key manager since LevelDb serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
-    try(ReferenceCountedDB db = BlockUtils.
+    try (ReferenceCountedDB db = BlockUtils.
         getDB(container.getContainerData(), config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
@@ -143,11 +143,32 @@
             containerBCSId, bcsId);
         return data.getSize();
       }
+
+      // Check if the block is present in the pendingPutBlockCache for the
+      // container to determine whether the blockCount is already incremented
+      // for this block in the DB or not.
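+      // In short: the first putBlock for a block misses both the cache and
+      // the DB, so the count is incremented; later putBlock calls for the
+      // same open block hit the cache and skip the DB read; the final
+      // putBlock (endOfBlock) removes the block from the cache.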
+      long localID = data.getLocalID();
+      boolean isBlockInCache = container.isBlockInPendingPutBlockCache(localID);
+      boolean incrBlockCount = false;
+
       // update the blockData as well as BlockCommitSequenceId here
       try (BatchOperation batch = db.getStore().getBatchHandler()
           .initBatchOperation()) {
+
+        // If the block does not exist in the pendingPutBlockCache of the
+        // container, then check the DB to ascertain if it exists or not.
+        // If the block is already in the cache, blockCount is not incremented.
+        if (!isBlockInCache) {
+          if (db.getStore().getBlockDataTable().get(
+              Long.toString(localID)) == null) {
+            // Block does not exist in DB => blockCount needs to be
+            // incremented when the block is added into DB.
+            incrBlockCount = true;
+          }
+        }
+
         db.getStore().getBlockDataTable().putWithBatch(
-            batch, Long.toString(data.getLocalID()), data);
+            batch, Long.toString(localID), data);
         if (bcsId != 0) {
           db.getStore().getMetadataTable().putWithBatch(
               batch, OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, bcsId);
@@ -164,10 +185,10 @@
             container.getContainerData().getBytesUsed());
 
         // Set Block Count for a container.
-        if (incrKeyCount) {
+        if (incrBlockCount) {
           db.getStore().getMetadataTable().putWithBatch(
               batch, OzoneConsts.BLOCK_COUNT,
-              container.getContainerData().getKeyCount() + 1);
+              container.getContainerData().getBlockCount() + 1);
         }
 
         db.getStore().getBatchHandler().commitBatchOperation(batch);
@@ -176,10 +197,24 @@
       if (bcsId != 0) {
         container.updateBlockCommitSequenceId(bcsId);
       }
-      // Increment block count finally here for in-memory.
-      if (incrKeyCount) {
-        container.getContainerData().incrKeyCount();
+
+      // Increment block count and add block to pendingPutBlockCache
+      // in-memory after the DB update.
+      if (incrBlockCount) {
+        container.getContainerData().incrBlockCount();
       }
+
+      // If the block is not in the pendingPutBlockCache (and this is not
+      // endOfBlock), add it there so that subsequent putBlock calls for this
+      // block do not have to read the DB to check for block existence.
+      if (!isBlockInCache && !endOfBlock) {
+        container.addToPendingPutBlockCache(localID);
+      } else if (isBlockInCache && endOfBlock) {
+        // Remove the block from the pendingPutBlockCache as there will not
+        // be any more writes to this block.
+        container.removeFromPendingPutBlockCache(localID);
+      }
+
       if (LOG.isDebugEnabled()) {
         LOG.debug(
             "Block " + data.getBlockID() + " successfully committed with bcsId "
@@ -216,7 +251,7 @@
               + containerBCSId + ".", UNKNOWN_BCSID);
     }
 
-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -244,7 +279,7 @@
       throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -260,51 +295,14 @@
 
   /**
    * Deletes an existing block.
-   *
-   * @param container - Container from which block need to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
    */
   @Override
   public void deleteBlock(Container container, BlockID blockID) throws
       IOException {
-    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
-    Preconditions.checkState(blockID.getContainerID() >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(blockID.getLocalID() >= 0,
-        "Local ID cannot be negative.");
-
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
-      // This is a post condition that acts as a hint to the user.
-      // Should never fail.
-      Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
-      // Note : There is a race condition here, since get and delete
-      // are not atomic. Leaving it here since the impact is refusing
-      // to delete a Block which might have just gotten inserted after
-      // the get check.
-
-      // Throw an exception if block data not found in the block data table.
-      getBlockByID(db, blockID);
-
-      // Update DB to delete block and set block count and bytes used.
-      try (BatchOperation batch = db.getStore().getBatchHandler()
-          .initBatchOperation()) {
-        String localID = Long.toString(blockID.getLocalID());
-        db.getStore().getBlockDataTable().deleteWithBatch(batch, localID);
-        // Update DB to delete block and set block count.
-        // No need to set bytes used here, as bytes used is taken care during
-        // delete chunk.
-        long blockCount = container.getContainerData().getKeyCount() - 1;
-        db.getStore().getMetadataTable()
-            .putWithBatch(batch, OzoneConsts.BLOCK_COUNT, blockCount);
-        db.getStore().getBatchHandler().commitBatchOperation(batch);
-      }
-
-      // Decrement block count here
-      container.getContainerData().decrKeyCount();
-    }
+    // Block/ Chunk Deletion is handled by BlockDeletingService.
+    // SCM sends Block Deletion commands directly to Datanodes and not
+    // through a Pipeline.
+    throw new UnsupportedOperationException();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index e998278..7636473 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -21,7 +21,6 @@
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -102,9 +101,12 @@
 
     Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
 
-    selectHandler(container)
-        .deleteChunk(container, blockID, info);
-    container.getContainerData().decrBytesUsed(info.getLen());
+    // Delete the chunk from disk.
+    // Do not decrement the ContainerData counters (usedBytes) here, as they
+    // are updated when the block is deleted from the DB.
+
+    selectHandler(container).deleteChunk(container, blockID, info);
+
   }
 
   @Override
@@ -113,11 +115,11 @@
 
     Preconditions.checkNotNull(blockData, "Block data cannot be null.");
 
-    selectHandler(container).deleteChunks(container, blockData);
+    // Delete the chunks belonging to blockData.
+    // Do not decrement the ContainerData counters (usedBytes) here, as they
+    // are updated when the block is deleted from the DB.
 
-    container.getContainerData().decrBytesUsed(
-        blockData.getChunks().stream()
-            .mapToLong(ContainerProtos.ChunkInfo::getLen).sum());
+    selectHandler(container).deleteChunks(container, blockData);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 905918a..9e0a6e1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -29,6 +29,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -52,6 +53,7 @@
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -87,14 +89,13 @@
 
   // Task priority is useful when a to-delete block has weight.
   private static final int TASK_PRIORITY_DEFAULT = 1;
-  // Core pool size for container tasks
-  private static final int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
 
   public BlockDeletingService(OzoneContainer ozoneContainer,
-      long serviceInterval, long serviceTimeout, TimeUnit timeUnit,
-      ConfigurationSource conf) {
+                              long serviceInterval, long serviceTimeout,
+                              TimeUnit timeUnit, int workerSize,
+                              ConfigurationSource conf) {
     super("BlockDeletingService", serviceInterval, timeUnit,
-        BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
+        workerSize, serviceTimeout);
     this.ozoneContainer = ozoneContainer;
     try {
       containerDeletionPolicy = conf.getClass(
@@ -155,9 +156,8 @@
         totalBlocks += containerBlockInfo.numBlocksToDelete;
       }
       if (containers.size() > 0) {
-        LOG.info("Plan to choose {} blocks for block deletion, "
-                + "actually deleting {} blocks.", blockLimitPerInterval,
-            totalBlocks);
+        LOG.debug("Queued {} blocks from {} containers for deletion",
+            totalBlocks, containers.size());
       }
     } catch (StorageContainerException e) {
       LOG.warn("Failed to initiate block deleting tasks, "
@@ -322,6 +322,7 @@
         if (toDeleteBlocks.isEmpty()) {
           LOG.debug("No under deletion block found in container : {}",
               containerData.getContainerID());
+          return crr;
         }
 
         List<String> succeedBlocks = new LinkedList<>();
@@ -331,6 +332,7 @@
         Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
             .getHandler(container.getContainerType()));
 
+        long releasedBytes = 0;
         for (Table.KeyValue<String, BlockData> entry: toDeleteBlocks) {
           String blockName = entry.getKey();
           LOG.debug("Deleting block {}", blockName);
@@ -342,6 +344,8 @@
           }
           try {
             handler.deleteBlock(container, entry.getValue());
+            releasedBytes += KeyValueContainerUtil.getBlockLength(
+                entry.getValue());
             succeedBlocks.add(blockName);
           } catch (InvalidProtocolBufferException e) {
             LOG.error("Failed to parse block info for block {}", blockName, e);
@@ -350,32 +354,41 @@
           }
         }
 
-        // Once blocks are deleted... remove the blockID from blockDataTable.
-        try(BatchOperation batch = meta.getStore().getBatchHandler()
+        // Once chunks in the blocks are deleted... remove the blockID from
+        // blockDataTable.
+        try (BatchOperation batch = meta.getStore().getBatchHandler()
             .initBatchOperation()) {
           for (String entry : succeedBlocks) {
             blockDataTable.deleteWithBatch(batch, entry);
           }
-          int deleteBlockCount = succeedBlocks.size();
+
+          // Handler.deleteBlock calls deleteChunk to delete all the chunks
+          // in the block. The ContainerData stats (DB and in-memory) are not
+          // updated with decremented used bytes during deleteChunk. This is
+          // done here so that all the DB update for block delete can be
+          // batched together while committing to DB.
+          int deletedBlocksCount = succeedBlocks.size();
           containerData.updateAndCommitDBCounters(meta, batch,
-              deleteBlockCount);
-          // update count of pending deletion blocks and block count in
-          // in-memory container status.
-          containerData.decrPendingDeletionBlocks(deleteBlockCount);
-          containerData.decrKeyCount(deleteBlockCount);
+              deletedBlocksCount, releasedBytes);
+
+          // update count of pending deletion blocks, block count and used
+          // bytes in in-memory container status.
+          containerData.decrPendingDeletionBlocks(deletedBlocksCount);
+          containerData.decrBlockCount(deletedBlocksCount);
+          containerData.decrBytesUsed(releasedBytes);
         }
 
         if (!succeedBlocks.isEmpty()) {
-          LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
-              containerData.getContainerID(), succeedBlocks.size(),
+          LOG.debug("Container: {}, deleted blocks: {}, space reclaimed: {}, " +
+                  "task elapsed time: {}ms", containerData.getContainerID(),
+              succeedBlocks.size(), releasedBytes,
               Time.monotonicNow() - startTime);
         }
         crr.addAll(succeedBlocks);
         return crr;
       } catch (IOException exception) {
-        LOG.warn(
-            "Deletion operation was not successful for container: " + container
-                .getContainerData().getContainerID(), exception);
+        LOG.warn("Deletion operation was not successful for container: " +
+            container.getContainerData().getContainerID(), exception);
         throw exception;
       }
     }
@@ -396,7 +409,6 @@
         Table<Long, DeletedBlocksTransaction>
             deleteTxns = dnStoreTwoImpl.getDeleteTransactionTable();
         List<DeletedBlocksTransaction> delBlocks = new ArrayList<>();
-        int totalBlocks = 0;
         int numBlocks = 0;
         try (TableIterator<Long,
             ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
@@ -408,10 +420,8 @@
           }
         }
         if (delBlocks.isEmpty()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("No transaction found in container : {}",
-                containerData.getContainerID());
-          }
+          LOG.debug("No transaction found in container : {}",
+              containerData.getContainerID());
           return crr;
         }
 
@@ -421,12 +431,14 @@
         Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
             .getHandler(container.getContainerType()));
 
-        totalBlocks =
+        Pair<Integer, Long> deleteBlocksResult =
             deleteTransactions(delBlocks, handler, blockDataTable, container);
+        int deletedBlocksCount = deleteBlocksResult.getLeft();
+        long releasedBytes = deleteBlocksResult.getRight();
 
         // Once blocks are deleted... remove the blockID from blockDataTable
         // and also remove the transactions from txnTable.
-        try(BatchOperation batch = meta.getStore().getBatchHandler()
+        try (BatchOperation batch = meta.getStore().getBatchHandler()
             .initBatchOperation()) {
           for (DeletedBlocksTransaction delTx : delBlocks) {
             deleteTxns.deleteWithBatch(batch, delTx.getTxID());
@@ -435,33 +447,44 @@
               meta.getStore().getBlockDataTable().deleteWithBatch(batch, bID);
             }
           }
-          meta.getStore().getBatchHandler().commitBatchOperation(batch);
+
+          // Handler.deleteBlock calls deleteChunk to delete all the chunks
+          // in the block. The ContainerData stats (DB and in-memory) are not
+          // updated with decremented used bytes during deleteChunk. This is
+          // done here so that all the DB updates for block delete can be
+          // batched together while committing to DB.
           containerData.updateAndCommitDBCounters(meta, batch,
-              totalBlocks);
-          // update count of pending deletion blocks and block count in
-          // in-memory container status.
-          containerData.decrPendingDeletionBlocks(totalBlocks);
-          containerData.decrKeyCount(totalBlocks);
+              deletedBlocksCount, releasedBytes);
+
+          // update count of pending deletion blocks, block count and used
+          // bytes in in-memory container status.
+          containerData.decrPendingDeletionBlocks(deletedBlocksCount);
+          containerData.decrBlockCount(deletedBlocksCount);
+          containerData.decrBytesUsed(releasedBytes);
         }
 
-        LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
-            containerData.getContainerID(), totalBlocks,
-            Time.monotonicNow() - startTime);
+        LOG.debug("Container: {}, deleted blocks: {}, space reclaimed: {}, " +
+                "task elapsed time: {}ms", containerData.getContainerID(),
+            deletedBlocksCount, releasedBytes,
+            Time.monotonicNow() - startTime);
 
         return crr;
       } catch (IOException exception) {
-        LOG.warn(
-            "Deletion operation was not successful for container: " + container
-                .getContainerData().getContainerID(), exception);
+        LOG.warn("Deletion operation was not successful for container: " +
+            container.getContainerData().getContainerID(), exception);
         throw exception;
       }
     }
 
-    private int deleteTransactions(List<DeletedBlocksTransaction> delBlocks,
-        Handler handler, Table<String, BlockData> blockDataTable,
-        Container container)
+    /**
+     * Delete the chunks for the given blocks.
+     * Returns the number of blocks deleted and the number of bytes released.
+     */
+    private Pair<Integer, Long> deleteTransactions(
+        List<DeletedBlocksTransaction> delBlocks, Handler handler,
+        Table<String, BlockData> blockDataTable, Container container)
         throws IOException {
       int blocksDeleted = 0;
+      long bytesReleased = 0;
       for (DeletedBlocksTransaction entry : delBlocks) {
         for (Long blkLong : entry.getLocalIDList()) {
           String blk = blkLong.toString();
@@ -476,6 +499,7 @@
           try {
             handler.deleteBlock(container, blkInfo);
             blocksDeleted++;
+            bytesReleased += KeyValueContainerUtil.getBlockLength(blkInfo);
           } catch (InvalidProtocolBufferException e) {
             LOG.error("Failed to parse block info for block {}", blk, e);
           } catch (IOException e) {
@@ -483,7 +507,7 @@
           }
         }
       }
-      return blocksDeleted;
+      return Pair.of(blocksDeleted, bytesReleased);
     }
 
     @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 15a8a9e..f9f794d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -23,7 +23,11 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdds.utils.db.*;
+import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
@@ -276,7 +280,7 @@
         nextBlock = null;
         return currentBlock;
       }
-      if(hasNext()) {
+      if (hasNext()) {
         return nextBlock();
       }
       throw new NoSuchElementException("Block Iterator reached end for " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 9beec5b..171303d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -112,7 +112,12 @@
   public void markContainerUnhealthy(final long containerId)
           throws IOException {
     Container container = containerSet.getContainer(containerId);
-    getHandler(container).markContainerUnhealthy(container);
+    if (container != null) {
+      getHandler(container).markContainerUnhealthy(container);
+    } else {
+      LOG.warn("Container {} not found, may be deleted, skip mark UNHEALTHY",
+          containerId);
+    }
   }
 
   /**
@@ -206,7 +211,12 @@
   void updateDataScanTimestamp(long containerId, Instant timestamp)
       throws IOException {
     Container container = containerSet.getContainer(containerId);
-    container.updateDataScanTimestamp(timestamp);
+    if (container != null) {
+      container.updateDataScanTimestamp(timestamp);
+    } else {
+      LOG.warn("Container {} not found, may be deleted, " +
+          "skip update DataScanTimestamp", containerId);
+    }
   }
 
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
index 4a20dc3..c924485 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
@@ -32,7 +32,7 @@
  * This class captures the container data scrubber metrics on the data-node.
  **/
 @InterfaceAudience.Private
-@Metrics(about="DataNode container data scrubber metrics", context="dfs")
+@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
 public final class ContainerDataScrubberMetrics {
 
   private final String name;
@@ -110,8 +110,8 @@
 
   public static ContainerDataScrubberMetrics create(final String volumeName) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty()
-        ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt()
+    String name = "ContainerDataScrubberMetrics-" + (volumeName.isEmpty()
+        ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt()
         : volumeName.replace(':', '-'));
 
     return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
index 96efcf4..59657b0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
@@ -90,7 +90,7 @@
         metrics.incNumContainersScanned();
       }
     }
-    long interval = System.nanoTime()-start;
+    long interval = System.nanoTime() - start;
     if (!stopping) {
       metrics.incNumScanIterations();
       LOG.info("Completed an iteration of container metadata scrubber in" +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
index cf8e617..b70a3e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
@@ -30,7 +30,7 @@
  * data-node.
  **/
 @InterfaceAudience.Private
-@Metrics(about="DataNode container data scrubber metrics", context="dfs")
+@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
 public final class ContainerMetadataScrubberMetrics {
 
   private final String name;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 44aa92c..2a88a2f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -190,9 +190,9 @@
   }
 
   /**
-   * verify ContainerData loaded from disk and fix-up stale members.
-   * Specifically blockCommitSequenceId, delete related metadata
-   * and bytesUsed
+   * Verify the ContainerData loaded from disk and fix up stale members:
+   * the in-memory values of blockCommitSequenceId, delete-related metadata,
+   * bytesUsed and the block count.
    * @param containerData
    * @throws IOException
    */
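
To make the fix-up idea concrete, a hedged, hypothetical sketch follows; the field names and metadata keys are invented for illustration and are not ContainerReader's real fields or tables. It simply restores in-memory counters from persisted metadata, preferring the database values over whatever was loaded from the container file.

import java.util.HashMap;
import java.util.Map;

final class ContainerFixupSketch {
  private long blockCount;
  private long bytesUsed;

  void fixupFrom(Map<String, Long> metadataTable) {
    // Prefer persisted values, since the container file can lag behind the
    // metadata store after restarts or partial deletes.
    blockCount = metadataTable.getOrDefault("blockCount", 0L);
    bytesUsed = metadataTable.getOrDefault("bytesUsed", 0L);
  }

  public static void main(String[] args) {
    Map<String, Long> metadata = new HashMap<>();
    metadata.put("blockCount", 42L);
    metadata.put("bytesUsed", 1_048_576L);

    ContainerFixupSketch data = new ContainerFixupSketch();
    data.fixupFrom(metadata);
    System.out.println(data.blockCount + " blocks, "
        + data.bytesUsed + " bytes");
  }
}
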
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index d6c90d5..1af9c88 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -27,7 +27,6 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Consumer;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -45,13 +44,16 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
@@ -66,6 +68,8 @@
 import com.google.common.collect.Maps;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS_DEFAULT;
 import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.VOLUME_BYTES_PER_SECOND_KEY;
 
 import org.apache.hadoop.util.Timer;
@@ -137,13 +141,17 @@
     final ContainerMetrics metrics = ContainerMetrics.create(conf);
     handlers = Maps.newHashMap();
 
-    Consumer<ContainerReplicaProto> icrSender = containerReplicaProto -> {
-      IncrementalContainerReportProto icr = IncrementalContainerReportProto
-          .newBuilder()
-          .addReport(containerReplicaProto)
-          .build();
-      context.addIncrementalReport(icr);
-      context.getParent().triggerHeartbeat();
+    IncrementalReportSender<Container> icrSender = container -> {
+      synchronized (containerSet) {
+        ContainerReplicaProto containerReport = container.getContainerReport();
+
+        IncrementalContainerReportProto icr = IncrementalContainerReportProto
+            .newBuilder()
+            .addReport(containerReport)
+            .build();
+        context.addIncrementalReport(icr);
+        context.getParent().triggerHeartbeat();
+      }
     };
 
     for (ContainerType containerType : ContainerType.values()) {
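
As a hedged aside, the lambda above now targets IncrementalReportSender&lt;Container&gt; and synchronizes on the container set while building the report. The toy sketch below shows the same shape; ReportSender and the string lists are invented stand-ins, not Ozone types.

import java.util.ArrayList;
import java.util.List;

interface ReportSender<T> {
  void send(T item) throws Exception;
}

final class ReportSenderSketch {
  public static void main(String[] args) throws Exception {
    List<String> containerSet = new ArrayList<>();   // stand-in for ContainerSet
    List<String> pendingReports = new ArrayList<>(); // stand-in for StateContext

    ReportSender<String> icrSender = container -> {
      // Holding the container set's lock keeps the generated report consistent
      // with concurrent additions and removals.
      synchronized (containerSet) {
        pendingReports.add("ICR[" + container + "]");
      }
    };

    containerSet.add("container-1");
    icrSender.send("container-1");
    System.out.println(pendingReports);
  }
}
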
@@ -178,15 +186,19 @@
     readChannel = new XceiverServerGrpc(
         datanodeDetails, config, hddsDispatcher, certClient);
     Duration svcInterval = conf.getObject(
-            DatanodeConfiguration.class).getBlockDeletionInterval();
+        DatanodeConfiguration.class).getBlockDeletionInterval();
 
     long serviceTimeout = config
         .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
             OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
             TimeUnit.MILLISECONDS);
+
+    int serviceWorkerSize = config
+        .getInt(OZONE_BLOCK_DELETING_SERVICE_WORKERS,
+            OZONE_BLOCK_DELETING_SERVICE_WORKERS_DEFAULT);
     blockDeletingService =
         new BlockDeletingService(this, svcInterval.toMillis(), serviceTimeout,
-            TimeUnit.MILLISECONDS, config);
+            TimeUnit.MILLISECONDS, serviceWorkerSize, config);
 
     if (certClient != null && secConf.isGrpcTlsEnabled()) {
       List<X509Certificate> x509Certificates =
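
A small sketch of the configuration pattern added above, under assumed names: read a worker count with a default and hand it to the component doing the work. The property key and classes below are placeholders, not Ozone's real configuration keys; a fixed thread pool stands in for BlockDeletingService.

import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class WorkerPoolSketch {
  public static void main(String[] args) {
    Properties conf = new Properties();
    // conf.setProperty("block.deleting.service.workers", "4");

    // Fall back to a default of 10 workers when the key is unset.
    int workers = Integer.parseInt(
        conf.getProperty("block.deleting.service.workers", "10"));

    ExecutorService pool = Executors.newFixedThreadPool(workers);
    pool.submit(() ->
        System.out.println("deleting blocks with " + workers + " workers"));
    pool.shutdown();
  }
}
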
@@ -215,6 +227,10 @@
     ArrayList<Thread> volumeThreads = new ArrayList<>();
     long startTime = System.currentTimeMillis();
 
+    // Load container inspectors that may be triggered at startup, depending
+    // on the system properties that are set. These can inspect and possibly
+    // repair containers as we iterate over them here.
+    ContainerInspectorUtil.load();
     //TODO: diskchecker should be run before this, to see how disks are.
     // And also handle disk failure tolerance need to be added
     while (volumeSetIterator.hasNext()) {
@@ -234,6 +250,10 @@
       Thread.currentThread().interrupt();
     }
 
+    // After all containers have been processed, turn off container
+    // inspectors so they do not run during normal datanode execution.
+    ContainerInspectorUtil.unload();
+
     LOG.info("Build ContainerSet costs {}s",
         (System.currentTimeMillis() - startTime) / 1000);
   }
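
A hypothetical sketch of the load/iterate/unload lifecycle described by the new comments; the class, the property name and the inspector shape are illustrative, not the real ContainerInspectorUtil API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class InspectorLifecycleSketch {
  interface Inspector {
    void inspect(String container);
  }

  private static final List<Inspector> ACTIVE = new ArrayList<>();

  static void load() {
    // Only register inspectors that were requested via a system property.
    if (Boolean.getBoolean("sketch.inspect.containers")) {
      ACTIVE.add(c -> System.out.println("Inspecting " + c));
    }
  }

  static void unload() {
    // Drop inspectors after startup so they do not run in normal operation.
    ACTIVE.clear();
  }

  public static void main(String[] args) {
    load();
    for (String container : Arrays.asList("c1", "c2")) {
      ACTIVE.forEach(i -> i.inspect(container));
    }
    unload();
  }
}
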
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 91c7400..023b251 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -49,7 +49,7 @@
 /**
  * Client to read container data from gRPC.
  */
-public class GrpcReplicationClient implements AutoCloseable{
+public class GrpcReplicationClient implements AutoCloseable {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(GrpcReplicationClient.class);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java
index d1175ef..5786ed7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.container.replication;
 
+import java.time.Duration;
 import java.time.Instant;
 
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -71,7 +72,7 @@
     long start = Time.monotonicNow();
 
     long msInQueue =
-        (Instant.now().getNano() - task.getQueued().getNano()) / 1_000_000;
+        Duration.between(task.getQueued(), Instant.now()).toMillis();
     queueTime.incr(msInQueue);
     delegate.replicate(task);
     long elapsed = Time.monotonicNow() - start;
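
The queue-time fix above is worth a short demonstration: Instant.getNano() returns only the nanosecond-of-second field, so subtracting two of them breaks whenever the time in queue crosses a second boundary, while Duration.between measures the true elapsed time. A self-contained check:

import java.time.Duration;
import java.time.Instant;

public class QueueTimeSketch {
  public static void main(String[] args) {
    Instant queued = Instant.ofEpochSecond(100, 900_000_000L); // t = 100.9 s
    Instant now    = Instant.ofEpochSecond(101, 100_000_000L); // t = 101.1 s

    // Old approach: compares only the nano-of-second fields.
    long wrong = (now.getNano() - queued.getNano()) / 1_000_000;  // -800 ms
    // New approach: true elapsed time between the two instants.
    long right = Duration.between(queued, now).toMillis();        //  200 ms

    System.out.println("getNano arithmetic: " + wrong + " ms");
    System.out.println("Duration.between:   " + right + " ms");
  }
}
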
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
index e14a391..fc9b449 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
@@ -116,7 +116,7 @@
     }
   }
 
-  public boolean isAtTheEnd(){
+  public boolean isAtTheEnd() {
     return getCurrentFileName().equals(END_MARKER);
   }
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
index 9ff4b0a..f25e13c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
@@ -46,7 +46,7 @@
   @Override
   public void preFinalizeUpgrade(DatanodeStateMachine dsm)
       throws IOException {
-    if(!canFinalizeDataNode(dsm)) {
+    if (!canFinalizeDataNode(dsm)) {
       // DataNode is not yet ready to finalize.
       // Reset the Finalization state.
       getVersionManager().setUpgradeState(FINALIZATION_REQUIRED);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
index ec84946..3653e6c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
@@ -120,7 +120,7 @@
       boolean scmHAEnabled =
           conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
           ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
-      if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled){
+      if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) {
         return clusterID;
       } else {
         return scmID;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java
new file mode 100644
index 0000000..f009a05
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RefreshVolumeUsageCommand.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.protocol.commands;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
+    .RefreshVolumeUsageCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+
+/**
+ * Asks datanode to refresh disk usage info immediately.
+ */
+public class RefreshVolumeUsageCommand
+    extends SCMCommand<RefreshVolumeUsageCommandProto> {
+
+  public RefreshVolumeUsageCommand() {
+    super();
+  }
+
+  /**
+   * Returns the type of this command.
+   *
+   * @return Type
+   */
+  @Override
+  public SCMCommandProto.Type getType() {
+    return SCMCommandProto.Type.refreshVolumeUsageInfo;
+  }
+
+  @Override
+  public RefreshVolumeUsageCommandProto getProto() {
+    RefreshVolumeUsageCommandProto.Builder builder =
+        RefreshVolumeUsageCommandProto
+            .newBuilder().setCmdId(getId());
+    return builder.build();
+  }
+
+  public static RefreshVolumeUsageCommand getFromProtobuf(
+      RefreshVolumeUsageCommandProto refreshVolumeUsageProto) {
+    Preconditions.checkNotNull(refreshVolumeUsageProto);
+    return new RefreshVolumeUsageCommand();
+  }
+}
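
A brief usage sketch, assuming only the classes shown above, of the create/serialize/parse round trip this command supports; it would sit inside a test or main method rather than stand alone.

RefreshVolumeUsageCommand cmd = new RefreshVolumeUsageCommand();
RefreshVolumeUsageCommandProto proto = cmd.getProto();
RefreshVolumeUsageCommand parsed =
    RefreshVolumeUsageCommand.getFromProtobuf(proto);
assert parsed.getType() == SCMCommandProto.Type.refreshVolumeUsageInfo;
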
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index e3ea4ae..6aa0554 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -27,7 +27,7 @@
  * Informs a datanode to register itself with SCM again.
  */
 public class ReregisterCommand extends
-    SCMCommand<ReregisterCommandProto>{
+    SCMCommand<ReregisterCommandProto> {
 
   /**
    * Returns the type of this command.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 157dee6..5cb6984 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -50,7 +50,12 @@
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
 /**
@@ -131,9 +136,10 @@
    * @return - count of reported containers.
    */
   public long getContainerCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.size();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.size();
+        }).sum();
   }
 
   /**
@@ -141,11 +147,13 @@
    * @return - number of keys reported.
    */
   public long getKeyCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getKeyCount();
-      }).sum();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.values().parallelStream().mapToLong(
+              (container) -> {
+                return container.getKeyCount();
+              }).sum();
+        }).sum();
   }
 
   /**
@@ -153,11 +161,13 @@
    * @return - number of bytes used.
    */
   public long getBytesUsed() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getUsed();
-      }).sum();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.values().parallelStream().mapToLong(
+              (container) -> {
+                return container.getUsed();
+              }).sum();
+        }).sum();
   }
 
   /**
@@ -259,7 +269,7 @@
     List<StorageReportProto> storageReports =
         nodeReport.getStorageReportList();
 
-    for(StorageReportProto report : storageReports) {
+    for (StorageReportProto report : storageReports) {
       nodeReportProto.addStorageReport(report);
     }
 
@@ -313,7 +323,7 @@
   public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
     Map<String, ContainerReplicaProto> cr =
         nodeContainers.get(datanodeDetails);
-    if(cr != null) {
+    if (cr != null) {
       return cr.size();
     }
     return 0;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 014913e..ac9cd10 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -345,7 +345,7 @@
       int numOfChunksPerBlock) {
     long chunkLength = 100;
     try (ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
-      container.getContainerData().setKeyCount(numOfBlocksPerContainer);
+      container.getContainerData().setBlockCount(numOfBlocksPerContainer);
       // Set block count, bytes used and pending delete block count.
       metadata.getStore().getMetadataTable()
           .put(OzoneConsts.BLOCK_COUNT, (long) numOfBlocksPerContainer);
@@ -356,7 +356,8 @@
           .put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
               (long) numOfBlocksPerContainer);
     } catch (IOException exception) {
-      LOG.warn("Meta Data update was not successful for container: "+container);
+      LOG.warn("Meta Data update was not successful for container: "
+          + container);
     }
   }
 
@@ -427,7 +428,7 @@
     KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
     Assert.assertEquals(1, containerData.size());
 
-    try(ReferenceCountedDB meta = BlockUtils.getDB(
+    try (ReferenceCountedDB meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf)) {
       Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
       // NOTE: this test assumes that all the container is KetValueContainer and
@@ -530,7 +531,7 @@
         mockDependencies(containerSet, keyValueHandler);
     BlockDeletingService svc = new BlockDeletingService(ozoneContainer,
         TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
-        conf);
+        10, conf);
     svc.start();
 
     LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG);
@@ -552,7 +553,7 @@
     timeout  = 0;
     svc = new BlockDeletingService(ozoneContainer,
         TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
-        conf);
+        10, conf);
     svc.start();
 
     // get container meta data
@@ -734,7 +735,7 @@
       // in all the containers are deleted)).
       deleteAndWait(service, 2);
 
-      long totalContainerBlocks = blocksPerContainer*containerCount;
+      long totalContainerBlocks = blocksPerContainer * containerCount;
       GenericTestUtils.waitFor(() ->
               totalContainerBlocks * blockSpace ==
                       (totalContainerSpace -
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 562775d..e55d68c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -177,7 +177,7 @@
     for (Future future: futureList) {
       try {
         future.get();
-      } catch (InterruptedException| ExecutionException e) {
+      } catch (InterruptedException | ExecutionException e) {
         Assert.fail("Should get the DB instance");
       }
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 82c9e6e..1337f28 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -379,7 +379,6 @@
       throws Exception {
     List<Map.Entry<String, String>> confList =
         new ArrayList<>();
-    confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
 
     // Invalid ozone.scm.names
     /** Empty **/
@@ -394,10 +393,6 @@
     /** Port out of range **/
     confList.add(Maps.immutableEntry(
         ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456"));
-    // Invalid ozone.scm.datanode.id.dir
-    /** Empty **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, ""));
 
     confList.forEach((entry) -> {
       OzoneConfiguration perTestConf = new OzoneConfiguration(conf);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index ab8bd83..41fccb8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -79,7 +79,7 @@
     assertEquals(val.get(), kvData.getWriteBytes());
     assertEquals(val.get(), kvData.getReadCount());
     assertEquals(val.get(), kvData.getWriteCount());
-    assertEquals(val.get(), kvData.getKeyCount());
+    assertEquals(val.get(), kvData.getBlockCount());
     assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
     assertEquals(MAXSIZE, kvData.getMaxSize());
 
@@ -91,7 +91,7 @@
     kvData.incrWriteBytes(10);
     kvData.incrReadCount();
     kvData.incrWriteCount();
-    kvData.incrKeyCount();
+    kvData.incrBlockCount();
     kvData.incrPendingDeletionBlocks(1);
     kvData.setSchemaVersion(
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion());
@@ -105,7 +105,7 @@
     assertEquals(10, kvData.getWriteBytes());
     assertEquals(1, kvData.getReadCount());
     assertEquals(1, kvData.getWriteCount());
-    assertEquals(1, kvData.getKeyCount());
+    assertEquals(1, kvData.getBlockCount());
     assertEquals(1, kvData.getNumPendingDeletionBlocks());
     assertEquals(pipelineId.toString(), kvData.getOriginPipelineId());
     assertEquals(datanodeId.toString(), kvData.getOriginNodeId());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 700c6c2..a80adca 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -130,7 +130,8 @@
    */
   @Test
   public void testDirectTableIterationDisabled() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       DatanodeStore store = refCountedDB.getStore();
 
       assertTableIteratorUnsupported(store.getMetadataTable());
@@ -158,7 +159,8 @@
    */
   @Test
   public void testBlockIteration() throws IOException {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB));
 
       assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
@@ -278,7 +280,8 @@
     final long expectedRegularBlocks =
             TestDB.KEY_COUNT - numBlocksToDelete;
 
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       // Test results via block iteration.
 
       assertEquals(expectedDeletingBlocks,
@@ -320,7 +323,8 @@
         new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
             metrics, c -> {
         });
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       // Read blocks that were already deleted before the upgrade.
       List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks =
               refCountedDB.getStore()
@@ -328,13 +332,13 @@
 
       Set<String> preUpgradeBlocks = new HashSet<>();
 
-      for(Table.KeyValue<String, ChunkInfoList> chunkListKV: deletedBlocks) {
+      for (Table.KeyValue<String, ChunkInfoList> chunkListKV: deletedBlocks) {
         preUpgradeBlocks.add(chunkListKV.getKey());
         try {
           chunkListKV.getValue();
           Assert.fail("No exception thrown when trying to retrieve old " +
                   "deleted blocks values as chunk lists.");
-        } catch(IOException ex) {
+        } catch (IOException ex) {
           // Exception thrown as expected.
         }
       }
@@ -370,7 +374,8 @@
 
   @Test
   public void testReadBlockData() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -395,12 +400,12 @@
       Assert.assertEquals(TestDB.BLOCK_IDS, decodedKeys);
 
       // Test reading blocks with block iterator.
-      try(BlockIterator<BlockData> iter =
+      try (BlockIterator<BlockData> iter =
               refCountedDB.getStore().getBlockIterator()) {
 
         List<String> iteratorBlockIDs = new ArrayList<>();
 
-        while(iter.hasNext()) {
+        while (iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -412,7 +417,8 @@
 
   @Test
   public void testReadDeletingBlockData() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table<String, BlockData> blockDataTable =
           refCountedDB.getStore().getBlockDataTable();
 
@@ -446,12 +452,12 @@
       MetadataKeyFilters.KeyPrefixFilter filter =
           MetadataKeyFilters.getDeletingKeyFilter();
 
-      try(BlockIterator<BlockData> iter =
+      try (BlockIterator<BlockData> iter =
               refCountedDB.getStore().getBlockIterator(filter)) {
 
         List<String> iteratorBlockIDs = new ArrayList<>();
 
-        while(iter.hasNext()) {
+        while (iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -463,7 +469,8 @@
 
   @Test
   public void testReadMetadata() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table<String, Long> metadataTable =
           refCountedDB.getStore().getMetadataTable();
 
@@ -479,7 +486,8 @@
 
   @Test
   public void testReadDeletedBlocks() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table<String, ChunkInfoList> deletedBlocksTable =
           refCountedDB.getStore().getDeletedBlocksTable();
 
@@ -578,7 +586,7 @@
   private void checkContainerData(KeyValueContainerData kvData) {
     assertTrue(kvData.isClosed());
     assertEquals(TestDB.SCHEMA_VERSION, kvData.getSchemaVersion());
-    assertEquals(TestDB.KEY_COUNT, kvData.getKeyCount());
+    assertEquals(TestDB.KEY_COUNT, kvData.getBlockCount());
     assertEquals(TestDB.BYTES_USED, kvData.getBytesUsed());
     assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
             kvData.getNumPendingDeletionBlocks());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
index 00f68ef..85a8bda 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -60,11 +60,11 @@
     assertChunks(expected, computed);
     long offset = 0;
     int n = 5;
-    for(int i = 0; i < n; i++) {
+    for (int i = 0; i < n; i++) {
       offset += assertAddChunk(expected, computed, offset);
     }
 
-    for(; !expected.isEmpty();) {
+    for (; !expected.isEmpty();) {
       removeChunk(expected, computed);
     }
   }
@@ -125,7 +125,7 @@
     assertChunks(expected, computed);
     long offset = 0;
     int n = 5;
-    for(int i = 0; i < n; i++) {
+    for (int i = 0; i < n; i++) {
       offset += addChunk(expected, offset).getLen();
       LOG.info("setChunk: {}", toString(expected));
       computed.setChunks(expected);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
index 84f5008..44f0a7f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -32,7 +32,9 @@
 import java.util.Properties;
 import java.util.UUID;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * This class tests {@link DatanodeVersionFile}.
@@ -50,7 +52,7 @@
   private int lv;
 
   @Rule
-  public TemporaryFolder folder= new TemporaryFolder();
+  public TemporaryFolder folder = new TemporaryFolder();
 
   @Before
   public void setup() throws IOException {
@@ -70,7 +72,7 @@
   }
 
   @Test
-  public void testCreateAndReadVersionFile() throws IOException{
+  public void testCreateAndReadVersionFile() throws IOException {
 
     //Check VersionFile exists
     assertTrue(versionFile.exists());
@@ -88,7 +90,7 @@
   }
 
   @Test
-  public void testIncorrectClusterId() throws IOException{
+  public void testIncorrectClusterId() throws IOException {
     try {
       String randomClusterID = UUID.randomUUID().toString();
       HddsVolumeUtil.getClusterID(properties, versionFile,
@@ -100,7 +102,7 @@
   }
 
   @Test
-  public void testVerifyCTime() throws IOException{
+  public void testVerifyCTime() throws IOException {
     long invalidCTime = -10;
     dnVersionFile = new DatanodeVersionFile(
         storageID, clusterID, datanodeUUID, invalidCTime, lv);
@@ -117,7 +119,7 @@
   }
 
   @Test
-  public void testVerifyLayOut() throws IOException{
+  public void testVerifyLayOut() throws IOException {
     int invalidLayOutVersion = 100;
     dnVersionFile = new DatanodeVersionFile(
         storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 70efc40..0bfdb17 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -174,7 +174,7 @@
   }
 
   @Test
-  public void testIncorrectContainerFile() throws IOException{
+  public void testIncorrectContainerFile() throws IOException {
     try {
       String containerFile = "incorrect.container";
       //Get file from resources folder
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 92ebbca..94f2e3e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -232,7 +232,7 @@
     Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null);
     blockDeletingService = new BlockDeletingService(ozoneContainer,
         SERVICE_INTERVAL_IN_MILLISECONDS, SERVICE_TIMEOUT_IN_MILLISECONDS,
-        TimeUnit.MILLISECONDS, conf);
+        TimeUnit.MILLISECONDS, 10, conf);
     return blockDeletingService;
 
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 7fbd754..71f22d4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -642,53 +642,6 @@
   }
 
   /**
-   * Deletes a block and tries to read it back.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testDeleteBlock() throws IOException, NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockManager.putBlock(container, blockData);
-    blockManager.deleteBlock(container, blockID);
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the block.");
-    blockManager.getBlock(container, blockData.getBlockID());
-  }
-
-  /**
-   * Tries to Deletes a block twice.
-   *
-   * @throws IOException
-   * @throws NoSuchAlgorithmException
-   */
-  @Test
-  public void testDeleteBlockTwice() throws IOException,
-      NoSuchAlgorithmException {
-    long testContainerID = getTestContainerID();
-    Container container = addContainer(containerSet, testContainerID);
-    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = writeChunkHelper(blockID);
-    BlockData blockData = new BlockData(blockID);
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(info.getProtoBufMessage());
-    blockData.setChunks(chunkList);
-    blockManager.putBlock(container, blockData);
-    blockManager.deleteBlock(container, blockID);
-    exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the block.");
-    blockManager.deleteBlock(container, blockID);
-  }
-
-  /**
    * Tries to update an existing and non-existing container. Verifies container
    * map and persistent data both updated.
    *
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index 4cb3094..d51d78e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -122,11 +122,11 @@
     Iterator<Container<?>> iterator = containerSet.getContainerIterator();
 
     int count = 0;
-    while(iterator.hasNext()) {
+    while (iterator.hasNext()) {
       Container kv = iterator.next();
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
-      if (containerId%2 == 0) {
+      if (containerId % 2 == 0) {
         assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
@@ -146,7 +146,7 @@
       Container kv = containerMapIterator.next().getValue();
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
-      if (containerId%2 == 0) {
+      if (containerId % 2 == 0) {
         assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
@@ -167,12 +167,12 @@
     Mockito.when(vol2.getStorageID()).thenReturn("uuid-2");
 
     ContainerSet containerSet = new ContainerSet();
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       KeyValueContainerData kvData = new KeyValueContainerData(i,
           layout,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           UUID.randomUUID().toString());
-      if (i%2 == 0) {
+      if (i % 2 == 0) {
         kvData.setVolume(vol1);
       } else {
         kvData.setVolume(vol2);
@@ -307,7 +307,7 @@
           layout,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
           UUID.randomUUID().toString());
-      if (i%2 == 0) {
+      if (i % 2 == 0) {
         kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
       } else {
         kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 64813de..53490bb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -36,13 +36,13 @@
     .WriteChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.utils.BufferUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
@@ -67,7 +67,6 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.UUID;
-import java.util.function.Consumer;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
@@ -82,8 +81,8 @@
 @RunWith(Parameterized.class)
 public class TestHddsDispatcher {
 
-  public static final Consumer<ContainerReplicaProto> NO_OP_ICR_SENDER =
-      c -> {};
+  public static final IncrementalReportSender<Container>
+      NO_OP_ICR_SENDER = c -> { };
 
   private final ContainerLayoutVersion layout;
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index f969148..2b1bc3d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -82,7 +82,7 @@
   }
 
   @After
-  public void tearDown(){
+  public void tearDown() {
     ContainerMetrics.remove();
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index 83e44d3..f2770d2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -198,7 +198,7 @@
     GeneratedMessage report =
         ((CRLStatusReportPublisher) publisher).getReport();
     Assert.assertNotNull(report);
-    for(Descriptors.FieldDescriptor descriptor :
+    for (Descriptors.FieldDescriptor descriptor :
         report.getDescriptorForType().getFields()) {
       if (descriptor.getNumber() ==
           CRLStatusReport.RECEIVEDCRLID_FIELD_NUMBER) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
index 6d0ad16..f8645d8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
@@ -30,6 +30,7 @@
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collections;
@@ -48,11 +49,15 @@
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.Descriptors.Descriptor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -127,11 +132,18 @@
   }
 
   @Test
-  public void testReportQueueWithAddReports() {
+  public void testReportQueueWithAddReports() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     DatanodeStateMachine datanodeStateMachineMock =
         mock(DatanodeStateMachine.class);
-
+    OzoneContainer o = mock(OzoneContainer.class);
+    ContainerSet s = mock(ContainerSet.class);
+    when(datanodeStateMachineMock.getContainer()).thenReturn(o);
+    when(o.getContainerSet()).thenReturn(s);
+    when(s.getContainerReport())
+        .thenReturn(
+            StorageContainerDatanodeProtocolProtos
+                .ContainerReportsProto.getDefaultInstance());
     StateContext ctx = new StateContext(conf, DatanodeStates.getInitState(),
         datanodeStateMachineMock);
     InetSocketAddress scm1 = new InetSocketAddress("scm1", 9001);
@@ -192,9 +204,30 @@
         StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 128);
     checkReportCount(ctx.getAllAvailableReports(scm1), expectedReportCount);
     checkReportCount(ctx.getAllAvailableReports(scm2), expectedReportCount);
+    expectedReportCount.remove(
+        StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME);
+
+    // Test FCR collection clears pending ICRs.
+    // Add a bunch of IncrementalContainerReport
+    batchAddIncrementalReport(ctx,
+        StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 128);
+    // Add a FCR
+    batchRefreshfullReports(ctx,
+        StateContext.CONTAINER_REPORTS_PROTO_NAME, 1);
+
+    // Get FCR
+    ctx.getFullContainerReportDiscardPendingICR();
+
+    // Only FCR should be part of all available reports.
+    expectedReportCount.put(
+        StateContext.CONTAINER_REPORTS_PROTO_NAME, 1);
+    checkReportCount(ctx.getAllAvailableReports(scm1), expectedReportCount);
+    checkReportCount(ctx.getAllAvailableReports(scm2), expectedReportCount);
     // getReports dequeues incremental reports
     expectedReportCount.remove(
         StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME);
+    expectedReportCount.remove(
+        StateContext.CONTAINER_REPORTS_PROTO_NAME);
   }
 
   void batchRefreshfullReports(StateContext ctx, String reportName, int count) {
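
For context on what the new assertions check, a toy sketch follows of the rule that taking a full container report discards any queued incremental reports. The class and method names mirror the intent of the test but are invented; this is not StateContext itself.

import java.util.ArrayDeque;
import java.util.Deque;

final class ReportQueueSketch {
  private final Deque<String> pendingIcrs = new ArrayDeque<>();
  private final String fullReport = "FCR";

  void addIncrementalReport(String icr) {
    pendingIcrs.add(icr);
  }

  String getFullContainerReportDiscardPendingICR() {
    // The pending ICRs are superseded by the full report, so drop them.
    pendingIcrs.clear();
    return fullReport;
  }

  public static void main(String[] args) {
    ReportQueueSketch ctx = new ReportQueueSketch();
    ctx.addIncrementalReport("ICR-1");
    ctx.addIncrementalReport("ICR-2");
    System.out.println(ctx.getFullContainerReportDiscardPendingICR()); // FCR
    System.out.println(ctx.pendingIcrs.isEmpty());                     // true
  }
}
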
@@ -283,6 +316,11 @@
 
   private GeneratedMessage newMockReport(String messageType) {
     GeneratedMessage report = mock(GeneratedMessage.class);
+    if (StateContext
+        .INCREMENTAL_CONTAINER_REPORT_PROTO_NAME.equals(messageType)) {
+      report =
+          mock(IncrementalContainerReportProto.class);
+    }
     when(report.getDescriptorForType()).thenReturn(
         mock(Descriptor.class));
     when(report.getDescriptorForType().getFullName()).thenReturn(
@@ -541,16 +579,19 @@
     assertEquals(0, ctx.getAllAvailableReports(scm2).size());
 
     Map<String, Integer> expectedReportCount = new HashMap<>();
-
+    int totalIncrementalCount = 128;
     // Add a bunch of ContainerReports
     batchRefreshfullReports(ctx,
-        StateContext.CONTAINER_REPORTS_PROTO_NAME, 128);
-    batchRefreshfullReports(ctx, StateContext.NODE_REPORT_PROTO_NAME, 128);
-    batchRefreshfullReports(ctx, StateContext.PIPELINE_REPORTS_PROTO_NAME, 128);
+        StateContext.CONTAINER_REPORTS_PROTO_NAME, totalIncrementalCount);
+    batchRefreshfullReports(ctx, StateContext.NODE_REPORT_PROTO_NAME,
+        totalIncrementalCount);
+    batchRefreshfullReports(ctx, StateContext.PIPELINE_REPORTS_PROTO_NAME,
+        totalIncrementalCount);
     batchRefreshfullReports(ctx,
-        StateContext.CRL_STATUS_REPORT_PROTO_NAME, 128);
+        StateContext.CRL_STATUS_REPORT_PROTO_NAME, totalIncrementalCount);
     batchAddIncrementalReport(ctx,
-        StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 128);
+        StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME,
+        totalIncrementalCount);
 
     // Should only keep the latest one
     expectedReportCount.put(StateContext.CONTAINER_REPORTS_PROTO_NAME, 1);
@@ -562,7 +603,17 @@
     // (100 - 4 non-incremental reports)
     expectedReportCount.put(
         StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 96);
-    checkReportCount(ctx.getReports(scm1, 100), expectedReportCount);
-    checkReportCount(ctx.getReports(scm2, 100), expectedReportCount);
+    checkReportCount(ctx.getAllAvailableReportsUpToLimit(scm1, 100),
+        expectedReportCount);
+    checkReportCount(ctx.getAllAvailableReportsUpToLimit(scm2, 100),
+        expectedReportCount);
+    expectedReportCount.clear();
+    expectedReportCount.put(
+        StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME,
+        totalIncrementalCount - 96);
+    checkReportCount(ctx.getAllAvailableReportsUpToLimit(scm1, 100),
+        expectedReportCount);
+    checkReportCount(ctx.getAllAvailableReportsUpToLimit(scm2, 100),
+        expectedReportCount);
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 791d6e0..de99681 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -214,7 +214,7 @@
     } catch (IOException e) {
 
       GenericTestUtils.assertExceptionContains("The Container " +
-                      "is not found. ContainerID: "+containerID, e);
+                      "is not found. ContainerID: " + containerID, e);
     }
   }
 
@@ -227,7 +227,7 @@
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("The Container is in " +
               "the MissingContainerSet hence we can't close it. " +
-              "ContainerID: "+containerID, e);
+              "ContainerID: " + containerID, e);
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 990d4c9..dfe7cb3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -114,7 +114,7 @@
     try {
       policy.chooseVolume(volumes, blockSize);
       Assert.fail("expected to throw DiskOutOfSpaceException");
-    } catch(DiskOutOfSpaceException e) {
+    } catch (DiskOutOfSpaceException e) {
       Assert.assertEquals("Not returning the expected message",
           "Out of space: The volume with the most available space (=" + 200
               + " B) is less than the container size (=" + blockSize + " B).",
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
index 72faf57..1cdadd0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
@@ -65,11 +65,17 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
-import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
+import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.FAILED;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyObject;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.isNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 
 /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 52bf3d3..f0869c9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -225,7 +225,7 @@
   }
 
   @Test
-  public void testFailVolumes() throws  Exception{
+  public void testFailVolumes() throws  Exception {
     MutableVolumeSet volSet = null;
     File readOnlyVolumePath = new File(baseDir);
     //Set to readonly, so that this volume will be failed
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 81d230f..7f14ccf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -19,7 +19,15 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 
 import java.io.File;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.UUID;
 
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
@@ -120,7 +128,7 @@
 
     // Default filter used is all unprefixed blocks.
     List<Long> unprefixedBlockIDs = blockIDs.get("");
-    try(BlockIterator<BlockData> keyValueBlockIterator =
+    try (BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
 
       Iterator<Long> blockIDIter = unprefixedBlockIDs.iterator();
@@ -152,7 +160,7 @@
   @Test
   public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
     List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
-    try(BlockIterator<BlockData> keyValueBlockIterator =
+    try (BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
       assertEquals((long)blockIDs.get(0),
               keyValueBlockIterator.nextBlock().getLocalID());
@@ -171,7 +179,7 @@
   @Test
   public void testKeyValueBlockIteratorWithHasNext() throws Exception {
     List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
-    try(BlockIterator<BlockData> blockIter =
+    try (BlockIterator<BlockData> blockIter =
                 db.getStore().getBlockIterator()) {
 
       // Even calling multiple times hasNext() should not move entry forward.
@@ -209,7 +217,7 @@
     int deletingBlocks = 5;
     Map<String, List<Long>> blockIDs = createContainerWithBlocks(CONTAINER_ID,
             normalBlocks, deletingBlocks);
-    try(BlockIterator<BlockData> keyValueBlockIterator =
+    try (BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator(
                         MetadataKeyFilters.getDeletingKeyFilter())) {
       List<Long> deletingBlockIDs =
@@ -230,7 +238,7 @@
   public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
       Exception {
     createContainerWithBlocks(CONTAINER_ID, 0, 5);
-    try(BlockIterator<BlockData> keyValueBlockIterator =
+    try (BlockIterator<BlockData> keyValueBlockIterator =
                 db.getStore().getBlockIterator()) {
       // As all blocks are deleted blocks, they do not match the normal key
       // filter.
@@ -288,7 +296,7 @@
    */
   private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter,
                               List<Long> expectedIDs) throws Exception {
-    try(BlockIterator<BlockData> iterator =
+    try (BlockIterator<BlockData> iterator =
                 db.getStore().getBlockIterator(filter)) {
       // Test seek.
       iterator.seekToFirst();
@@ -364,7 +372,7 @@
             Map<String, Integer> prefixCounts) throws Exception {
     // Create required block data.
     Map<String, List<Long>> blockIDs = new HashMap<>();
-    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
         conf)) {
 
       List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
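For reference, the block-iterator pattern that these hunks reformat is a try-with-resources loop over the container DB; a minimal sketch, assuming the same `db` handle and deleting-key filter the test already uses:

    // The iterator is AutoCloseable, so try-with-resources releases the DB handle.
    try (BlockIterator<BlockData> iter =
             db.getStore().getBlockIterator(
                 MetadataKeyFilters.getDeletingKeyFilter())) {
      while (iter.hasNext()) {
        // hasNext() may be called repeatedly without advancing the iterator.
        long localId = iter.nextBlock().getLocalID();
        // assertions on localId go here
      }
    }

Omitting the filter argument, as most of the tests above do, iterates the unprefixed (normal) blocks instead.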
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 98b8626..4f9e45c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.ozone.test.GenericTestUtils;
@@ -66,6 +67,8 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.List;
@@ -74,6 +77,7 @@
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
 import static org.apache.ratis.util.Preconditions.assertTrue;
@@ -154,6 +158,55 @@
         "DB does not exist");
   }
 
+  /**
+   * Tests repair of containers affected by the bug reported in HDDS-6235.
+   */
+  @Test
+  public void testMissingChunksDirCreated() throws Exception {
+    // Create an empty container and delete its chunks directory.
+    createContainer();
+    closeContainer();
+    // Sets the checksum.
+    populate(0);
+    KeyValueContainerData data = keyValueContainer.getContainerData();
+    File chunksDir = new File(data.getChunksPath());
+    Assert.assertTrue(chunksDir.delete());
+
+    // When the container is loaded, the missing chunks directory should
+    // be created.
+    KeyValueContainerUtil.parseKVContainerData(data, CONF);
+    Assert.assertTrue(chunksDir.exists());
+  }
+
+  @Test
+  public void testEmptyContainerImportExport() throws Exception {
+    createContainer();
+    closeContainer();
+
+    KeyValueContainerData data = keyValueContainer.getContainerData();
+
+    // Check state of original container.
+    checkContainerFilesPresent(data, 0);
+
+    //destination path
+    File exportTar = folder.newFile("exported.tar.gz");
+    TarContainerPacker packer = new TarContainerPacker();
+    //export the container
+    try (FileOutputStream fos = new FileOutputStream(exportTar)) {
+      keyValueContainer.exportContainerData(fos, packer);
+    }
+
+    keyValueContainer.delete();
+
+    // import container.
+    try (FileInputStream fis = new FileInputStream(exportTar)) {
+      keyValueContainer.importContainerData(fis, packer);
+    }
+
+    // Make sure empty chunks dir was unpacked.
+    checkContainerFilesPresent(data, 0);
+  }
+
   @Test
   public void testContainerImportExport() throws Exception {
     long containerId = keyValueContainer.getContainerData().getContainerID();
@@ -199,7 +252,7 @@
     assertEquals(keyValueContainerData.getState(),
         containerData.getState());
     assertEquals(numberOfKeysToWrite,
-        containerData.getKeyCount());
+        containerData.getBlockCount());
     assertEquals(keyValueContainerData.getLayoutVersion(),
         containerData.getLayoutVersion());
     assertEquals(keyValueContainerData.getMaxSize(),
@@ -244,6 +297,18 @@
     }
   }
 
+  private void checkContainerFilesPresent(KeyValueContainerData data,
+      long expectedNumFilesInChunksDir) throws IOException {
+    File chunksDir = new File(data.getChunksPath());
+    Assert.assertTrue(Files.isDirectory(chunksDir.toPath()));
+    try (Stream<Path> stream = Files.list(chunksDir.toPath())) {
+      Assert.assertEquals(expectedNumFilesInChunksDir, stream.count());
+    }
+    Assert.assertTrue(data.getDbFile().exists());
+    Assert.assertTrue(KeyValueContainer.getContainerFile(data.getMetadataPath(),
+        data.getContainerID()).exists());
+  }
+
   /**
    * Create the container on disk.
    */
@@ -446,7 +511,7 @@
         keyValueContainerData, CONF);
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
-    try(ReferenceCountedDB db =
+    try (ReferenceCountedDB db =
         BlockUtils.getDB(keyValueContainerData, CONF)) {
       RDBStore store = (RDBStore) db.getStore().getStore();
       long defaultCacheSize = 64 * OzoneConsts.MB;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index 8f1e76d..99812f3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -18,50 +18,24 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.RandomAccessFile;
-import java.util.Arrays;
-import java.util.ArrayList;
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.List;
-import java.util.UUID;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 
@@ -69,46 +43,13 @@
 /**
  * Basic sanity test for the KeyValueContainerCheck class.
  */
-@RunWith(Parameterized.class) public class TestKeyValueContainerCheck {
+@RunWith(Parameterized.class)
+public class TestKeyValueContainerCheck
+    extends TestKeyValueContainerIntegrityChecks {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestKeyValueContainerCheck.class);
-
-  private final ContainerLayoutTestInfo chunkManagerTestInfo;
-  private KeyValueContainer container;
-  private KeyValueContainerData containerData;
-  private MutableVolumeSet volumeSet;
-  private OzoneConfiguration conf;
-  private File testRoot;
-  private ChunkManager chunkManager;
-
-  public TestKeyValueContainerCheck(
-      ContainerLayoutTestInfo chunkManagerTestInfo) {
-    this.chunkManagerTestInfo = chunkManagerTestInfo;
-  }
-
-  @Parameterized.Parameters public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {ContainerLayoutTestInfo.FILE_PER_CHUNK},
-        {ContainerLayoutTestInfo.FILE_PER_BLOCK}
-    });
-  }
-
-  @Before public void setUp() throws Exception {
-    LOG.info("Testing  layout:{}", chunkManagerTestInfo.getLayout());
-    this.testRoot = GenericTestUtils.getRandomizedTestDir();
-    conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
-    chunkManagerTestInfo.updateConfig(conf);
-    volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null,
-        StorageVolume.VolumeType.DATA_VOLUME, null);
-    chunkManager = chunkManagerTestInfo.createChunkManager(true, null);
-  }
-
-  @After public void teardown() {
-    volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
+  public TestKeyValueContainerCheck(ContainerLayoutTestInfo
+      containerLayoutTestInfo) {
+    super(containerLayoutTestInfo);
   }
 
   /**
@@ -119,13 +60,14 @@
     long containerID = 101;
     int deletedBlocks = 1;
     int normalBlocks = 3;
-    int chunksPerBlock = 4;
+    OzoneConfiguration conf = getConf();
     ContainerScrubberConfiguration c = conf.getObject(
         ContainerScrubberConfiguration.class);
 
     // test Closed Container
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks,
-        chunksPerBlock);
+    KeyValueContainer container = createContainerWithBlocks(containerID,
+        normalBlocks, deletedBlocks);
+    KeyValueContainerData containerData = container.getContainerData();
 
     KeyValueContainerCheck kvCheck =
         new KeyValueContainerCheck(containerData.getMetadataPath(), conf,
@@ -151,13 +93,14 @@
     long containerID = 102;
     int deletedBlocks = 1;
     int normalBlocks = 3;
-    int chunksPerBlock = 4;
+    OzoneConfiguration conf = getConf();
     ContainerScrubberConfiguration sc = conf.getObject(
         ContainerScrubberConfiguration.class);
 
     // test Closed Container
-    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks,
-        chunksPerBlock);
+    KeyValueContainer container = createContainerWithBlocks(containerID,
+        normalBlocks, deletedBlocks);
+    KeyValueContainerData containerData = container.getContainerData();
 
     container.close();
 
@@ -178,7 +121,7 @@
       ContainerProtos.ChunkInfo c = block.getChunks().get(0);
       BlockID blockID = block.getBlockID();
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(c);
-      File chunkFile = chunkManagerTestInfo.getLayout()
+      File chunkFile = getChunkLayout()
           .getChunkFile(containerData, blockID, chunkInfo);
       long length = chunkFile.length();
       assertTrue(length > 0);
@@ -186,7 +129,7 @@
       try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) {
         file.setLength(length / 2);
       }
-      assertEquals(length/2, chunkFile.length());
+      assertEquals(length / 2, chunkFile.length());
     }
 
     // metadata check should pass.
@@ -198,75 +141,4 @@
             sc.getBandwidthPerVolume()), null);
     assertFalse(valid);
   }
-
-  /**
-   * Creates a container with normal and deleted blocks.
-   * First it will insert normal blocks, and then it will insert
-   * deleted blocks.
-   */
-  private void createContainerWithBlocks(long containerId, int normalBlocks,
-      int deletedBlocks, int chunksPerBlock) throws Exception {
-    String strBlock = "block";
-    String strChunk = "-chunkFile";
-    long totalBlocks = normalBlocks + deletedBlocks;
-    int unitLen = 1024;
-    int chunkLen = 3 * unitLen;
-    int bytesPerChecksum = 2 * unitLen;
-    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
-        bytesPerChecksum);
-    byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(UTF_8);
-    ChecksumData checksumData = checksum.computeChecksum(chunkData);
-    DispatcherContext writeStage = new DispatcherContext.Builder()
-        .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
-        .build();
-    DispatcherContext commitStage = new DispatcherContext.Builder()
-        .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
-        .build();
-
-    containerData = new KeyValueContainerData(containerId,
-        chunkManagerTestInfo.getLayout(),
-        (long) chunksPerBlock * chunkLen * totalBlocks,
-        UUID.randomUUID().toString(), UUID.randomUUID().toString());
-    container = new KeyValueContainer(containerData, conf);
-    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
-        UUID.randomUUID().toString());
-    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
-        conf)) {
-      assertNotNull(containerData.getChunksPath());
-      File chunksPath = new File(containerData.getChunksPath());
-      chunkManagerTestInfo.validateFileCount(chunksPath, 0, 0);
-
-      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
-      for (int i = 0; i < totalBlocks; i++) {
-        BlockID blockID = new BlockID(containerId, i);
-        BlockData blockData = new BlockData(blockID);
-
-        chunkList.clear();
-        for (long chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) {
-          String chunkName = strBlock + i + strChunk + chunkCount;
-          long offset = chunkCount * chunkLen;
-          ChunkInfo info = new ChunkInfo(chunkName, offset, chunkLen);
-          info.setChecksumData(checksumData);
-          chunkList.add(info.getProtoBufMessage());
-          chunkManager.writeChunk(container, blockID, info,
-              ByteBuffer.wrap(chunkData), writeStage);
-          chunkManager.writeChunk(container, blockID, info,
-              ByteBuffer.wrap(chunkData), commitStage);
-        }
-        blockData.setChunks(chunkList);
-
-        // normal key
-        String key = Long.toString(blockID.getLocalID());
-        if (i >= normalBlocks) {
-          // deleted key
-          key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
-        }
-        metadataStore.getStore().getBlockDataTable().put(key, blockData);
-      }
-
-      chunkManagerTestInfo.validateFileCount(chunksPath, totalBlocks,
-          totalBlocks * chunksPerBlock);
-    }
-  }
-
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
new file mode 100644
index 0000000..63bf5d6
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.List;
+import java.util.UUID;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Base class for tests identifying issues with key value container contents.
+ */
+public class TestKeyValueContainerIntegrityChecks {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestKeyValueContainerIntegrityChecks.class);
+
+  private final ContainerLayoutTestInfo containerLayoutTestInfo;
+  private MutableVolumeSet volumeSet;
+  private OzoneConfiguration conf;
+  private File testRoot;
+  private ChunkManager chunkManager;
+
+  protected static final int UNIT_LEN = 1024;
+  protected static final int CHUNK_LEN = 3 * UNIT_LEN;
+  protected static final int CHUNKS_PER_BLOCK = 4;
+
+  public TestKeyValueContainerIntegrityChecks(ContainerLayoutTestInfo
+      containerLayoutTestInfo) {
+    this.containerLayoutTestInfo = containerLayoutTestInfo;
+  }
+
+  @Parameterized.Parameters public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {ContainerLayoutTestInfo.FILE_PER_CHUNK},
+        {ContainerLayoutTestInfo.FILE_PER_BLOCK}
+    });
+  }
+
+  @Before public void setUp() throws Exception {
+    LOG.info("Testing layout: {}", containerLayoutTestInfo.getLayout());
+    this.testRoot = GenericTestUtils.getRandomizedTestDir();
+    conf = new OzoneConfiguration();
+    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
+    containerLayoutTestInfo.updateConfig(conf);
+    volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null,
+        StorageVolume.VolumeType.DATA_VOLUME, null);
+    chunkManager = containerLayoutTestInfo.createChunkManager(true, null);
+  }
+
+  @After public void teardown() {
+    volumeSet.shutdown();
+    FileUtil.fullyDelete(testRoot);
+  }
+
+  protected ContainerLayoutVersion getChunkLayout() {
+    return containerLayoutTestInfo.getLayout();
+  }
+
+  protected OzoneConfiguration getConf() {
+    return conf;
+  }
+
+  /**
+   * Creates a container with normal and deleted blocks.
+   * First it will insert normal blocks, and then it will insert
+   * deleted blocks.
+   */
+  protected KeyValueContainer createContainerWithBlocks(long containerId,
+      int normalBlocks, int deletedBlocks) throws Exception {
+    String strBlock = "block";
+    String strChunk = "-chunkFile";
+    long totalBlocks = normalBlocks + deletedBlocks;
+    int bytesPerChecksum = 2 * UNIT_LEN;
+    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
+        bytesPerChecksum);
+    byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
+    ChecksumData checksumData = checksum.computeChecksum(chunkData);
+    DispatcherContext writeStage = new DispatcherContext.Builder()
+        .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
+        .build();
+    DispatcherContext commitStage = new DispatcherContext.Builder()
+        .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
+        .build();
+
+    KeyValueContainerData containerData = new KeyValueContainerData(containerId,
+        containerLayoutTestInfo.getLayout(),
+        (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
+        UUID.randomUUID().toString(), UUID.randomUUID().toString());
+    KeyValueContainer container = new KeyValueContainer(containerData, conf);
+    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
+        UUID.randomUUID().toString());
+    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
+        conf)) {
+      assertNotNull(containerData.getChunksPath());
+      File chunksPath = new File(containerData.getChunksPath());
+      containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
+
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+      for (int i = 0; i < totalBlocks; i++) {
+        BlockID blockID = new BlockID(containerId, i);
+        BlockData blockData = new BlockData(blockID);
+
+        chunkList.clear();
+        for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
+          String chunkName = strBlock + i + strChunk + chunkCount;
+          long offset = chunkCount * CHUNK_LEN;
+          ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
+          info.setChecksumData(checksumData);
+          chunkList.add(info.getProtoBufMessage());
+          chunkManager.writeChunk(container, blockID, info,
+              ByteBuffer.wrap(chunkData), writeStage);
+          chunkManager.writeChunk(container, blockID, info,
+              ByteBuffer.wrap(chunkData), commitStage);
+        }
+        blockData.setChunks(chunkList);
+
+        // normal key
+        String key = Long.toString(blockID.getLocalID());
+        if (i >= normalBlocks) {
+          // deleted key
+          key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
+        }
+        metadataStore.getStore().getBlockDataTable().put(key, blockData);
+      }
+
+      containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks,
+          totalBlocks * CHUNKS_PER_BLOCK);
+    }
+
+    return container;
+  }
+
+}
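The sizing produced by createContainerWithBlocks follows directly from the constants above; a minimal sketch of the expected totals, assuming `layout` is the ContainerLayoutVersion returned by getChunkLayout():

    long totalBlocks = normalBlocks + deletedBlocks;
    // Every block gets CHUNKS_PER_BLOCK chunks of CHUNK_LEN bytes.
    long expectedBytes = (long) CHUNK_LEN * CHUNKS_PER_BLOCK * totalBlocks;
    long expectedFiles;
    switch (layout) {
    case FILE_PER_BLOCK:
      expectedFiles = totalBlocks;                     // one data file per block
      break;
    case FILE_PER_CHUNK:
      expectedFiles = totalBlocks * CHUNKS_PER_BLOCK;  // one data file per chunk
      break;
    default:
      throw new IllegalStateException("Unrecognized chunk layout version.");
    }

validateFileCount is handed the same block and chunk totals at the end of createContainerWithBlocks, and the metadata inspector tests below derive the per-layout file count the same way.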
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
new file mode 100644
index 0000000..9656c28
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import com.google.gson.Gson;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerInspector;
+import org.apache.hadoop.ozone.container.common.utils.ContainerInspectorUtil;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.apache.log4j.PatternLayout;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests for {@link KeyValueContainerMetadataInspector}.
+ */
+@RunWith(Parameterized.class)
+public class TestKeyValueContainerMetadataInspector
+    extends TestKeyValueContainerIntegrityChecks {
+  private static final long CONTAINER_ID = 102;
+
+  public TestKeyValueContainerMetadataInspector(ContainerLayoutTestInfo
+      containerLayoutTestInfo) {
+    super(containerLayoutTestInfo);
+  }
+
+  @Test
+  public void testRunDisabled() throws Exception {
+    // Create incorrect container.
+    KeyValueContainer container = createClosedContainer(3);
+    KeyValueContainerData containerData = container.getContainerData();
+    setDBBlockAndByteCounts(containerData, -2, -2);
+
+    // No system property set. Should not run.
+    System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
+    ContainerInspectorUtil.load();
+    Assert.assertNull(runInspectorAndGetReport(containerData));
+    ContainerInspectorUtil.unload();
+
+    // Unloaded. Should not run even with system property.
+    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
+        KeyValueContainerMetadataInspector.Mode.INSPECT.toString());
+    Assert.assertNull(runInspectorAndGetReport(containerData));
+
+    // Unloaded and no system property. Should not run.
+    System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
+    Assert.assertNull(runInspectorAndGetReport(containerData));
+  }
+
+  @Test
+  public void testSystemPropertyAndReadOnly() {
+    System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
+    ContainerInspector inspector = new KeyValueContainerMetadataInspector();
+    Assert.assertFalse(inspector.load());
+    Assert.assertTrue(inspector.isReadOnly());
+
+    // Inspect mode: valid argument and readonly.
+    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
+        KeyValueContainerMetadataInspector.Mode.INSPECT.toString());
+    inspector = new KeyValueContainerMetadataInspector();
+    Assert.assertTrue(inspector.load());
+    Assert.assertTrue(inspector.isReadOnly());
+
+    // Repair mode: valid argument and not readonly.
+    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
+        KeyValueContainerMetadataInspector.Mode.REPAIR.toString());
+    inspector = new KeyValueContainerMetadataInspector();
+    Assert.assertTrue(inspector.load());
+    Assert.assertFalse(inspector.isReadOnly());
+
+    // Bad argument: invalid argument and readonly.
+    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
+        "badvalue");
+    inspector = new KeyValueContainerMetadataInspector();
+    Assert.assertFalse(inspector.load());
+    Assert.assertTrue(inspector.isReadOnly());
+
+    // Clean slate for other tests.
+    System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
+  }
+
+  @Test
+  public void testIncorrectTotalsNoData() throws Exception {
+    int createBlocks = 0;
+    int setBlocks = -3;
+    int setBytes = -2;
+
+    KeyValueContainer container = createClosedContainer(createBlocks);
+    setDBBlockAndByteCounts(container.getContainerData(), setBlocks, setBytes);
+    inspectThenRepairOnIncorrectContainer(container.getContainerData(),
+        createBlocks, setBlocks, setBytes);
+  }
+
+  @Test
+  public void testIncorrectTotalsWithData() throws Exception {
+    int createBlocks = 3;
+    int setBlocks = 4;
+    int setBytes = -2;
+
+    // Make sure it runs on open containers too.
+    KeyValueContainer container = createOpenContainer(createBlocks);
+    setDBBlockAndByteCounts(container.getContainerData(), setBlocks, setBytes);
+    inspectThenRepairOnIncorrectContainer(container.getContainerData(),
+        createBlocks, setBlocks, setBytes);
+  }
+
+  @Test
+  public void testCorrectTotalsNoData() throws Exception {
+    int createBlocks = 0;
+    int setBytes = 0;
+
+    KeyValueContainer container = createClosedContainer(createBlocks);
+    setDBBlockAndByteCounts(container.getContainerData(), createBlocks,
+        setBytes);
+    inspectThenRepairOnCorrectContainer(container.getContainerData());
+  }
+
+  @Test
+  public void testCorrectTotalsWithData() throws Exception {
+    int createBlocks = 3;
+    int setBytes = CHUNK_LEN * CHUNKS_PER_BLOCK * createBlocks;
+
+    KeyValueContainer container = createClosedContainer(createBlocks);
+    setDBBlockAndByteCounts(container.getContainerData(), createBlocks,
+        setBytes);
+    inspectThenRepairOnCorrectContainer(container.getContainerData());
+  }
+
+  public void inspectThenRepairOnCorrectContainer(
+      KeyValueContainerData containerData) throws Exception {
+    // No output for correct containers.
+    Assert.assertNull(runInspectorAndGetReport(containerData,
+        KeyValueContainerMetadataInspector.Mode.INSPECT));
+
+    Assert.assertNull(runInspectorAndGetReport(containerData,
+        KeyValueContainerMetadataInspector.Mode.REPAIR));
+  }
+
+  /**
+   * Creates a container as specified by the parameters.
+   * Runs the inspector in inspect mode and checks the output.
+   * Runs the inspector in repair mode and checks the output.
+   *
+   * @param createdBlocks Number of blocks to create in the container.
+   * @param setBlocks total block count value set in the database.
+   * @param setBytes total used bytes value set in the database.
+   */
+  public void inspectThenRepairOnIncorrectContainer(
+      KeyValueContainerData containerData, int createdBlocks, int setBlocks,
+      int setBytes) throws Exception {
+    int createdBytes = CHUNK_LEN * CHUNKS_PER_BLOCK * createdBlocks;
+    int createdFiles = 0;
+    switch (getChunkLayout()) {
+    case FILE_PER_BLOCK:
+      createdFiles = createdBlocks;
+      break;
+    case FILE_PER_CHUNK:
+      createdFiles = createdBlocks * CHUNKS_PER_BLOCK;
+      break;
+    default:
+      Assert.fail("Unrecognized chunk layout version.");
+    }
+
+    String containerState = containerData.getState().toString();
+
+    // First inspect the container.
+    JsonObject inspectJson = runInspectorAndGetReport(containerData,
+        KeyValueContainerMetadataInspector.Mode.INSPECT);
+
+    checkJsonReportForIncorrectContainer(inspectJson,
+        containerState, createdBlocks, setBlocks, createdBytes, setBytes,
+        createdFiles, false);
+    // Container should not have been modified in inspect mode.
+    checkDBBlockAndByteCounts(containerData, setBlocks, setBytes);
+
+    // Now repair the container.
+    JsonObject repairJson = runInspectorAndGetReport(containerData,
+        KeyValueContainerMetadataInspector.Mode.REPAIR);
+    checkJsonReportForIncorrectContainer(repairJson,
+        containerState, createdBlocks, setBlocks, createdBytes, setBytes,
+        createdFiles, true);
+    // Metadata keys should have been fixed.
+    checkDBBlockAndByteCounts(containerData, createdBlocks, createdBytes);
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void checkJsonReportForIncorrectContainer(JsonObject inspectJson,
+      String expectedContainerState, long createdBlocks,
+      long setBlocks, long createdBytes, long setBytes, long createdFiles,
+      boolean shouldRepair) {
+    // Check main container properties.
+    Assert.assertEquals(inspectJson.get("containerID").getAsLong(),
+        CONTAINER_ID);
+    Assert.assertEquals(inspectJson.get("containerState").getAsString(),
+        expectedContainerState);
+
+    // Check DB metadata.
+    JsonObject jsonDbMetadata = inspectJson.getAsJsonObject("dBMetadata");
+    Assert.assertEquals(setBlocks,
+        jsonDbMetadata.get(OzoneConsts.BLOCK_COUNT).getAsLong());
+    Assert.assertEquals(setBytes,
+        jsonDbMetadata.get(OzoneConsts.CONTAINER_BYTES_USED).getAsLong());
+
+    // Check aggregate metadata values.
+    JsonObject jsonAggregates = inspectJson.getAsJsonObject("aggregates");
+    Assert.assertEquals(createdBlocks,
+        jsonAggregates.get("blockCount").getAsLong());
+    Assert.assertEquals(createdBytes,
+        jsonAggregates.get("usedBytes").getAsLong());
+    Assert.assertEquals(0,
+        jsonAggregates.get("pendingDeleteBlocks").getAsLong());
+
+    // Check chunks directory.
+    JsonObject jsonChunksDir = inspectJson.getAsJsonObject("chunksDirectory");
+    Assert.assertTrue(jsonChunksDir.get("present").getAsBoolean());
+    Assert.assertEquals(createdFiles,
+        jsonChunksDir.get("fileCount").getAsLong());
+
+    // Check errors.
+    checkJsonErrorsReport(inspectJson, "dBMetadata.#BLOCKCOUNT",
+        new JsonPrimitive(createdBlocks), new JsonPrimitive(setBlocks),
+        shouldRepair);
+    checkJsonErrorsReport(inspectJson, "dBMetadata.#BYTESUSED",
+        new JsonPrimitive(createdBytes), new JsonPrimitive(setBytes),
+        shouldRepair);
+  }
+
+  /**
+   * Checks the error list in the provided JsonReport for an error matching
+   * the template passed in with the parameters.
+   */
+  private void checkJsonErrorsReport(JsonObject jsonReport,
+      String propertyValue, JsonPrimitive correctExpected,
+      JsonPrimitive correctActual, boolean correctRepair) {
+
+    Assert.assertFalse(jsonReport.get("correct").getAsBoolean());
+
+    JsonArray jsonErrors = jsonReport.getAsJsonArray("errors");
+    boolean matchFound = false;
+    for (JsonElement jsonErrorElem: jsonErrors) {
+      JsonObject jsonErrorObject = jsonErrorElem.getAsJsonObject();
+      String thisProperty =
+          jsonErrorObject.get("property").getAsString();
+
+      if (thisProperty.equals(propertyValue)) {
+        matchFound = true;
+
+        JsonPrimitive expectedJsonPrim =
+            jsonErrorObject.get("expected").getAsJsonPrimitive();
+        Assert.assertEquals(correctExpected, expectedJsonPrim);
+
+        JsonPrimitive actualJsonPrim =
+            jsonErrorObject.get("actual").getAsJsonPrimitive();
+        Assert.assertEquals(correctActual, actualJsonPrim);
+
+        boolean repaired =
+            jsonErrorObject.get("repaired").getAsBoolean();
+        Assert.assertEquals(correctRepair, repaired);
+        break;
+      }
+    }
+
+    Assert.assertTrue(matchFound);
+  }
+
+  public void setDBBlockAndByteCounts(KeyValueContainerData containerData,
+      long blockCount, long byteCount) throws Exception {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, getConf())) {
+      Table<String, Long> metadataTable = db.getStore().getMetadataTable();
+      // Don't care about in memory state. Just change the DB values.
+      metadataTable.put(OzoneConsts.BLOCK_COUNT, blockCount);
+      metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, byteCount);
+    }
+  }
+
+  public void checkDBBlockAndByteCounts(KeyValueContainerData containerData,
+      long expectedBlockCount, long expectedBytesUsed) throws Exception {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, getConf())) {
+      Table<String, Long> metadataTable = db.getStore().getMetadataTable();
+
+      long bytesUsed = metadataTable.get(OzoneConsts.CONTAINER_BYTES_USED);
+      Assert.assertEquals(expectedBytesUsed, bytesUsed);
+
+      long blockCount = metadataTable.get(OzoneConsts.BLOCK_COUNT);
+      Assert.assertEquals(expectedBlockCount, blockCount);
+    }
+  }
+
+  private JsonObject runInspectorAndGetReport(
+      KeyValueContainerData containerData,
+      KeyValueContainerMetadataInspector.Mode mode) throws Exception {
+    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
+        mode.toString());
+    ContainerInspectorUtil.load();
+    JsonObject json = runInspectorAndGetReport(containerData);
+    ContainerInspectorUtil.unload();
+    System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
+
+    return json;
+  }
+
+  private JsonObject runInspectorAndGetReport(
+      KeyValueContainerData containerData) throws Exception {
+    // Use an empty layout so the captured log has no prefix and can be
+    // parsed as JSON.
+    GenericTestUtils.LogCapturer capturer =
+        GenericTestUtils.LogCapturer.captureLogs(
+            KeyValueContainerMetadataInspector.REPORT_LOG, new PatternLayout());
+    KeyValueContainerUtil.parseKVContainerData(containerData, getConf());
+    capturer.stopCapturing();
+    String output = capturer.getOutput();
+    capturer.clearOutput();
+
+    return new Gson().fromJson(output, JsonObject.class);
+  }
+
+  private KeyValueContainer createClosedContainer(int normalBlocks)
+      throws Exception {
+    KeyValueContainer container = createOpenContainer(normalBlocks);
+    container.close();
+    return container;
+  }
+
+  private KeyValueContainer createOpenContainer(int normalBlocks)
+      throws Exception {
+    return super.createContainerWithBlocks(CONTAINER_ID, normalBlocks, 0);
+  }
+
+  private void containsAllStrings(String logOutput, String[] expectedMessages) {
+    for (String expectedMessage : expectedMessages) {
+      Assert.assertTrue("Log output did not contain \"" +
+              expectedMessage + "\"", logOutput.contains(expectedMessage));
+    }
+  }
+}
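As the test above shows, the inspector is driven entirely by a system property and runs while container metadata is being parsed; a minimal sketch of that flow, assuming an OzoneConfiguration `conf` and a KeyValueContainerData `data`:

    // Select the mode before loading the inspectors; REPAIR also rewrites the DB counts.
    System.setProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY,
        KeyValueContainerMetadataInspector.Mode.INSPECT.toString());
    ContainerInspectorUtil.load();
    try {
      // The inspector runs as a side effect of parsing the container data and
      // logs its JSON report to KeyValueContainerMetadataInspector.REPORT_LOG.
      KeyValueContainerUtil.parseKVContainerData(data, conf);
    } finally {
      ContainerInspectorUtil.unload();
      System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
    }

With the property unset, or set to an unrecognized value, load() leaves the inspector disabled and parseKVContainerData produces no report.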
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 134477b..314a8aa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -195,15 +195,9 @@
     Mockito.verify(handler, times(1)).handleGetBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Delete Block Request handling
-    ContainerCommandRequestProto deleteBlockRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
-    KeyValueHandler
-        .dispatchRequest(handler, deleteBlockRequest, container, context);
-    Mockito.verify(handler, times(1)).handleDeleteBlock(
-        any(ContainerCommandRequestProto.class), any());
+    // Block Deletion is handled by BlockDeletingService and need not be
+    // tested here.
 
-    // Test List Block Request handling
     ContainerCommandRequestProto listBlockRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
     KeyValueHandler
@@ -219,13 +213,8 @@
     Mockito.verify(handler, times(1)).handleReadChunk(
         any(ContainerCommandRequestProto.class), any(), any());
 
-    // Test Delete Chunk Request handling
-    ContainerCommandRequestProto deleteChunkRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteChunk);
-    KeyValueHandler
-        .dispatchRequest(handler, deleteChunkRequest, container, context);
-    Mockito.verify(handler, times(1)).handleDeleteChunk(
-        any(ContainerCommandRequestProto.class), any());
+    // Chunk Deletion is handled by BlockDeletingService and need not be
+    // tested here.
 
     // Test Write Chunk Request handling
     ContainerCommandRequestProto writeChunkRequest =
@@ -261,7 +250,7 @@
   }
 
   @Test
-  public void testVolumeSetInKeyValueHandler() throws Exception{
+  public void testVolumeSetInKeyValueHandler() throws Exception {
     File path = GenericTestUtils.getRandomizedTestDir();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
@@ -296,7 +285,7 @@
       try {
         new KeyValueHandler(conf,
             context.getParent().getDatanodeDetails().getUuidString(),
-            cset, volumeSet, metrics, c->{});
+            cset, volumeSet, metrics, c -> { });
       } catch (RuntimeException ex) {
         GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
             ".ozone.container.common.impl.HddsDispatcher not org.apache" +
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
index c2aea22..cbc4f95 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
@@ -34,7 +34,6 @@
 import java.io.IOException;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_UNHEALTHY;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.DATANODE_UUID;
@@ -103,19 +102,6 @@
   }
 
   @Test
-  public void testDeleteChunk() throws IOException {
-    KeyValueContainer container = getMockUnhealthyContainer();
-    KeyValueHandler handler = getDummyHandler();
-
-    ContainerProtos.ContainerCommandResponseProto response =
-        handler.handleDeleteChunk(
-            getDummyCommandRequestProto(
-                ContainerProtos.Type.DeleteChunk),
-            container);
-    assertThat(response.getResult(), is(CONTAINER_UNHEALTHY));
-  }
-
-  @Test
   public void testGetSmallFile() throws IOException {
     KeyValueContainer container = getMockUnhealthyContainer();
     KeyValueHandler handler = getDummyHandler();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index 23f690e..08a5e2f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -214,7 +214,7 @@
 
     BlockData blockData = new BlockData(blockID);
     // WHEN
-    for (int i = 0; i< count; i++) {
+    for (int i = 0; i < count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
       chunkManager.writeChunk(container, blockID, info, data, context);
@@ -225,10 +225,9 @@
 
     // THEN
     checkWriteIOStats(len * count, count);
-    assertTrue(getHddsVolume().getVolumeIOStats().getWriteTime() > 0);
 
     // WHEN
-    for (int i = 0; i< count; i++) {
+    for (int i = 0; i < count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
       chunkManager.readChunk(container, blockID, info, context);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
index 77eae56..5773eb3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -34,7 +33,6 @@
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -50,7 +48,6 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
@@ -142,7 +139,7 @@
 
   @Test
   public void testPutBlock() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
+    assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
     //Put Block with bcsId != 0
     blockManager.putBlock(keyValueContainer, blockData1);
 
@@ -150,7 +147,7 @@
     //Check Container's bcsId
     fromGetBlockData = blockManager.getBlock(keyValueContainer,
         blockData1.getBlockID());
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
+    assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
     assertEquals(1,
         keyValueContainer.getContainerData().getBlockCommitSequenceId());
     assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
@@ -161,7 +158,7 @@
     //Check Container's bcsId
     fromGetBlockData = blockManager.getBlock(keyValueContainer,
         blockData.getBlockID());
-    assertEquals(2, keyValueContainer.getContainerData().getKeyCount());
+    assertEquals(2, keyValueContainer.getContainerData().getBlockCount());
     assertEquals(0, fromGetBlockData.getBlockCommitSequenceId());
     assertEquals(1,
         keyValueContainer.getContainerData().getBlockCommitSequenceId());
@@ -170,11 +167,11 @@
 
   @Test
   public void testPutAndGetBlock() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
+    assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
     //Put Block
     blockManager.putBlock(keyValueContainer, blockData);
 
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
+    assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
     //Get Block
     BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
         blockData.getBlockID());
@@ -189,27 +186,6 @@
   }
 
   @Test
-  public void testDeleteBlock() throws Exception {
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-    assertEquals(1,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Delete Block
-    blockManager.deleteBlock(keyValueContainer, blockID);
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    try {
-      blockManager.getBlock(keyValueContainer, blockID);
-      fail("testDeleteBlock");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains(
-          "Unable to find the block", ex);
-    }
-  }
-
-  @Test
   public void testListBlock() throws Exception {
     blockManager.putBlock(keyValueContainer, blockData);
     List<BlockData> listBlockData = blockManager.listBlock(
@@ -236,27 +212,4 @@
     assertNotNull(listBlockData);
     assertTrue(listBlockData.size() == 10);
   }
-
-  @Test
-  public void testGetNoSuchBlock() throws Exception {
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Put Block
-    blockManager.putBlock(keyValueContainer, blockData);
-    assertEquals(1,
-        keyValueContainer.getContainerData().getKeyCount());
-    //Delete Block
-    blockManager.deleteBlock(keyValueContainer, blockID);
-    assertEquals(0,
-        keyValueContainer.getContainerData().getKeyCount());
-    try {
-      //Since the block has been deleted, we should not be able to find it
-      blockManager.getBlock(keyValueContainer, blockID);
-      fail("testGetNoSuchBlock failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains(
-          "Unable to find the block", ex);
-      assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
-    }
-  }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index d3e5757..15fe50a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -94,7 +94,7 @@
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);
 
-    for (int i=0; i<2; i++) {
+    for (int i = 0; i < 2; i++) {
       KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i,
           ContainerLayoutVersion.FILE_PER_BLOCK,
           (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
@@ -124,7 +124,7 @@
 
   private void markBlocksForDelete(KeyValueContainer keyValueContainer,
       boolean setMetaData, List<Long> blockNames, int count) throws Exception {
-    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf)) {
 
       for (int i = 0; i < count; i++) {
@@ -154,7 +154,7 @@
     long containerId = keyValueContainer.getContainerData().getContainerID();
 
     List<Long> blkNames = new ArrayList<>();
-    try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
+    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf)) {
 
       for (int i = 0; i < blockCount; i++) {
@@ -197,7 +197,7 @@
 
     Assert.assertEquals(2, containerSet.containerCount());
 
-    for (int i=0; i < 2; i++) {
+    for (int i = 0; i < 2; i++) {
       Container keyValueContainer = containerSet.getContainer(i);
 
       KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
@@ -205,7 +205,7 @@
 
       // Verify block related metadata.
       Assert.assertEquals(blockCount,
-          keyValueContainerData.getKeyCount());
+          keyValueContainerData.getBlockCount());
 
       Assert.assertEquals(blockCount * blockLen,
           keyValueContainerData.getBytesUsed());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 8e1458d..fec52c8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -48,7 +48,11 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.ozone.test.LambdaTestUtils;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
index cf6ece3..099ca9c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java
@@ -126,18 +126,18 @@
   public void bufferFlushedWhenFull() throws IOException {
     byte[] bytes = getRandomBytes(bufferSize);
 
-    subject.write(bytes, 0, bufferSize-1);
-    subject.write(bytes[bufferSize-1]);
+    subject.write(bytes, 0, bufferSize - 1);
+    subject.write(bytes[bufferSize - 1]);
     verify(observer).onNext(any());
 
     subject.write(bytes[0]);
-    subject.write(bytes, 1, bufferSize-1);
+    subject.write(bytes, 1, bufferSize - 1);
     verify(observer, times(2)).onNext(any());
   }
 
   @Test
   public void singleArraySpansMultipleResponses() throws IOException {
-    byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize/2);
+    byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize / 2);
     subject.close();
 
     verifyResponses(bytes);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
index f8f0b93..a9d4dad 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.container.replication;
 
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
 import java.util.ArrayList;
 
 import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
@@ -119,4 +121,17 @@
     Assert.assertEquals(0, measuredReplicator.getSuccessTime().value());
   }
 
+  @Test
+  public void testReplicationQueueTimeMetrics() {
+    final Instant queued = Instant.now().minus(1, ChronoUnit.SECONDS);
+    ReplicationTask task = new ReplicationTask(100L, new ArrayList<>()) {
+      @Override
+      public Instant getQueued() {
+        return queued;
+      }
+    };
+    measuredReplicator.replicate(task);
+    // There might be some deviation, so we use >= 1000 here.
+    Assert.assertTrue(measuredReplicator.getQueueTime().value() >= 1000);
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 37c5557..8078fc2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -58,7 +58,7 @@
 @RunWith(Parameterized.class)
 public class TestReplicationSupervisor {
 
-  private final ContainerReplicator noopReplicator = task -> {};
+  private final ContainerReplicator noopReplicator = task -> { };
   private final ContainerReplicator throwingReplicator = task -> {
     throw new RuntimeException("testing replication failure");
   };
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index ecb7af8..ee2bcfe 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -45,7 +45,7 @@
   public BlockDeletingServiceTestImpl(OzoneContainer container,
       int serviceInterval, ConfigurationSource conf) {
     super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
-        TimeUnit.MILLISECONDS, conf);
+        TimeUnit.MILLISECONDS, 10, conf);
   }
 
   @VisibleForTesting
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
index cb5257d..d882ca4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
@@ -144,7 +144,7 @@
     ExecutorService executor = Executors.newFixedThreadPool(1);
     Future<Void> readFuture = executor.submit(() -> {
       // Layout version check should be thread safe.
-      while(!dsm.getLayoutVersionManager()
+      while (!dsm.getLayoutVersionManager()
           .isAllowed(HDDSLayoutFeature.SCM_HA)) {
         readChunk(writeChunk, pipeline);
       }
@@ -203,7 +203,7 @@
     ExecutorService executor = Executors.newFixedThreadPool(1);
     Future<Void> importFuture = executor.submit(() -> {
       // Layout version check should be thread safe.
-      while(!dsm.getLayoutVersionManager()
+      while (!dsm.getLayoutVersionManager()
           .isAllowed(HDDSLayoutFeature.SCM_HA)) {
         importContainer(exportContainerID, exportedContainerFile);
         readChunk(exportWriteChunk, pipeline);
@@ -541,7 +541,7 @@
    * Get the cluster ID and SCM ID from SCM to the datanode.
    */
   public void callVersionEndpointTask() throws Exception {
-    try(EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf,
+    try (EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf,
         address, 1000)) {
       VersionEndpointTask vet = new VersionEndpointTask(esm, conf,
           dsm.getContainer());
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index f4bfcef..f20a70f 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -131,6 +131,7 @@
         </module>
         <module name="RedundantImport"/>
         <module name="UnusedImports"/>
+        <module name="AvoidStarImport"/>
 
 
         <!-- Checks for Size Violations.                    -->
@@ -152,6 +153,7 @@
         <module name="WhitespaceAfter">
           <property name="tokens" value="COMMA, SEMI"/>
         </module>
+        <module name="WhitespaceAround"/>
 
 
         <!-- Modifier Checks                                    -->
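For illustration only (not part of the patch), here is a minimal Java sketch of what the two new checkstyle modules expect: single-class imports rather than star imports (`AvoidStarImport`), and whitespace around operators and braces (`WhitespaceAround`), which is what the many `try(` → `try (` and `i=0` → `i = 0` fixes elsewhere in this change address. The class and method names below are made up.

```java
import java.util.ArrayList;   // AvoidStarImport: no "import java.util.*;"
import java.util.List;

public final class WhitespaceExample {
  private WhitespaceExample() { }          // WhitespaceAround: "{ }", not "{}"

  static List<Integer> firstN(int count) {
    List<Integer> values = new ArrayList<>();
    for (int i = 0; i < count; i++) {      // "i = 0", not "i=0"
      values.add(i);
    }
    return values;
  }
}
```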
diff --git a/hadoop-hdds/docs/content/concept/Containers.md b/hadoop-hdds/docs/content/concept/Containers.md
index b5894a6..e1e1ad5 100644
--- a/hadoop-hdds/docs/content/concept/Containers.md
+++ b/hadoop-hdds/docs/content/concept/Containers.md
@@ -28,8 +28,7 @@
 
 Containers are big binary units (5Gb by default) which can contain multiple blocks:
 
-{{< image src="Containers.png">}}
-
+![Containers](Containers.png)
 Blocks are local information and not managed by SCM. Therefore, even if billions of small files are created in the system (which means billions of blocks are created), only the status of the containers will be reported by the Datanodes and only containers will be replicated.
  
 When Ozone Manager requests a new Block allocation from the SCM, SCM will identify the suitable container and generate a block id which contains `ContainerId` + `LocalId`. Client will connect to the Datanode which stores the Container, and datanode can manage the separated block based on the `LocalId`.
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md
index 4372da6..1149eba 100644
--- a/hadoop-hdds/docs/content/concept/Datanodes.md
+++ b/hadoop-hdds/docs/content/concept/Datanodes.md
@@ -31,7 +31,7 @@
 
 ## Storage Containers
 
-{{< image src="ContainerMetadata.png">}}
+![Container Metadata](ContainerMetadata.png)
 
 A storage container is a self-contained super block. It has a list of Ozone
 blocks that reside inside it, as well as on-disk files which contain the
@@ -50,7 +50,7 @@
 An Ozone block contains the container ID and a local ID. The figure below
 shows the logical layout of an Ozone block.
 
-{{< image src="OzoneBlock.png">}}
+![Ozone Block](OzoneBlock.png)
 
 The container ID lets the clients discover the location of the container. The
 authoritative information about where a container is located is with the
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md
index 50bf441..b05d314 100644
--- a/hadoop-hdds/docs/content/concept/OzoneManager.md
+++ b/hadoop-hdds/docs/content/concept/OzoneManager.md
@@ -24,7 +24,7 @@
   limitations under the License.
 -->
 
-{{< image src="OzoneManager.png">}}
+![Ozone Manager](OzoneManager.png)
 
 Ozone Manager (OM) is the namespace manager for Ozone.
 
@@ -60,7 +60,7 @@
 
 ### Key Write
 
-{{< image src="OzoneManager-WritePath.png">}}
+![Ozone Manager Write Path](OzoneManager-WritePath.png)
 
 * To write a key to Ozone, a client tells Ozone manager that it would like to
 write a key into a bucket that lives inside a specific volume. Once Ozone
@@ -84,7 +84,7 @@
 
 ### Key Reads
 
-{{< image src="OzoneManager-ReadPath.png">}}
+![Ozone Manager Read Path](OzoneManager-ReadPath.png)
 
 * Key reads are simpler: the client requests the block list from the Ozone
 Manager
diff --git a/hadoop-hdds/docs/content/concept/Recon.md b/hadoop-hdds/docs/content/concept/Recon.md
index e3f6350..064127a 100644
--- a/hadoop-hdds/docs/content/concept/Recon.md
+++ b/hadoop-hdds/docs/content/concept/Recon.md
@@ -31,8 +31,7 @@
 
 ## High Level Design
 
-{{< image src="/concept/ReconHighLevelDesign.png">}}
-
+![Recon High Level Design](ReconHighLevelDesign.png)
 <br/>
 
 On a high level, Recon collects and aggregates metadata from Ozone Manager (OM), 
@@ -50,8 +49,7 @@
 
 ## Recon and Ozone Manager
 
-{{< image src="/concept/ReconOmDesign.png">}}
-
+![Recon OM Design](ReconOmDesign.png)
 <br/>
 
 Recon gets a full snapshot of OM rocks db initially from the leader OM's HTTP 
@@ -68,8 +66,7 @@
 
 ## Recon and Storage Container Manager
 
-{{< image src="/concept/ReconScmDesign.png">}}
-
+![Recon SCM Design](ReconScmDesign.png)
 <br/>
 
 Recon also acts as a passive SCM for datanodes. When Recon is configured in the
diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md
index 0719a29..573ce77 100644
--- a/hadoop-hdds/docs/content/feature/OM-HA.md
+++ b/hadoop-hdds/docs/content/feature/OM-HA.md
@@ -35,7 +35,7 @@
 
 A single Ozone Manager uses [RocksDB](https://github.com/facebook/rocksdb/) to persist metadata (volumes, buckets, keys) locally. HA version of Ozone Manager does exactly the same but all the data is replicated with the help of the RAFT consensus algorithm to follower Ozone Manager instances.
 
-{{< image src="HA-OM.png">}}
+![HA OM](HA-OM.png)
 
 The client connects to the Leader Ozone Manager, which processes the request and schedules the replication with RAFT. When the request is replicated to all the followers, the leader can return with the response.
 
@@ -106,7 +106,7 @@
 
 The RocksDB instance is updated by a background thread with batched transactions (a so-called "double buffer": while one of the buffers is used to commit the data, the other one collects all the new requests for the next commit). To make all data available for the next request even if the background process has not yet written it, the key data is cached in memory.
 
-{{< image src="HA-OM-doublebuffer.png">}}
+![HA - OM Double Buffer](HA-OM-doublebuffer.png)
 
 The details of this approach are discussed in a separate [design doc]({{< ref "design/omha.md" >}}), but it's an integral part of the OM HA design.
 
diff --git a/hadoop-hdds/docs/content/feature/PrefixFSO.md b/hadoop-hdds/docs/content/feature/PrefixFSO.md
index 78a5bc3..7d87b26 100644
--- a/hadoop-hdds/docs/content/feature/PrefixFSO.md
+++ b/hadoop-hdds/docs/content/feature/PrefixFSO.md
@@ -1,5 +1,5 @@
 ---
-title: "Prefix based FileSystem Optimization"
+title: "Prefix based File System Optimization"
 weight: 2
 menu:
    main:
@@ -23,63 +23,49 @@
   limitations under the License.
 -->
 
-The prefix based FileSystem optimization feature supports atomic rename and
- delete of any directory at any level in the namespace. Also, it will perform
-  rename and delete of any directory in a deterministic/constant time.
+The prefix-based File System Optimization feature supports atomic rename and delete of any directory at any level in the
+namespace in deterministic/constant time.
 
-Note: This feature works only when `ozone.om.enable.filesystem.paths` is
- enabled which means that Hadoop Compatible File System compatibility is
-  favored instead of S3 compatibility. Some irregular S3 key names may be
-   rejected or normalized.
+This feature can be enabled for each specific bucket that requires it by setting the `--layout` flag
+to `FILE_SYSTEM_OPTIMIZED` at the time of bucket creation.
 
-This feature is strongly recommended to be turned ON when Ozone buckets are
- mainly used via Hadoop compatible interfaces, especially with high number of
-  files in deep directory hierarchy.
+```bash
+ozone sh bucket create /<volume-name>/<bucket-name> --layout FILE_SYSTEM_OPTIMIZED
+```
+
+Note: File System Optimization favors Hadoop Compatible File System semantics over S3 compatibility. Some irregular S3
+key names may be rejected or normalized.
+
+This feature is strongly recommended to be turned ON for Ozone buckets mainly used via Hadoop compatible interfaces,
+especially those with a high number of files in a deep directory hierarchy.
 
 ## OzoneManager Metadata layout format
-OzoneManager supports two metadata layout formats - simple and prefix.
+OzoneManager supports two metadata bucket layout formats - Object Store (OBS) and File System Optimized (FSO).
 
-Simple is the existing OM metadata format, which stores key entry with full path
- name. In Prefix based optimization, OM metadata format stores intermediate
-  directories into `DirectoryTable` and files into `FileTable` as shown in the
-   below picture. The key to the table is the name of a directory or a file
-    prefixed by the unique identifier of its parent directory, `<parent
-     unique-id>/<filename>`. 
-     
-{{< image src="PrefixFSO-Format.png">}}
+Object Store (OBS) is the existing OM metadata format, which stores key entry with full path name. In File System
+Optimized (FSO) buckets, OM metadata format stores intermediate directories into `DirectoryTable` and files
+into `FileTable` as shown in the below picture. The key to the table is the name of a directory or a file prefixed by
+the unique identifier of its parent directory, `<parent unique-id>/<filename>`.
+
+![Prefix FSO Format](PrefixFSO-Format.png)
 
 
 ### Directory delete operation with prefix layout: ###
 The following picture describes the OM metadata changes while performing a delete
  operation on a directory.
 
-{{< image src="PrefixFSO-Delete.png">}}
+![Prefix FSO Delete](PrefixFSO-Delete.png)
 
 ### Directory rename operation with prefix layout: ###
 The following picture describes the OM metadata changes while performing a rename
  operation on a directory.
 
-{{< image src="PrefixFSO-Rename.png">}}
+![Prefix FSO Rename](PrefixFSO-Rename.png)
 
 ## Configuration
-By default the feature is disabled. It can be enabled with the following
- settings in `ozone-site.xml`:
 
-```XML
-<property>
-   <name>ozone.om.enable.filesystem.paths</name>
-   <value>true</value>
-</property>
-<property>
-   <name>ozone.om.metadata.layout</name>
-   <value>PREFIX</value>
-</property>
-```
-
-In reference to efforts towards supporting protocol aware buckets 
-within an Ozone cluster, the following configuration can be used 
-to define the default value for bucket layout during bucket creation 
-if the client has not specified the bucket layout argument. 
+The following property can be set in `ozone-site.xml` to define the default bucket layout used during bucket creation
+if the client has not specified the bucket layout argument.
 Supported values are `OBJECT_STORE` and `FILE_SYSTEM_OPTIMIZED`.
 
 If this property is not set, buckets default to `OBJECT_STORE` behaviour.
@@ -87,7 +73,7 @@
 ```XML
 
 <property>
- <name>ozone.default.bucket.layout</name>
- <value>OBJECT_STORE</value>
+    <name>ozone.default.bucket.layout</name>
+    <value>OBJECT_STORE</value>
 </property>
 ```
\ No newline at end of file
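To make the `<parent unique-id>/<filename>` key format above concrete, here is a purely illustrative Java sketch with made-up object IDs; in a real cluster the IDs are assigned by OM, and the keys live in the `DirectoryTable` and `FileTable` described in the document.

```java
/**
 * Illustrative sketch only: FSO table keys for /vol1/bucket1/dir1/file1,
 * assuming OM assigned object ID 1001 to bucket1 and 1002 to dir1.
 */
public final class FsoKeyLayoutExample {
  private FsoKeyLayoutExample() { }

  public static void main(String[] args) {
    long bucketObjectId = 1001L;  // assumed ID of bucket1 (parent of dir1)
    long dir1ObjectId = 1002L;    // assumed ID of dir1 (parent of file1)

    // DirectoryTable entry: key = <parent unique-id>/<directory name>
    String directoryTableKey = bucketObjectId + "/dir1";   // "1001/dir1"

    // FileTable entry: key = <parent unique-id>/<file name>
    String fileTableKey = dir1ObjectId + "/file1";         // "1002/file1"

    System.out.println(directoryTableKey);
    System.out.println(fileTableKey);
  }
}
```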
diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.md b/hadoop-hdds/docs/content/feature/SCM-HA.md
index 78aafcf..ebbe998 100644
--- a/hadoop-hdds/docs/content/feature/SCM-HA.md
+++ b/hadoop-hdds/docs/content/feature/SCM-HA.md
@@ -109,7 +109,7 @@
 
 ## SCM HA Security
 
-{{< image src="scm-secure-ha.png">}}
+![SCM Secure HA](scm-secure-ha.png)
 
 In a secure SCM HA cluster, the SCM on which we perform init is called the primordial SCM.
 The primordial SCM starts a root CA with self-signed certificates and is used to issue a signed certificate 
diff --git a/hadoop-hdds/docs/content/interface/O3fs.md b/hadoop-hdds/docs/content/interface/O3fs.md
index c56efb5..0aa25a8 100644
--- a/hadoop-hdds/docs/content/interface/O3fs.md
+++ b/hadoop-hdds/docs/content/interface/O3fs.md
@@ -76,7 +76,7 @@
 You also need to add the ozone-filesystem-hadoop3.jar file to the classpath:
 
 {{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
 {{< /highlight >}}
 
 (Note: with Hadoop 2.x, use the `ozone-filesystem-hadoop2-*.jar`)
diff --git a/hadoop-hdds/docs/content/interface/O3fs.zh.md b/hadoop-hdds/docs/content/interface/O3fs.zh.md
index fa8b4d8..bd33609 100644
--- a/hadoop-hdds/docs/content/interface/O3fs.zh.md
+++ b/hadoop-hdds/docs/content/interface/O3fs.zh.md
@@ -72,7 +72,7 @@
 你还需要将 ozone-filesystem.jar 文件加入 classpath:
 
 {{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
 {{< /highlight >}}
 
 (注意:当使用Hadoop 2.x时,应该在classpath上添加ozone-filesystem-hadoop2-*.jar)
@@ -145,7 +145,7 @@
 你还需要将 ozone-filesystem.jar 文件加入 classpath:
 
 {{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
 {{< /highlight >}}
 
 (注意:当使用Hadoop 2.x时,应该在classpath上添加ozone-filesystem-hadoop2-*.jar)
diff --git a/hadoop-hdds/docs/content/interface/Ofs.md b/hadoop-hdds/docs/content/interface/Ofs.md
index 66cf2b7..7a3f892 100644
--- a/hadoop-hdds/docs/content/interface/Ofs.md
+++ b/hadoop-hdds/docs/content/interface/Ofs.md
@@ -82,7 +82,7 @@
 You also need to add the ozone-filesystem-hadoop3.jar file to the classpath:
 
 {{< highlight bash >}}
-export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
+export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH
 {{< /highlight >}}
 
 (Note: with Hadoop 2.x, use the `ozone-filesystem-hadoop2-*.jar`)
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md
index c85da0f..2a98984 100644
--- a/hadoop-hdds/docs/content/security/SecuringTDE.md
+++ b/hadoop-hdds/docs/content/security/SecuringTDE.md
@@ -58,10 +58,10 @@
    * Assign the encryption key to a bucket.
 
   ```bash
-  ozone sh bucket create -k encKey /vol/encryptedBucket
+  ozone sh bucket create -k encKey /vol/encryptedbucket
   ```
 
-After this command, all data written to the _encryptedBucket_ will be encrypted
+After this command, all data written to the _encryptedbucket_ will be encrypted
 via the encKey. When reading, the clients will talk to the Key Management
 Server, fetch the key and decrypt the data. In other words, the data stored
 inside Ozone is always encrypted. The fact that data is encrypted at rest
@@ -71,20 +71,47 @@
 
 There are two ways to create an encrypted bucket that can be accessed via S3 Gateway.
 
-####1. Create a bucket using shell under "/s3v" volume
+#### Option 1. Create a bucket using shell under "/s3v" volume
 
   ```bash
-  ozone sh bucket create -k encKey /s3v/encryptedBucket
+  ozone sh bucket create -k encKey --layout=OBJECT_STORE /s3v/encryptedbucket
   ```
-####2. Create a link to an encrypted bucket under "/s3v" volume
+
+#### Option 2. Create a link to an encrypted bucket under "/s3v" volume
 
   ```bash
-  ozone sh bucket create -k encKey /vol/encryptedBucket
-  ozone sh bucket link  /vol/encryptedBucket /s3v/linkencryptedbucket
+  ozone sh bucket create -k encKey --layout=OBJECT_STORE /vol/encryptedbucket
+  ozone sh bucket link /vol/encryptedbucket /s3v/linkencryptedbucket
   ```
-Note: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above.
+
+Note 1: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above.
 After creating an encrypted bucket, all the keys added to this bucket using s3g will be encrypted.
 
+Note 2: `--layout=OBJECT_STORE` is specified in the above examples
+for full compatibility with S3. It is the default value for the `--layout`
+argument, but it is spelled out here for emphasis.
+
+Buckets created with the `OBJECT_STORE` layout will NOT be accessible via
+HCFS (ofs or o3fs) at all; such access will be rejected. For instance:
+
+  ```bash
+  $ ozone fs -ls ofs://ozone1/s3v/encryptedbucket/
+  -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
+  ```
+
+  ```bash
+  $ ozone fs -ls o3fs://encryptedbucket.s3v.ozone1/
+  22/02/07 00:00:00 WARN fs.FileSystem: Failed to initialize fileystem o3fs://encryptedbucket.s3v.ozone1/: java.lang.IllegalArgumentException: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
+  -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY.
+  ```
+
+If one wants the bucket to be accessible from both S3G and HCFS (ofs and o3fs)
+at the same time, use `--layout=FILE_SYSTEM_OPTIMIZED` instead.
+
+However, in buckets with `FILE_SYSTEM_OPTIMIZED` layout, some irregular S3 key
+names may be rejected or normalized, which can be undesired.
+See [Prefix based File System Optimization]({{< relref "../feature/PrefixFSO.md" >}}) for more information.
+
 In non-secure mode, the user running the S3Gateway daemon process is the proxy user, 
 while in secure mode the S3Gateway Kerberos principal (ozone.s3g.kerberos.principal) is the proxy user. 
 S3Gateway proxies all the users accessing the encrypted buckets to decrypt the key. 
@@ -111,12 +138,11 @@
          This is the host where the S3Gateway is running. Set this to '*' to allow
          requests from any hosts to be proxied.
   </description>
-
 </property>
-
 ```
 
-###KMS Authorization
+### KMS Authorization
+
 If Ranger authorization is enabled for KMS, then the decrypt key permission should be given to
 the access key id user (currently the access key is the Kerberos principal) to decrypt the encrypted key 
 to read/write a key in the encrypted bucket.
diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
index bda5e76..d7a2911 100644
--- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
+++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md
@@ -49,7 +49,7 @@
    * 将加密密钥分配给桶
 
   ```bash
-  ozone sh bucket create -k encKey /vol/encryptedBucket
+  ozone sh bucket create -k encKey /vol/encryptedbucket
   ```
 
-这条命令执行后,所以写往 _encryptedBucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。
+这条命令执行后,所以写往 _encryptedbucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。
diff --git a/hadoop-hdds/docs/dev-support/bin/generate-site.sh b/hadoop-hdds/docs/dev-support/bin/generate-site.sh
index 3d7baa8..1556f95 100755
--- a/hadoop-hdds/docs/dev-support/bin/generate-site.sh
+++ b/hadoop-hdds/docs/dev-support/bin/generate-site.sh
@@ -31,8 +31,18 @@
   ENABLE_GIT_INFO="--enableGitInfo"
 fi
 
+# Copy docs files to a temporary directory inside target
+# for pre-processing the markdown files.
+TMPDIR="$DOCDIR/target/tmp"
+mkdir -p "$TMPDIR"
+rsync -a --exclude="target" --exclude="public" "$DOCDIR/" "$TMPDIR"
+
+# Replace all markdown images with a hugo shortcode to make them responsive.
+python3 "$DIR/make_images_responsive.py" "$TMPDIR"
+
 DESTDIR="$DOCDIR/target/classes/docs"
 mkdir -p "$DESTDIR"
-cd "$DOCDIR"
+# We want to build the processed files inside the $DOCDIR/target/tmp
+cd "$TMPDIR"
 hugo "${ENABLE_GIT_INFO}" -d "$DESTDIR" "$@"
 cd -
diff --git a/hadoop-hdds/docs/dev-support/bin/make_images_responsive.py b/hadoop-hdds/docs/dev-support/bin/make_images_responsive.py
new file mode 100644
index 0000000..4c945eb
--- /dev/null
+++ b/hadoop-hdds/docs/dev-support/bin/make_images_responsive.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import os
+import re
+import logging
+
+LOGLEVEL = os.environ.get('LOGLEVEL', 'WARNING').upper()
+logging.basicConfig(level=LOGLEVEL)
+
+# The first argument to the script is the directory where the documentation is
+# stored.
+docs_directory = os.path.expanduser(sys.argv[1])
+content_directory = os.path.join(docs_directory, 'content')
+
+for root, subdirs, files in os.walk(docs_directory):
+    for filename in files:
+        # We only want to modify markdown files.
+        if filename.endswith('.md'):
+            file_path = os.path.join(root, filename)
+
+            new_file_content = []
+
+            with open(file_path, 'r', encoding='utf-8') as f:
+                for line in f:
+                    # If the line contains the image tag, we need to replace it
+                    if re.search(r"^!\[(.*?)\]\((.*?)\)", line):
+                        logging.debug(
+                            f'file {filename} (full path: {file_path})')
+                        logging.debug(f"found markdown image: {line}")
+
+                        line_replacement = line.replace(
+                            '![', '{{< image alt="').replace('](', '" src="').replace(')', '">}}')
+
+                        logging.debug(
+                            f"replaced with shortcode: {line_replacement}")
+
+                        new_file_content.append(line_replacement)
+
+                    else:
+                        new_file_content.append(line)
+
+            with open(file_path, 'w', encoding='utf-8') as f:
+                f.writelines(new_file_content)
diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
index 1f558d9..2d143e7 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
+++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html
@@ -16,4 +16,4 @@
 -->
 
 <!-- shortcode to easily scale images according to page width-->
-<img src='{{ .Get "src" }}' class="img-responsive"/>
\ No newline at end of file
+<img src='{{ .Get "src" }}' alt='{{ .Get "alt" }}' class="img-responsive"/>
\ No newline at end of file
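The net effect of this pre-processing step is that plain markdown images in the docs are rewritten into the responsive `image` shortcode, which now also carries an `alt` attribute. A purely illustrative Java equivalent of the three string replacements the Python helper performs (class name made up):

```java
public final class ImageShortcodeExample {
  private ImageShortcodeExample() { }

  public static void main(String[] args) {
    String markdownImage = "![Containers](Containers.png)";
    // Same replacements make_images_responsive.py applies to each image line.
    String shortcode = markdownImage
        .replace("![", "{{< image alt=\"")
        .replace("](", "\" src=\"")
        .replace(")", "\">}}");
    // Prints: {{< image alt="Containers" src="Containers.png">}}
    System.out.println(shortcode);
  }
}
```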
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
index 606b459..26107d5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeDetailsProto;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.security.x509.crl.CRLInfo;
 import org.apache.hadoop.security.KerberosInfo;
@@ -158,4 +159,15 @@
    */
   long revokeCertificates(List<String> certIds, int reason, long revocationTime)
       throws IOException;
+
+  /**
+   * Get SCM signed certificate.
+   *
+   * @param nodeDetails - Node Details.
+   * @param certSignReq  - Certificate signing request.
+   * @return String      - pem encoded SCM signed
+   *                         certificate.
+   */
+  String getCertificate(NodeDetailsProto nodeDetails,
+      String certSignReq) throws IOException;
 }
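A minimal caller sketch for the new generic `getCertificate` endpoint, assuming an `SCMSecurityProtocol` proxy and a populated `NodeDetailsProto` are already available (how they are obtained is outside this patch); the helper class and method names below are made up.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeDetailsProto;

/** Hypothetical helper; only the getCertificate call comes from this patch. */
public final class NodeCertificateFetcher {
  private NodeCertificateFetcher() { }

  /**
   * Asks SCM to sign the given CSR for the given node.
   *
   * @param client  SCM security protocol client (e.g. the PB translator).
   * @param node    details of the node requesting a certificate.
   * @param csrPem  PEM-encoded certificate signing request.
   * @return PEM-encoded, SCM-signed certificate.
   */
  public static String fetchSignedCertificate(SCMSecurityProtocol client,
      NodeDetailsProto node, String csrPem) throws IOException {
    return client.getCertificate(node, csrPem);
  }
}
```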
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index d89ecc6..bbbad8b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CRLInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCrlsRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCACertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetLatestCrlIdRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCertificateRequestProto;
@@ -173,6 +175,21 @@
   }
 
   /**
+   * Get SCM signed certificate.
+   *
+   * @param nodeDetails - Node Details.
+   * @param certSignReq  - Certificate signing request.
+   * @return String      - pem encoded SCM signed
+   *                         certificate.
+   */
+  @Override
+  public String getCertificate(NodeDetailsProto nodeDetails,
+      String certSignReq) throws IOException {
+    return getCertificateChain(nodeDetails, certSignReq)
+        .getX509Certificate();
+  }
+
+  /**
    * Get signed certificate for SCM node.
    *
    * @param scmNodeDetails  - SCM Node Details.
@@ -268,6 +285,26 @@
   }
 
   /**
+   * Get SCM signed certificate.
+   *
+   * @param nodeDetails   - Node Details.
+   * @param certSignReq - Certificate signing request.
+   * @return SCMGetCertResponseProto - SCM signed certificate response.
+   */
+  public SCMGetCertResponseProto getCertificateChain(
+      NodeDetailsProto nodeDetails, String certSignReq)
+      throws IOException {
+
+    SCMGetCertRequestProto request =
+        SCMGetCertRequestProto.newBuilder()
+            .setCSR(certSignReq)
+            .setNodeDetails(nodeDetails)
+            .build();
+    return submitRequest(Type.GetCert,
+        builder -> builder.setGetCertRequest(request))
+        .getGetCertResponseProto();
+  }
+  /**
    * Get CA certificate.
    *
    * @return serial   - Root certificate.
@@ -366,7 +403,7 @@
         .setReason(Reason.valueOf(reason))
         .setRevokeTime(revocationTime).build();
     return submitRequest(Type.RevokeCertificates,
-        builder->builder.setRevokeCertificatesRequest(req))
+        builder -> builder.setRevokeCertificatesRequest(req))
         .getRevokeCertificatesResponseProto().getCrlId();
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index b26c0da..77ef3f0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -332,7 +332,7 @@
       response = submitRequest(Type.GetExistContainerWithPipelinesInBatch,
           (builder) -> builder
               .setGetExistContainerWithPipelinesInBatchRequest(request));
-    } catch (IOException ex){
+    } catch (IOException ex) {
       return cps;
     }
 
@@ -781,7 +781,7 @@
       Optional<Integer> maxDatanodesPercentageToInvolvePerIteration,
       Optional<Long> maxSizeToMovePerIterationInGB,
       Optional<Long> maxSizeEnteringTargetInGB,
-      Optional<Long> maxSizeLeavingSourceInGB) throws IOException{
+      Optional<Long> maxSizeLeavingSourceInGB) throws IOException {
     StartContainerBalancerRequestProto.Builder builder =
         StartContainerBalancerRequestProto.newBuilder();
     builder.setTraceID(TracingUtil.exportCurrentSpan());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
index 6d54481..72da519 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java
@@ -68,7 +68,7 @@
 
     this.clientStore = serviceGrpcClient.getClientCRLStore();
     this.crlCheckInterval = crlCheckInterval;
-    LOG.info("Pending CRL check interval : {}s", crlCheckInterval/1000);
+    LOG.info("Pending CRL check interval : {}s", crlCheckInterval / 1000);
     this.executorService = Executors.newSingleThreadScheduledExecutor(
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("CRLUpdateHandler Thread - %d").build());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
index 721988e..5e326cc 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java
@@ -76,7 +76,7 @@
 
   public List<Long> getRevokedCertIds(X509CRL crl) {
     return Collections.unmodifiableList(crl.getRevokedCertificates().stream()
-        .map(cert->cert.getSerialNumber().longValue())
+        .map(cert -> cert.getSerialNumber().longValue())
         .collect(Collectors.toList()));
   }
 
@@ -91,7 +91,7 @@
 
   public List<Long> getPendingCrlIds() {
     return new ArrayList<>(pendingCrls)
-        .stream().map(crl->crl.getCrlSequenceID())
+        .stream().map(crl -> crl.getCrlSequenceID())
         .collect(Collectors.toList());
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
index 96e1577..8b96d5c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java
@@ -92,7 +92,7 @@
       createChannel();
     }
     clientId = subScribeClient();
-    assert(clientId != null);
+    assert (clientId != null);
 
     // start background thread processing pending crl ids.
     handler = new CRLClientUpdateHandler(clientId, updateClient,
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/UpdateServiceConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/UpdateServiceConfig.java
index 9f55c4d..958335f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/UpdateServiceConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/UpdateServiceConfig.java
@@ -27,9 +27,8 @@
 @ConfigGroup(prefix = "ozone.scm.update.service")
 public final class UpdateServiceConfig {
 
-  @Config(key = "port", defaultValue = "9893", description = "Port used for"
-      + " the SCM grpc update service for CRL.", tags = {
-      ConfigTag.SECURITY})
+  @Config(key = "port", defaultValue = "9893", tags = {ConfigTag.SECURITY},
+      description = "Port used for the SCM grpc update service for CRL.")
   private int port;
 
   public int getPort() {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
index 3136168..6738868 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java
@@ -100,7 +100,7 @@
     Objects.requireNonNull(attribute);
     List<Extensions> extensionsList = new ArrayList<>();
     for (ASN1Encodable value : attribute.getAttributeValues()) {
-      if(value != null) {
+      if (value != null) {
         Extensions extensions = Extensions.getInstance(value);
         extensionsList.add(extensions);
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index fc2a77b..83be3aa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -228,7 +228,7 @@
     CompletableFuture<X509CertificateHolder> xcertHolder =
         approver.inspectCSR(csr);
 
-    if(xcertHolder.isCompletedExceptionally()) {
+    if (xcertHolder.isCompletedExceptionally()) {
       // This means that approver told us there are things which it disagrees
       // with in this Certificate Request. Since the first set of sanity
       // checks failed, we just return the future object right here.
@@ -324,7 +324,7 @@
   public List<X509Certificate> listCertificate(NodeType role,
       long startSerialId, int count, boolean isRevoked) throws IOException {
     return store.listCertificate(role, BigInteger.valueOf(startSerialId), count,
-        isRevoked? CertificateStore.CertType.REVOKED_CERTS :
+        isRevoked ? CertificateStore.CertType.REVOKED_CERTS :
             CertificateStore.CertType.VALID_CERTS);
   }
 
@@ -554,7 +554,7 @@
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if(validator.isValid(ip.getCanonicalHostName())) {
+            if (validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             }
           });
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
index a146c73..da799d7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
@@ -53,7 +53,7 @@
       PKIProfile pkiProfile) {
     BasicConstraints constraints =
         BasicConstraints.getInstance(ext.getParsedValue());
-    if(constraints.isCA()) {
+    if (constraints.isCA()) {
       if (pkiProfile.isCA()) {
         return true;
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
index 7833f6c..44e0517 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
@@ -79,22 +79,22 @@
   // Map that handles all the Extensions lookup and validations.
   protected static final Map<ASN1ObjectIdentifier, BiFunction<Extension,
       PKIProfile, Boolean>> EXTENSIONS_MAP = Stream.of(
-      new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE),
-      new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN),
-      new SimpleEntry<>(Extension.authorityKeyIdentifier,
-          VALIDATE_AUTHORITY_KEY_IDENTIFIER),
-      new SimpleEntry<>(Extension.extendedKeyUsage,
-          VALIDATE_EXTENDED_KEY_USAGE),
-      // Ozone certs are issued only for the use of Ozone.
-      // However, some users will discover that this is a full scale CA
-      // and decide to mis-use these certs for other purposes.
-      // To discourage usage of these certs for other purposes, we can leave
-      // the Ozone Logo inside these certs. So if a browser is used to
-      // connect these logos will show up.
-      // https://www.ietf.org/rfc/rfc3709.txt
-      new SimpleEntry<>(Extension.logoType, VALIDATE_LOGO_TYPE))
-      .collect(Collectors.toMap(SimpleEntry::getKey,
-          SimpleEntry::getValue));
+          new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE),
+          new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN),
+          new SimpleEntry<>(Extension.authorityKeyIdentifier,
+              VALIDATE_AUTHORITY_KEY_IDENTIFIER),
+          new SimpleEntry<>(Extension.extendedKeyUsage,
+              VALIDATE_EXTENDED_KEY_USAGE),
+          // Ozone certs are issued only for the use of Ozone.
+          // However, some users will discover that this is a full scale CA
+          // and decide to mis-use these certs for other purposes.
+          // To discourage usage of these certs for other purposes, we can leave
+          // the Ozone Logo inside these certs. So if a browser is used to
+          // connect these logos will show up.
+          // https://www.ietf.org/rfc/rfc3709.txt
+          new SimpleEntry<>(Extension.logoType, VALIDATE_LOGO_TYPE))
+          .collect(Collectors.toMap(SimpleEntry::getKey,
+              SimpleEntry::getValue));
   // If we decide to add more General Names, we should add those here and
   // also update the logic in validateGeneralName function.
   private static final KeyPurposeId[] EXTENDED_KEY_USAGE = {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
new file mode 100644
index 0000000..c32965f
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CommonCertificateClient.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.security.x509.certificate.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
+import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
+
+import org.slf4j.Logger;
+
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
+
+/**
+ * Common Certificate client.
+ */
+public class CommonCertificateClient extends DefaultCertificateClient {
+
+  private final Logger log;
+
+  public CommonCertificateClient(SecurityConfig securityConfig, Logger log,
+      String certSerialId, String component) throws IOException {
+    super(securityConfig, log, certSerialId, component);
+    this.log = log;
+  }
+
+  /**
+   * Returns a CSR builder that can be used to creates a Certificate signing
+   * request.
+   *
+   * @return CertificateSignRequest.Builder
+   */
+  @Override
+  public CertificateSignRequest.Builder getCSRBuilder()
+      throws CertificateException {
+    return super.getCSRBuilder()
+        .setDigitalEncryption(true)
+        .setDigitalSignature(true);
+  }
+
+  @Override
+  protected InitResponse handleCase(InitCase init)
+      throws CertificateException, IOException {
+    switch (init) {
+    case NONE:
+      log.info("Creating keypair for client as keypair and certificate not " +
+          "found.");
+      bootstrapClientKeys();
+      return GETCERT;
+    case CERT:
+      log.error("Private key not found, while certificate is still present. " +
+          "Delete keypair and try again.");
+      return FAILURE;
+    case PUBLIC_KEY:
+      log.error("Found public key but private key and certificate missing.");
+      return FAILURE;
+    case PRIVATE_KEY:
+      log.info("Found private key but public key and certificate is missing.");
+      // TODO: Recovering public key from private might be possible in some
+      //  cases.
+      return FAILURE;
+    case PUBLICKEY_CERT:
+      log.error("Found public key and certificate but private key is " +
+          "missing.");
+      return FAILURE;
+    case PRIVATEKEY_CERT:
+      log.info("Found private key and certificate but public key missing.");
+      if (recoverPublicKey()) {
+        return SUCCESS;
+      } else {
+        log.error("Public key recovery failed.");
+        return FAILURE;
+      }
+    case PUBLICKEY_PRIVATEKEY:
+      log.info("Found private and public key but certificate is missing.");
+      if (validateKeyPair(getPublicKey())) {
+        return RECOVER;
+      } else {
+        log.error("Keypair validation failed.");
+        return FAILURE;
+      }
+    case ALL:
+      log.info("Found certificate file along with KeyPair.");
+      if (validateKeyPairAndCertificate()) {
+        return SUCCESS;
+      } else {
+        return FAILURE;
+      }
+    default:
+      log.error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
+      return FAILURE;
+    }
+  }
+
+  @Override
+  public Logger getLogger() {
+    return log;
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index 8f88a38..f5b2405 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -619,7 +619,7 @@
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if(validator.isValid(ip.getCanonicalHostName())) {
+            if (validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             } else {
               getLogger().error("Invalid domain {}", ip.getCanonicalHostName());
@@ -683,7 +683,7 @@
       String certName = String.format(CERT_FILE_NAME_FORMAT,
           cert.getSerialNumber().toString());
 
-      if(caCert) {
+      if (caCert) {
         certName = CA_CERT_PREFIX + certName;
         caCertId = cert.getSerialNumber().toString();
       }
@@ -791,17 +791,17 @@
   @Override
   public synchronized InitResponse init() throws IOException {
     int initCase = 0;
-    PrivateKey pvtKey= getPrivateKey();
+    PrivateKey pvtKey = getPrivateKey();
     PublicKey pubKey = getPublicKey();
     X509Certificate certificate = getCertificate();
 
-    if(pvtKey != null){
-      initCase = initCase | 1<<2;
+    if (pvtKey != null) {
+      initCase = initCase | 1 << 2;
     }
-    if(pubKey != null){
-      initCase = initCase | 1<<1;
+    if (pubKey != null) {
+      initCase = initCase | 1 << 1;
     }
-    if(certificate != null){
+    if (certificate != null) {
       initCase = initCase | 1;
     }
     getLogger().info("Certificate client init case: {}", initCase);
@@ -933,7 +933,7 @@
     PublicKey pubKey = getCertificate().getPublicKey();
     try {
 
-      if(validateKeyPair(pubKey)){
+      if (validateKeyPair(pubKey)) {
         keyCodec.writePublicKey(pubKey);
         publicKey = pubKey;
       } else {
@@ -1055,7 +1055,7 @@
         updateCAList();
       }
       return pemEncodedCACerts;
-    }finally {
+    } finally {
       lock.unlock();
     }
   }
@@ -1080,7 +1080,7 @@
   }
 
   @Override
-  public boolean processCrl(CRLInfo crl){
+  public boolean processCrl(CRLInfo crl) {
     List<String> certIds2Remove = new ArrayList();
     crl.getX509CRL().getRevokedCertificates().forEach(
         cert -> certIds2Remove.add(cert.getSerialNumber().toString()));
@@ -1090,15 +1090,15 @@
   }
 
 
-  private boolean removeCertificates(List<String> certIds){
+  private boolean removeCertificates(List<String> certIds) {
     lock.lock();
     boolean reInitCert = false;
     try {
       // For now, remove self cert and ca cert is not implemented
       // both requires a restart of the service.
-      if ((certSerialId!=null && certIds.contains(certSerialId)) ||
-          (caCertId!=null && certIds.contains(caCertId)) ||
-          (rootCaCertId!=null && certIds.contains(rootCaCertId))) {
+      if ((certSerialId != null && certIds.contains(certSerialId)) ||
+          (caCertId != null && certIds.contains(caCertId)) ||
+          (rootCaCertId != null && certIds.contains(rootCaCertId))) {
         reInitCert = true;
       }
 
@@ -1137,7 +1137,7 @@
    * Set Local CRL id.
    * @param crlId
    */
-  public void setLocalCrlId(long crlId){
+  public void setLocalCrlId(long crlId) {
     this.localCrlId = crlId;
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
index 6f563eb..3ce7d16 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
@@ -19,24 +19,17 @@
 
 package org.apache.hadoop.hdds.security.x509.certificate.client;
 
-import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
 
 import java.io.IOException;
 
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER;
-import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
-
 /**
  * Certificate client for OzoneManager.
  */
-public class OMCertificateClient extends DefaultCertificateClient {
+public class OMCertificateClient extends CommonCertificateClient {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(OMCertificateClient.class);
@@ -46,12 +39,12 @@
   public OMCertificateClient(SecurityConfig securityConfig,
       String certSerialId, String localCrlId) throws IOException {
     super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
-    this.setLocalCrlId(localCrlId!=null ?
-        Long.parseLong(localCrlId): 0);
+    this.setLocalCrlId(localCrlId != null ?
+        Long.parseLong(localCrlId) : 0);
   }
 
   public OMCertificateClient(SecurityConfig securityConfig,
-      String certSerialId) throws IOException{
+      String certSerialId) throws IOException {
     this(securityConfig, certSerialId, null);
   }
 
@@ -60,75 +53,6 @@
   }
 
   @Override
-  protected InitResponse handleCase(InitCase init) throws IOException {
-    switch (init) {
-    case NONE:
-      LOG.info("Creating keypair for client as keypair and certificate not " +
-          "found.");
-      bootstrapClientKeys();
-      return GETCERT;
-    case CERT:
-      LOG.error("Private key not found, while certificate is still present." +
-          "Delete keypair and try again.");
-      return FAILURE;
-    case PUBLIC_KEY:
-      LOG.error("Found public key but private key and certificate missing.");
-      return FAILURE;
-    case PRIVATE_KEY:
-      LOG.info("Found private key but public key and certificate is missing.");
-      // TODO: Recovering public key from private might be possible in some
-      //  cases.
-      return FAILURE;
-    case PUBLICKEY_CERT:
-      LOG.error("Found public key and certificate but private key is " +
-          "missing.");
-      return FAILURE;
-    case PRIVATEKEY_CERT:
-      LOG.info("Found private key and certificate but public key missing.");
-      if (recoverPublicKey()) {
-        return SUCCESS;
-      } else {
-        LOG.error("Public key recovery failed.");
-        return FAILURE;
-      }
-    case PUBLICKEY_PRIVATEKEY:
-      LOG.info("Found private and public key but certificate is missing.");
-      if (validateKeyPair(getPublicKey())) {
-        return RECOVER;
-      } else {
-        LOG.error("Keypair validation failed.");
-        return FAILURE;
-      }
-    case ALL:
-      LOG.info("Found certificate file along with KeyPair.");
-      if (validateKeyPairAndCertificate()) {
-        return SUCCESS;
-      } else {
-        return FAILURE;
-      }
-    default:
-      LOG.error("Unexpected case: {} (private/public/cert)",
-          Integer.toBinaryString(init.ordinal()));
-      return FAILURE;
-    }
-  }
-
-  /**
-   * Returns a CSR builder that can be used to creates a Certificate signing
-   * request.
-   *
-   * @return CertificateSignRequest.Builder
-   */
-  @Override
-  public CertificateSignRequest.Builder getCSRBuilder()
-      throws CertificateException {
-    return super.getCSRBuilder()
-        .setDigitalEncryption(true)
-        .setDigitalSignature(true);
-  }
-
-
-  @Override
   public Logger getLogger() {
     return LOG;
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/ReconCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/ReconCertificateClient.java
new file mode 100644
index 0000000..482d81a
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/ReconCertificateClient.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.x509.certificate.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Certificate client for Recon.
+ */
+public class ReconCertificateClient extends CommonCertificateClient {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReconCertificateClient.class);
+
+  public static final String COMPONENT_NAME = "recon";
+
+  public ReconCertificateClient(SecurityConfig securityConfig,
+      String certSerialId) throws IOException {
+    super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
+  }
+
+  @Override
+  public Logger getLogger() {
+    return LOG;
+  }
+}
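
For context, a minimal sketch of how such a client could be instantiated; the configuration object and the null certificate serial id below are illustrative placeholders, not values taken from this change:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.security.x509.SecurityConfig;
    import org.apache.hadoop.hdds.security.x509.certificate.client.ReconCertificateClient;

    // certSerialId may be null on first start, before any certificate exists.
    SecurityConfig secConf = new SecurityConfig(new OzoneConfiguration());
    ReconCertificateClient client = new ReconCertificateClient(secConf, null);
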
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
index b8d2859..ec7b5a8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
@@ -117,7 +117,7 @@
     PemObject pemObject =
         new PemObject("CERTIFICATE REQUEST", request.getEncoded());
     StringWriter str = new StringWriter();
-    try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) {
+    try (JcaPEMWriter pemWriter = new JcaPEMWriter(str)) {
       pemWriter.writeObject(pemObject);
     }
     return str.toString();
@@ -135,7 +135,7 @@
       throws IOException {
     try (PemReader reader = new PemReader(new StringReader(csr))) {
       PemObject pemObject = reader.readPemObject();
-      if(pemObject.getContent() == null) {
+      if (pemObject.getContent() == null) {
         throw new SCMSecurityException("Invalid Certificate signing request",
             INVALID_CSR);
       }
@@ -268,10 +268,10 @@
 
     private Extension getKeyUsageExtension() throws IOException {
       int keyUsageFlag = KeyUsage.keyAgreement;
-      if(digitalEncryption){
+      if (digitalEncryption) {
         keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment;
       }
-      if(digitalSignature) {
+      if (digitalSignature) {
         keyUsageFlag |= KeyUsage.digitalSignature;
       }
 
@@ -303,7 +303,7 @@
       List<Extension> extensions = new ArrayList<>();
 
       // Add basic extension
-      if(ca) {
+      if (ca) {
         extensions.add(getBasicExtension());
       }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
index 5a9fba6..8aa512f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
@@ -44,7 +44,7 @@
   private Instant revocationTime;
 
   private CRLInfo(X509CRL x509CRL, long creationTimestamp, long crlSequenceID) {
-    assert((x509CRL != null) &&
+    assert ((x509CRL != null) &&
         !x509CRL.getRevokedCertificates().isEmpty());
     this.x509CRL = x509CRL;
     this.creationTimestamp = creationTimestamp;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
index 3178cfd..2d53b8f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java
@@ -44,7 +44,7 @@
     try {
       return CRLInfo.fromProtobuf(
           HddsProtos.CRLInfoProto.PARSER.parseFrom(rawData));
-    } catch (CertificateException|CRLException e) {
+    } catch (CertificateException | CRLException e) {
       throw new IllegalArgumentException(
           "Can't encode the the raw data from the byte array", e);
     }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolExecutor.java
deleted file mode 100644
index 2015c55..0000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolExecutor.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.server.events;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_PREFIX;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT;
-
-/**
- * Fixed thread pool EventExecutor to call all the event handler one-by-one.
- *
- * @param <P> the payload type of events
- */
-@Metrics(context = "EventQueue")
-public class FixedThreadPoolExecutor<P> implements EventExecutor<P> {
-
-  private static final String EVENT_QUEUE = "EventQueue";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FixedThreadPoolExecutor.class);
-
-  private final String name;
-
-  private final ExecutorService executor;
-
-  @Metric
-  private MutableCounterLong queued;
-
-  @Metric
-  private MutableCounterLong done;
-
-  @Metric
-  private MutableCounterLong failed;
-
-  @Metric
-  private MutableCounterLong scheduled;
-
-  /**
-   * Create FixedThreadPoolExecutor.
-   *
-   * @param eventName
-   * @param name Unique name used in monitoring and metrics.
-   */
-  public FixedThreadPoolExecutor(String eventName, String name) {
-    this.name = name;
-    DefaultMetricsSystem.instance()
-        .register(EVENT_QUEUE + name, "Event Executor metrics ", this);
-
-
-    OzoneConfiguration configuration = new OzoneConfiguration();
-    int threadPoolSize = configuration.getInt(OZONE_SCM_EVENT_PREFIX +
-            StringUtils.camelize(eventName) + ".thread.pool.size",
-        OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT);
-
-    executor = Executors.newFixedThreadPool(threadPoolSize, runnable -> {
-      Thread thread = new Thread(runnable);
-      thread.setName(EVENT_QUEUE + "-" + name);
-      return thread;
-    });
-  }
-
-  @Override
-  public void onMessage(EventHandler<P> handler, P message, EventPublisher
-      publisher) {
-    queued.incr();
-    executor.execute(() -> {
-      scheduled.incr();
-      try {
-        handler.onMessage(message, publisher);
-        done.incr();
-      } catch (Exception ex) {
-        LOG.error("Error on execution message {}", message, ex);
-        failed.incr();
-      }
-    });
-  }
-
-  @Override
-  public long failedEvents() {
-    return failed.value();
-  }
-
-  @Override
-  public long successfulEvents() {
-    return done.value();
-  }
-
-  @Override
-  public long queuedEvents() {
-    return queued.value();
-  }
-
-  @Override
-  public long scheduledEvents() {
-    return scheduled.value();
-  }
-
-  @Override
-  public void close() {
-    executor.shutdown();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
new file mode 100644
index 0000000..175b59e
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_PREFIX;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT;
+
+/**
+ * Fixed thread pool EventExecutor that calls the event handlers one by one.
+ * Payloads with the same hashcode will be mapped to the same thread.
+ *
+ * @param <P> the payload type of events
+ */
+@Metrics(context = "EventQueue")
+public class FixedThreadPoolWithAffinityExecutor<P>
+    implements EventExecutor<P> {
+
+  private static final String EVENT_QUEUE = "EventQueue";
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FixedThreadPoolWithAffinityExecutor.class);
+
+  private final String name;
+
+  private final List<ThreadPoolExecutor> executors;
+
+  // MutableCounterLong is thread safe.
+  @Metric
+  private MutableCounterLong queued;
+
+  @Metric
+  private MutableCounterLong done;
+
+  @Metric
+  private MutableCounterLong failed;
+
+  @Metric
+  private MutableCounterLong scheduled;
+
+  /**
+   * Create FixedThreadPoolWithAffinityExecutor: based on the payload's
+   * hash code, a given payload is always scheduled to the same thread.
+   *
+   * @param name Unique name used in monitoring and metrics.
+   * @param executors single-thread executors backing the affinity routing.
+   */
+  public FixedThreadPoolWithAffinityExecutor(
+      String name,
+      List<ThreadPoolExecutor> executors) {
+    this.name = name;
+    DefaultMetricsSystem.instance()
+        .register(EVENT_QUEUE + name,
+            "Event Executor metrics ",
+            this);
+    this.executors = executors;
+  }
+
+  public static List<ThreadPoolExecutor> initializeExecutorPool(
+      String eventName) {
+    List<ThreadPoolExecutor> executors = new LinkedList<>();
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    int threadPoolSize = configuration.getInt(OZONE_SCM_EVENT_PREFIX +
+            StringUtils.camelize(eventName) + ".thread.pool.size",
+        OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT);
+    for (int i = 0; i < threadPoolSize; i++) {
+      BlockingQueue<Runnable> workQueue = new LinkedBlockingDeque<>();
+      ThreadFactory threadFactory = new ThreadFactoryBuilder()
+          .setDaemon(true)
+          .setNameFormat("FixedThreadPoolWithAffinityExecutor-" + i + "-%d")
+          .build();
+      executors.add(
+          new ThreadPoolExecutor(
+              1,
+              1,
+              0,
+              TimeUnit.SECONDS,
+              workQueue,
+              threadFactory));
+    }
+    return executors;
+  }
+
+  @Override
+  public void onMessage(EventHandler<P> handler, P message, EventPublisher
+      publisher) {
+    queued.incr();
+    // Messages that need to be routed to the same thread must implement
+    // hashCode so that equal messages map to the same executor. Messages
+    // relying on the default identity hash are still dispatched safely.
+    int index = message.hashCode() & (executors.size() - 1);
+    executors.get(index).execute(() -> {
+      scheduled.incr();
+      try {
+        handler.onMessage(message, publisher);
+        done.incr();
+      } catch (Exception ex) {
+        LOG.error("Error on execution message {}", message, ex);
+        failed.incr();
+      }
+    });
+  }
+
+  @Override
+  public long failedEvents() {
+    return failed.value();
+  }
+
+  @Override
+  public long successfulEvents() {
+    return done.value();
+  }
+
+  @Override
+  public long queuedEvents() {
+    return queued.value();
+  }
+
+  @Override
+  public long scheduledEvents() {
+    return scheduled.value();
+  }
+
+  @Override
+  public void close() {
+    for (ThreadPoolExecutor executor : executors) {
+      executor.shutdown();
+    }
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+}
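
A minimal wiring sketch for the new executor, mirroring the registration pattern used in the TestEventQueue change further below; the TypedEvent named "SampleEvent" and the no-op lambda handler are illustrative assumptions, while each executor being backed by its own queue is what keeps same-hash payloads on a single thread:

    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor;
    import org.apache.hadoop.hdds.server.events.TypedEvent;

    // Payloads with equal hashCode are always dispatched to the same worker
    // thread, so per-key ordering of events is preserved.
    TypedEvent<Long> event = new TypedEvent<>(Long.class, "SampleEvent");
    EventHandler<Long> handler = (payload, publisher) -> { };  // no-op, illustrative
    EventQueue queue = new EventQueue();
    queue.addHandler(event,
        new FixedThreadPoolWithAffinityExecutor<>(
            EventQueue.getExecutorName(event, handler),
            FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(
                event.getName())),
        handler);
    queue.fireEvent(event, 42L);
    queue.processAll(1000);
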
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
index a3f1b6b..f4f188a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
@@ -192,7 +192,7 @@
   protected static String generateFileName(Integer pid, Output output,
       Event event) {
     String outputFormat = output.name().toLowerCase();
-    if(output == Output.FLAMEGRAPH) {
+    if (output == Output.FLAMEGRAPH) {
       outputFormat = "html";
     }
     return FILE_PREFIX + pid + "-" +
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
index 87dc882..3dc1766 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
@@ -32,7 +32,7 @@
  * This interface is for maintaining DB checkpoint statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about="DB checkpoint Metrics", context="dfs")
+@Metrics(about = "DB checkpoint Metrics", context = "dfs")
 public class DBCheckpointMetrics {
   private static final String SOURCE_NAME =
       DBCheckpointMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index b06e8d0..2f7cccd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -419,7 +419,7 @@
           return getCAListWithRetry(() -> waitForCACerts(
               scmSecurityProtocolClient::listCACertificate,
               expectedCount), waitDuration);
-        } else{
+        } else {
           return scmSecurityProtocolClient.listCACertificate();
         }
       }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 30c2395..98821d9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -32,6 +32,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import com.google.common.base.Strings;
 import com.google.protobuf.BlockingService;
 import org.apache.commons.compress.archivers.ArchiveEntry;
 import org.apache.commons.compress.archivers.ArchiveOutputStream;
@@ -397,8 +398,8 @@
    */
   public static String getDatanodeIdFilePath(ConfigurationSource conf) {
     String dataNodeIDDirPath =
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
-    if (dataNodeIDDirPath == null) {
+        conf.getTrimmed(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
+    if (Strings.isNullOrEmpty(dataNodeIDDirPath)) {
       File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
       if (metaDirPath == null) {
         // this means meta data is not found, in theory should not happen at
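
The switch from get() to getTrimmed() plus Strings.isNullOrEmpty() means an empty or whitespace-only ozone.scm.datanode.id.dir value now falls back to the Ozone metadata directory instead of producing a blank path. A small sketch of the behavior, with illustrative values:

    import com.google.common.base.Strings;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, "   ");   // whitespace only
    String dir = conf.getTrimmed(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
    // dir is "" after trimming, so Strings.isNullOrEmpty(dir) is true and the
    // datanode id path is derived from ServerUtils.getOzoneMetaDirPath(conf).
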
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
index 508320e..e3b91ba 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
@@ -82,7 +82,7 @@
     private int keysScanned = 0;
     private int keysHinted = 0;
 
-    public KeyPrefixFilter() {}
+    public KeyPrefixFilter() { }
 
     /**
      * KeyPrefixFilter constructor. It is made of positive and negative prefix
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index ec4c0e1..1d1bff1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -47,7 +47,7 @@
   private TransactionInfo(String transactionInfo) {
     String[] tInfo =
         transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY);
-    Preconditions.checkState(tInfo.length==2,
+    Preconditions.checkState(tInfo.length == 2,
         "Incorrect TransactionInfo value");
 
     term = Long.parseLong(tInfo[0]);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
index 50ac54f..c9bf385 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
@@ -124,8 +124,8 @@
     Env env = Env.getDefault();
     DBOptions options = null;
     File configLocation = getConfigLocation();
-    if(configLocation != null &&
-        StringUtil.isNotBlank(configLocation.toString())){
+    if (configLocation != null &&
+        StringUtil.isNotBlank(configLocation.toString())) {
       Path optionsFile = Paths.get(configLocation.toString(),
           getOptionsFileNameFromDB(dbFileName));
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index ad48a19..8b07003 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -168,7 +168,7 @@
    * @return DBStore
    */
   public DBStore build() throws IOException {
-    if(StringUtil.isBlank(dbname) || (dbPath == null)) {
+    if (StringUtil.isBlank(dbname) || (dbPath == null)) {
       LOG.error("Required Parameter missing.");
       throw new IOException("Required parameter is missing. Please make sure "
           + "Path and DB name is provided.");
@@ -340,7 +340,7 @@
         try {
           option = DBConfigFromFile.readFromFile(dbname,
               columnFamilyDescriptors);
-          if(option != null) {
+          if (option != null) {
             LOG.info("Using RocksDB DBOptions from {}.ini file", dbname);
           }
         } catch (IOException ex) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java
index aa48c5e..d2dcc05 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java
@@ -29,6 +29,7 @@
 
   private List<byte[]> dataList = new ArrayList<>();
   private long currentSequenceNumber = -1;
+  private long latestSequenceNumber = -1;
 
   public void addWriteBatch(byte[] data, long sequenceNumber) {
     dataList.add(data);
@@ -48,5 +49,13 @@
   public long getCurrentSequenceNumber() {
     return currentSequenceNumber;
   }
+
+  public void setLatestSequenceNumber(long sequenceNumber) {
+    this.latestSequenceNumber = sequenceNumber;
+  }
+
+  public long getLatestSequenceNumber() {
+    return latestSequenceNumber;
+  }
 }
 
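
With RDBStore now stamping the wrapper with the database's latest sequence number (see the RDBStore change below), a consumer of delta updates can estimate how far behind it still is. A rough sketch, where dbStore and lastAppliedSequence are illustrative and getUpdatesSince is the RDBStore method exercised by TestRDBStore#testGetDBUpdatesSince later in this patch:

    DBUpdatesWrapper updates = dbStore.getUpdatesSince(lastAppliedSequence);
    long remaining =
        updates.getLatestSequenceNumber() - updates.getCurrentSequenceNumber();
    if (remaining > 0) {
      // The source DB has moved past what this batch covers; fetch again.
    }
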
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index eb71ec1..7e45994 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -432,6 +432,7 @@
       LOG.error("Unable to get delta updates since sequenceNumber {} ",
           sequenceNumber, e);
     }
+    dbUpdatesWrapper.setLatestSequenceNumber(db.getLatestSequenceNumber());
     return dbUpdatesWrapper;
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index f92306a..c7f6196 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -98,7 +98,7 @@
     if (cacheType == CacheType.FULL_CACHE) {
       cache = new FullTableCache<>();
       //fill cache
-      try(TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
+      try (TableIterator<KEY, ? extends KeyValue<KEY, VALUE>> tableIterator =
               iterator()) {
 
         while (tableIterator.hasNext()) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
index 7be2921..401d644 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java
@@ -56,7 +56,7 @@
 
   @Override
   public int compareTo(Object o) {
-    if(Objects.equals(key, ((CacheKey<?>)o).key)) {
+    if (Objects.equals(key, ((CacheKey<?>)o).key)) {
       return 0;
     } else {
       return key.toString().compareTo((((CacheKey<?>) o).key).toString());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
index d87e90d..120a08b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java
@@ -63,7 +63,7 @@
 
   @Override
   public int compareTo(Object o) {
-    if(this.epoch == ((EpochEntry<?>)o).epoch) {
+    if (this.epoch == ((EpochEntry<?>)o).epoch) {
       return 0;
     } else if (this.epoch < ((EpochEntry<?>)o).epoch) {
       return -1;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
index 39bf082..a2b2e77 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
@@ -92,7 +92,7 @@
   }
 
   @Override
-  public void reinitialize(SCMMetadataStore metadataStore) {}
+  public void reinitialize(SCMMetadataStore metadataStore) { }
 
   @Override
   public List<CRLInfo> getCrls(List<Long> crlIds) throws IOException {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
index 569014e..99193cb 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
@@ -51,7 +51,7 @@
 import org.apache.ozone.test.LambdaTestUtils;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
 import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
index 3d32a33..d6df77f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
@@ -138,7 +138,7 @@
     assertTrue(crlFile.exists());
 
     try (BufferedReader reader = new BufferedReader(new InputStreamReader(
-        new FileInputStream(crlFile), UTF_8))){
+        new FileInputStream(crlFile), UTF_8))) {
 
       // Verify contents of the file
       String header = reader.readLine();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
index 5b1a1f0..1aab7a5 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
@@ -279,8 +279,8 @@
         GeneralNames.fromExtensions(
             extensions, Extension.subjectAlternativeName);
     GeneralName[] names = gns.getNames();
-    for(int i=0; i < names.length; i++) {
-      if(names[i].getTagNo() == GeneralName.otherName) {
+    for (int i = 0; i < names.length; i++) {
+      if (names[i].getTagNo() == GeneralName.otherName) {
         ASN1Encodable asn1Encodable = names[i].getName();
         Iterator iterator = ((DLSequence) asn1Encodable).iterator();
         while (iterator.hasNext()) {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
index 1e3a8f4..776aa4a 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
@@ -165,7 +165,7 @@
       OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
           ip -> {
             builder.addIpAddress(ip.getHostAddress());
-            if(validator.isValid(ip.getCanonicalHostName())) {
+            if (validator.isValid(ip.getCanonicalHostName())) {
               builder.addDnsName(ip.getCanonicalHostName());
             }
           });
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
index 9bad0f31..2fef2b8 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
@@ -79,7 +79,7 @@
     HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
     KeyPair keyPair = keyGen.generateKey(4096);
     PublicKey publicKey = keyPair.getPublic();
-    if(publicKey instanceof RSAPublicKey) {
+    if (publicKey instanceof RSAPublicKey) {
       Assert.assertEquals(4096,
           ((RSAPublicKey)(publicKey)).getModulus().bitLength());
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index f2a7b38..1af979b 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -70,35 +70,43 @@
 
     TestHandler testHandler = new TestHandler();
 
-    queue.addHandler(EVENT1, new FixedThreadPoolExecutor<>(EVENT1.getName(),
-            EventQueue.getExecutorName(EVENT1, testHandler)), testHandler);
+    queue.addHandler(EVENT1,
+        new FixedThreadPoolWithAffinityExecutor<>(
+            EventQueue.getExecutorName(EVENT1, testHandler),
+            FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(
+                EVENT1.getName())),
+        testHandler);
 
     queue.fireEvent(EVENT1, 11L);
     queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
-    queue.fireEvent(EVENT1, 11L);
+    queue.fireEvent(EVENT1, 12L);
+    queue.fireEvent(EVENT1, 13L);
+    queue.fireEvent(EVENT1, 14L);
+    queue.fireEvent(EVENT1, 15L);
+    queue.fireEvent(EVENT1, 16L);
+    queue.fireEvent(EVENT1, 17L);
+    queue.fireEvent(EVENT1, 18L);
+    queue.fireEvent(EVENT1, 19L);
+    queue.fireEvent(EVENT1, 20L);
 
     EventExecutor eventExecutor =
         queue.getExecutorAndHandler(EVENT1).keySet().iterator().next();
 
     // As it is fixed threadpool executor with 10 threads, all should be
     // scheduled.
-    Assert.assertEquals(10, eventExecutor.queuedEvents());
+    Assert.assertEquals(11, eventExecutor.queuedEvents());
 
     // As we don't see all 10 events scheduled.
     Assert.assertTrue(eventExecutor.scheduledEvents() > 1 &&
         eventExecutor.scheduledEvents() <= 10);
 
     queue.processAll(60000);
-    Assert.assertEquals(110, eventTotal.intValue());
 
-    Assert.assertEquals(10, eventExecutor.successfulEvents());
+    Assert.assertEquals(11, eventExecutor.scheduledEvents());
+
+    Assert.assertEquals(166, eventTotal.intValue());
+
+    Assert.assertEquals(11, eventExecutor.successfulEvents());
     eventTotal.set(0);
 
   }
@@ -131,5 +139,4 @@
     Assert.assertEquals(23, result[1]);
 
   }
-
 }
\ No newline at end of file
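
The updated expectations follow from the eleven events fired above: the queued, scheduled and successful counts are each 11, and the payload total is 11 + 11 + (12 + 13 + ... + 20) = 22 + 144 = 166.
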
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
index 1410e17..5beb90e 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdds.server.http;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import javax.servlet.http.HttpServletRequest;
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index 99fcbae..e78bcb0 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -69,7 +69,7 @@
   public void builderWithOneParamV2() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     thrown.expect(IOException.class);
@@ -82,7 +82,7 @@
   public void builderWithOpenClose() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -97,7 +97,7 @@
   public void builderWithDoubleTableName() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     // Registering a new table with the same name should replace the previous
@@ -127,7 +127,7 @@
   public void builderWithDataWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -156,7 +156,7 @@
   public void builderWithDiskProfileWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index f95a8ff..ed8744c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -74,7 +74,7 @@
     statistics.setStatsLevel(StatsLevel.ALL);
     options = options.setStatistics(statistics);
     configSet = new HashSet<>();
-    for(String name : families) {
+    for (String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
@@ -288,42 +288,6 @@
     }
   }
 
-  /**
-   * Not strictly a unit test. Just a confirmation of the expected behavior
-   * of RocksDB keyMayExist API.
-   * Expected behavior - On average, keyMayExist latency < key.get() latency
-   * for invalid keys.
-   * @throws Exception if unable to read from RocksDB.
-   */
-  @Test
-  public void testRocksDBKeyMayExistApi() throws Exception {
-    try (RDBStore newStore =
-             new RDBStore(folder.newFolder(), options, configSet)) {
-      RocksDB db = newStore.getDb();
-
-      //Test with 50 invalid keys.
-      long start = System.nanoTime();
-      for (int i = 0; i < 50; i++) {
-        Assert.assertTrue(db.get(
-            org.apache.commons.codec.binary.StringUtils
-                .getBytesUtf16("key" + i)) == null);
-      }
-      long end = System.nanoTime();
-      long keyGetLatency = end - start;
-
-      start = System.nanoTime();
-      for (int i = 0; i < 50; i++) {
-        Assert.assertFalse(db.keyMayExist(
-            org.apache.commons.codec.binary.StringUtils
-                .getBytesUtf16("key" + i), null));
-      }
-      end = System.nanoTime();
-      long keyMayExistLatency = end - start;
-
-      Assert.assertTrue(keyMayExistLatency < keyGetLatency);
-    }
-  }
-
   @Test
   public void testGetDBUpdatesSince() throws Exception {
 
@@ -394,7 +358,7 @@
     options.setCreateMissingColumnFamilies(true);
     configSet = new HashSet<>();
     List<String> familiesMinusOne = families.subList(0, families.size() - 1);
-    for(String name : familiesMinusOne) {
+    for (String name : familiesMinusOne) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
index fea40bb..b49556d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
@@ -92,7 +92,7 @@
   }
 
   @Test
-  public void testHasNextDependsOnIsvalid(){
+  public void testHasNextDependsOnIsvalid() {
     when(rocksDBIteratorMock.isValid()).thenReturn(true, true, false);
 
     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);
@@ -169,7 +169,7 @@
 
     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);
     byte[] key = null;
-    if(iter.hasNext()) {
+    if (iter.hasNext()) {
       ByteArrayKeyValue entry = iter.next();
       key = entry.getKey();
     }
@@ -191,7 +191,7 @@
     ByteArrayKeyValue entry;
     byte[] key = null;
     byte[] value = null;
-    if(iter.hasNext()) {
+    if (iter.hasNext()) {
       entry = iter.next();
       key = entry.getKey();
       value = entry.getValue();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 5d00763..0f1858b 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -63,7 +63,7 @@
     count++;
     try {
       Assert.assertNotNull(keyValue.getKey());
-    } catch(IOException ex) {
+    } catch (IOException ex) {
       Assert.fail("Unexpected Exception " + ex.toString());
     }
     return true;
@@ -80,7 +80,7 @@
     options = options.setStatistics(statistics);
 
     Set<TableConfig> configSet = new HashSet<>();
-    for(String name : families) {
+    for (String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 073027f..837ea27 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -296,7 +296,7 @@
       }
 
       ArrayList<Long> epochs = new ArrayList<>();
-      for (long i=0; i<=5L; i++) {
+      for (long i = 0; i <= 5L; i++) {
         epochs.add(i);
       }
       testTable.cleanupCache(epochs);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
index a1cc7dd..860a695 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
@@ -74,13 +74,13 @@
   public void testPartialTableCache() {
 
 
-    for (int i = 0; i< 10; i++) {
+    for (int i = 0; i < 10; i++) {
       tableCache.put(new CacheKey<>(Integer.toString(i)),
           new CacheValue<>(Optional.of(Integer.toString(i)), i));
     }
 
 
-    for (int i=0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -94,7 +94,7 @@
     // On a full table cache if some one calls cleanup it is a no-op.
     tableCache.evictCache(epochs);
 
-    for (int i=5; i < 10; i++) {
+    for (int i = 5; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -109,7 +109,7 @@
     int cleanupCount = 0;
 
     ArrayList<Long> epochs = new ArrayList();
-    for (long i=0; i<insertedCount; i+=2) {
+    for (long i = 0; i < insertedCount; i += 2) {
       if (cleanupCount++ < 1000) {
         epochs.add(i);
       }
@@ -329,7 +329,7 @@
         });
 
     // Check we have first 10 entries in cache.
-    for (int i=1; i <= 10; i++) {
+    for (int i = 1; i <= 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -357,13 +357,13 @@
       final int tc = totalCount;
       Assert.assertEquals(tc - deleted, tableCache.size());
       // Check if we have remaining entries.
-      for (int i=6; i <= totalCount; i++) {
+      for (int i = 6; i <= totalCount; i++) {
         Assert.assertEquals(Integer.toString(i), tableCache.get(
             new CacheKey<>(Integer.toString(i))).getCacheValue());
       }
 
       epochs = new ArrayList<>();
-      for (long i=6; i<= totalCount; i++) {
+      for (long i = 6; i <= totalCount; i++) {
         epochs.add(i);
       }
 
@@ -373,7 +373,7 @@
       Assert.assertEquals(0, tableCache.size());
     } else {
       ArrayList<Long> epochs = new ArrayList<>();
-      for (long i=0; i<= totalCount; i++) {
+      for (long i = 0; i <= totalCount; i++) {
         epochs.add(i);
       }
       tableCache.evictCache(epochs);
@@ -453,7 +453,7 @@
 
     tableCache.evictCache(epochs);
 
-    if(cacheType == TableCache.CacheType.PARTIAL_CACHE) {
+    if (cacheType == TableCache.CacheType.PARTIAL_CACHE) {
       Assert.assertTrue(tableCache.size() == 0);
       Assert.assertTrue(tableCache.getEpochEntrySet().size() == 0);
     } else {
@@ -475,7 +475,7 @@
   private int writeToCache(int count, int startVal, long sleep)
       throws InterruptedException {
     int counter = 1;
-    while (counter <= count){
+    while (counter <= count) {
       tableCache.put(new CacheKey<>(Integer.toString(startVal)),
           new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
       startVal++;
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
index 9f57e14..01c9a1e 100644
--- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
@@ -186,12 +186,12 @@
 
   optional   PutBlockRequestProto putBlock = 12;
   optional   GetBlockRequestProto getBlock = 13;
-  optional   DeleteBlockRequestProto deleteBlock = 14;
+  optional   DeleteBlockRequestProto deleteBlock = 14 [deprecated = true];
   optional   ListBlockRequestProto listBlock = 15;
 
   optional   ReadChunkRequestProto readChunk = 16;
   optional   WriteChunkRequestProto writeChunk = 17;
-  optional   DeleteChunkRequestProto deleteChunk = 18;
+  optional   DeleteChunkRequestProto deleteChunk = 18 [deprecated = true];
   optional   ListChunkRequestProto listChunk = 19;
 
   optional   PutSmallFileRequestProto putSmallFile = 20;
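
Marking these fields with [deprecated = true] leaves the wire format and field numbers unchanged; protoc only annotates the generated Java accessors with @Deprecated, so existing callers keep compiling but see deprecation warnings.
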
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index b55531e..caac440 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -84,6 +84,13 @@
     required string hostName = 3;      // Hostname of SCM.
 }
 
+message NodeDetailsProto {
+    required string uuid = 1;
+    required string clusterId = 2;
+    required string hostName = 3;
+    required NodeType nodeType = 4;
+}
+
 message Port {
     required string name = 1;
     required uint32 value = 2;
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index cb8d8d8..1b4a493 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -213,7 +213,7 @@
   required State state = 2;
   optional int64 size = 3;
   optional int64 used = 4;
-  optional int64 keyCount = 5;
+  optional int64 keyCount = 5; // keyCount here refers to BlockCount
   optional int64 readCount = 6;
   optional int64 writeCount = 7;
   optional int64 readBytes = 8;
@@ -325,6 +325,8 @@
   optional SetNodeOperationalStateCommandProto setNodeOperationalStateCommandProto = 9;
   optional FinalizeNewLayoutVersionCommandProto
   finalizeNewLayoutVersionCommandProto = 10;
+  optional RefreshVolumeUsageCommandProto refreshVolumeUsageCommandProto = 11;
+
 
   // If running upon Ratis, holds term of underlying RaftServer iff current
   // SCM is a leader. If running without Ratis, holds SCMContext.INVALID_TERM.
@@ -418,6 +420,13 @@
   required int64 cmdId = 2;
 }
 
+/**
+This command asks the datanode to refresh disk usage immediately.
+*/
+message RefreshVolumeUsageCommandProto {
+  required int64 cmdId = 1;
+}
+
 message SetNodeOperationalStateCommandProto {
   required  int64 cmdId = 1;
   required  NodeOperationalState nodeOperationalState = 2;
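
A minimal sketch of building the new heartbeat command message; the generated outer class name (StorageContainerDatanodeProtocolProtos) is assumed from this proto file's usual java_outer_classname option:

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.RefreshVolumeUsageCommandProto;

    // cmdId is the only required field of the new message.
    RefreshVolumeUsageCommandProto refresh =
        RefreshVolumeUsageCommandProto.newBuilder()
            .setCmdId(1L)
            .build();
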
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
index c682071..dc6bcf9 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
@@ -55,7 +55,7 @@
     optional SCMGetCrlsRequestProto getCrlsRequest = 10;
     optional SCMGetLatestCrlIdRequestProto getLatestCrlIdRequest = 11;
     optional SCMRevokeCertificatesRequestProto revokeCertificatesRequest = 12;
-
+    optional SCMGetCertRequestProto getCertRequest = 13;
 }
 
 message SCMSecurityResponse {
@@ -95,6 +95,7 @@
     GetCrls = 9;
     GetLatestCrlId = 10;
     RevokeCertificates = 11;
+    GetCert = 12;
 }
 
 enum Status {
@@ -134,6 +135,11 @@
     required string CSR = 2;
 }
 
+message SCMGetCertRequestProto {
+    required NodeDetailsProto nodeDetails = 1;
+    required string CSR = 2;
+}
+
 message SCMGetSCMCertRequestProto {
     required ScmNodeDetailsProto scmDetails = 1;
     required string CSR = 2;
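
A sketch of what the new getCert request could look like on the client side; the generated outer class names (HddsProtos, SCMSecurityProtocolProtos) and all field values are assumptions for illustration:

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertRequestProto;

    HddsProtos.NodeDetailsProto node = HddsProtos.NodeDetailsProto.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setClusterId("CID-example")                 // illustrative
        .setHostName("node-1.example.com")           // illustrative
        .setNodeType(HddsProtos.NodeType.OM)
        .build();

    SCMGetCertRequestProto request = SCMGetCertRequestProto.newBuilder()
        .setNodeDetails(node)
        .setCSR(pemEncodedCsr)                       // assumed PEM-encoded CSR string
        .build();
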
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index ec48b8a..bcdb737 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -351,7 +351,7 @@
               dataSizeRequired))) {
         LOG.debug("Datanode {} is chosen. Required metadata size is {} and " +
                 "required data size is {}",
-            datanodeDetails.toString(), metadataSizeRequired, dataSizeRequired);
+            datanodeDetails, metadataSizeRequired, dataSizeRequired);
         return true;
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 3c5fdc0..78a87b8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -442,7 +442,7 @@
       commitTransactions(ackProto.getResultsList(),
           UUID.fromString(ackProto.getDnId()));
       metrics.incrBlockDeletionCommandSuccess();
-    } else if (status == CommandStatus.Status.FAILED){
+    } else if (status == CommandStatus.Status.FAILED) {
       metrics.incrBlockDeletionCommandFailure();
     } else {
       LOG.error("Delete Block Command is not executed yet.");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index 010fbaa..9b94e20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -73,17 +73,37 @@
   /**
    * Process the given ContainerReplica received from specified datanode.
    *
-   * @param datanodeDetails DatanodeDetails of the node which reported
-   *                        this replica
-   * @param replicaProto ContainerReplica
-   *
-   * @throws IOException In case of any Exception while processing the report
+   * @param datanodeDetails DatanodeDetails of the reporting datanode
+   * @param replicaProto Protobuf representing the reported replica
+   * @param publisher EventPublisher instance
+   * @throws IOException
+   * @throws InvalidStateTransitionException
    */
   protected void processContainerReplica(final DatanodeDetails datanodeDetails,
       final ContainerReplicaProto replicaProto, final EventPublisher publisher)
       throws IOException, InvalidStateTransitionException {
-    final ContainerID containerId = ContainerID
-        .valueOf(replicaProto.getContainerID());
+    ContainerInfo container = getContainerManager().getContainer(
+        ContainerID.valueOf(replicaProto.getContainerID()));
+    processContainerReplica(
+        datanodeDetails, container, replicaProto, publisher);
+  }
+
+  /**
+   * Process the given ContainerReplica received from specified datanode.
+   *
+   * @param datanodeDetails DatanodeDetails of the node which reported
+   *                        this replica
+   * @param containerInfo ContainerInfo representing the container
+   * @param replicaProto ContainerReplica
+   * @param publisher EventPublisher instance
+   *
+   * @throws IOException In case of any Exception while processing the report
+   */
+  protected void processContainerReplica(final DatanodeDetails datanodeDetails,
+      final ContainerInfo containerInfo,
+      final ContainerReplicaProto replicaProto, final EventPublisher publisher)
+      throws IOException, InvalidStateTransitionException {
+    final ContainerID containerId = containerInfo.containerID();
 
     if (logger.isDebugEnabled()) {
       logger.debug("Processing replica of container {} from datanode {}",
@@ -91,9 +111,9 @@
     }
     // Synchronized block should be replaced by container lock,
     // once we have introduced lock inside ContainerInfo.
-    synchronized (containerManager.getContainer(containerId)) {
-      updateContainerStats(datanodeDetails, containerId, replicaProto);
-      if (!updateContainerState(datanodeDetails, containerId, replicaProto,
+    synchronized (containerInfo) {
+      updateContainerStats(datanodeDetails, containerInfo, replicaProto);
+      if (!updateContainerState(datanodeDetails, containerInfo, replicaProto,
           publisher)) {
         updateContainerReplica(datanodeDetails, containerId, replicaProto);
       }
@@ -104,16 +124,15 @@
    * Update the container stats if it's lagging behind the stats in reported
    * replica.
    *
-   * @param containerId ID of the container
+   * @param containerInfo ContainerInfo representing the container
    * @param replicaProto Container Replica information
    * @throws ContainerNotFoundException If the container is not present
    */
   private void updateContainerStats(final DatanodeDetails datanodeDetails,
-                                    final ContainerID containerId,
+                                    final ContainerInfo containerInfo,
                                     final ContainerReplicaProto replicaProto)
       throws ContainerNotFoundException {
-    final ContainerInfo containerInfo = containerManager
-        .getContainer(containerId);
+    final ContainerID containerId = containerInfo.containerID();
 
     if (isHealthy(replicaProto::getState)) {
       if (containerInfo.getSequenceId() <
@@ -165,19 +184,18 @@
    * Updates the container state based on the given replica state.
    *
    * @param datanode Datanode from which the report is received
-   * @param containerId ID of the container
+   * @param container ContainerInfo representing the container
    * @param replica ContainerReplica
    * @boolean true - replica should be ignored in the next process
    * @throws IOException In case of Exception
    */
   private boolean updateContainerState(final DatanodeDetails datanode,
-                                    final ContainerID containerId,
+                                    final ContainerInfo container,
                                     final ContainerReplicaProto replica,
                                     final EventPublisher publisher)
       throws IOException, InvalidStateTransitionException {
 
-    final ContainerInfo container = containerManager
-        .getContainer(containerId);
+    final ContainerID containerId = container.containerID();
     boolean ignored = false;
 
     switch (container.getState()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 74e70bb..a903be6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -84,6 +84,13 @@
   List<ContainerInfo> getContainers(LifeCycleState state);
 
   /**
+   * Returns the number of containers which are in the specified state.
+   *
+   * @param state the {@link LifeCycleState} of containers to count
+   * @return number of containers in the given state.
+   */
+  int getContainerStateCount(LifeCycleState state);
+
+  /**
    * Returns true if the container exist, false otherwise.
    * @param id Container ID
    * @return true if container exist, else false
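The new count method lets callers ask how many containers are in a given state without materializing ContainerInfo objects. A hypothetical caller, shown only to illustrate the intended use (not part of this patch):

    // Cheaper than containerManager.getContainers(LifeCycleState.OPEN).size(),
    // which builds a full List<ContainerInfo> just to count it.
    int openContainers =
        containerManager.getContainerStateCount(LifeCycleState.OPEN);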
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 5e220df..1c39efd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -143,7 +143,7 @@
   public ContainerInfo getContainer(final ContainerID id)
       throws ContainerNotFoundException {
     return Optional.ofNullable(containerStateManager
-        .getContainer(id.getProtobuf()))
+        .getContainer(id))
         .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
   }
 
@@ -157,19 +157,36 @@
     final List<ContainerID> containersIds =
         new ArrayList<>(containerStateManager.getContainerIDs());
     Collections.sort(containersIds);
-    return containersIds.stream()
-        .filter(id -> id.getId() >= start).limit(count)
-        .map(ContainerID::getProtobuf)
-        .map(containerStateManager::getContainer)
-        .collect(Collectors.toList());
+    List<ContainerInfo> containers;
+    lock.lock();
+    try {
+      containers = containersIds.stream()
+          .filter(id -> id.getId() >= start).limit(count)
+          .map(containerStateManager::getContainer)
+          .collect(Collectors.toList());
+    } finally {
+      lock.unlock();
+    }
+    return containers;
   }
 
   @Override
   public List<ContainerInfo> getContainers(final LifeCycleState state) {
-    return containerStateManager.getContainerIDs(state).stream()
-        .map(ContainerID::getProtobuf)
-        .map(containerStateManager::getContainer)
-        .filter(Objects::nonNull).collect(Collectors.toList());
+    List<ContainerInfo> containers;
+    lock.lock();
+    try {
+      containers = containerStateManager.getContainerIDs(state).stream()
+          .map(containerStateManager::getContainer)
+          .filter(Objects::nonNull).collect(Collectors.toList());
+    } finally {
+      lock.unlock();
+    }
+    return containers;
+  }
+
+  @Override
+  public int getContainerStateCount(final LifeCycleState state) {
+    return containerStateManager.getContainerIDs(state).size();
   }
 
   @Override
@@ -259,18 +276,18 @@
         .build();
     containerStateManager.addContainer(containerInfo);
     scmContainerManagerMetrics.incNumSuccessfulCreateContainers();
-    return containerStateManager.getContainer(containerID.getProtobuf());
+    return containerStateManager.getContainer(containerID);
   }
 
   @Override
-  public void updateContainerState(final ContainerID id,
+  public void updateContainerState(final ContainerID cid,
                                    final LifeCycleEvent event)
       throws IOException, InvalidStateTransitionException {
-    final HddsProtos.ContainerID cid = id.getProtobuf();
+    HddsProtos.ContainerID protoId = cid.getProtobuf();
     lock.lock();
     try {
       if (containerExist(cid)) {
-        containerStateManager.updateContainerState(cid, event);
+        containerStateManager.updateContainerState(protoId, event);
       } else {
         throwContainerNotFoundException(cid);
       }
@@ -283,15 +300,14 @@
   public Set<ContainerReplica> getContainerReplicas(final ContainerID id)
       throws ContainerNotFoundException {
     return Optional.ofNullable(containerStateManager
-        .getContainerReplicas(id.getProtobuf()))
+        .getContainerReplicas(id))
         .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
   }
 
   @Override
-  public void updateContainerReplica(final ContainerID id,
+  public void updateContainerReplica(final ContainerID cid,
                                      final ContainerReplica replica)
       throws ContainerNotFoundException {
-    final HddsProtos.ContainerID cid = id.getProtobuf();
     if (containerExist(cid)) {
       containerStateManager.updateContainerReplica(cid, replica);
     } else {
@@ -300,10 +316,9 @@
   }
 
   @Override
-  public void removeContainerReplica(final ContainerID id,
+  public void removeContainerReplica(final ContainerID cid,
                                      final ContainerReplica replica)
       throws ContainerNotFoundException, ContainerReplicaNotFoundException {
-    final HddsProtos.ContainerID cid = id.getProtobuf();
     if (containerExist(cid)) {
       containerStateManager.removeContainerReplica(cid, replica);
     } else {
@@ -395,13 +410,13 @@
   }
 
   @Override
-  public void deleteContainer(final ContainerID id)
+  public void deleteContainer(final ContainerID cid)
       throws IOException {
-    final HddsProtos.ContainerID cid = id.getProtobuf();
+    HddsProtos.ContainerID protoId = cid.getProtobuf();
     lock.lock();
     try {
       if (containerExist(cid)) {
-        containerStateManager.removeContainer(cid);
+        containerStateManager.removeContainer(protoId);
         scmContainerManagerMetrics.incNumSuccessfulDeleteContainers();
       } else {
         scmContainerManagerMetrics.incNumFailureDeleteContainers();
@@ -412,28 +427,15 @@
     }
   }
 
-  @Deprecated
-  private void checkIfContainerExist(final HddsProtos.ContainerID id)
-      throws ContainerNotFoundException {
-    if (!containerStateManager.contains(id)) {
-      throw new ContainerNotFoundException("Container with id #" +
-          id.getId() + " not found.");
-    }
-  }
-
   @Override
   public boolean containerExist(final ContainerID id) {
-    return containerExist(id.getProtobuf());
-  }
-
-  private boolean containerExist(final HddsProtos.ContainerID id) {
     return containerStateManager.contains(id);
   }
 
-  private void throwContainerNotFoundException(final HddsProtos.ContainerID id)
+  private void throwContainerNotFoundException(final ContainerID id)
       throws ContainerNotFoundException {
-    throw new ContainerNotFoundException("Container with id #" +
-        id.getId() + " not found.");
+    throw new ContainerNotFoundException("Container with id " +
+        id + " not found.");
   }
 
   @Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
index 590d1f1..3dab4ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
@@ -100,15 +100,15 @@
 
   @Override
   public String toString() {
-    return "Container State: " +container.getState()+
-        " Replica Count: "+replica.size()+
-        " Healthy Count: "+healthyCount+
-        " Decommission Count: "+decommissionCount+
-        " Maintenance Count: "+maintenanceCount+
-        " inFlightAdd Count: "+inFlightAdd+
-        " inFightDel Count: "+inFlightDel+
-        " ReplicationFactor: "+repFactor+
-        " minMaintenance Count: "+minHealthyForMaintenance;
+    return "Container State: " + container.getState() +
+        " Replica Count: " + replica.size() +
+        " Healthy Count: " + healthyCount +
+        " Decommission Count: " + decommissionCount +
+        " Maintenance Count: " + maintenanceCount +
+        " inFlightAdd Count: " + inFlightAdd +
+        " inFightDel Count: " + inFlightDel +
+        " ReplicationFactor: " + repFactor +
+        " minMaintenance Count: " + minHealthyForMaintenance;
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 32804d7..5cdefcf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -37,10 +37,8 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.stream.Collectors;
 
 /**
  * Handles container reports from datanode.
@@ -92,7 +90,50 @@
   }
 
   /**
-   * Process the container reports from datanodes.
+   * Process the container reports from datanodes. The datanode sends a list
+   * of all containers it knows about, including their State and stats, such as
+   * key count and bytes used.
+   *
+   * Inside SCM, there are two key places which store Container Replica details:
+   *
+   *   1. Inside the SCMNodeManager, there is a Map with datanode as the key
+   *      and the value is a Set of ContainerIDs. This is the set of containers
+   *      stored on this DN, and it is the only place we can quickly obtain
+   *      the list of Containers a DN knows about. This list is used by the
+   *      DeadNodeHandler to close any containers residing on a dead node, and
+   *      to purge the Replicas stored on the dead node from
+   *      SCMContainerManager. It is also used during decommission to check
+   *      that the replicas on a datanode are sufficiently replicated.
+   *
+   *   2. Inside SCMContainerManagerImpl, there is a Map that is keyed on
+   *      ContainerID and the value is a Set of ContainerReplica objects,
+   *      allowing the current locations for any given Container to be found.
+   *
+   *  When a Full Container report is received, we must ensure the list in (1)
+   *  is correct, keeping in mind Containers could get lost on a Datanode, for
+   *  example by a failed disk. We must also store the new replicas, keeping in
+   *  mind their stats may have changed from the previous report and also that
+   *  the container may have gone missing on the datanode.
+   *
+   *  The trickiest part of the processing is handling the containers that
+   *  were on the datanode but are no longer there. To find them, we take a
+   *  snapshot of the ContainerSet from NodeManager (stored in the
+   *  expectedContainersInDatanode variable). For each replica in the report, we
+   *  check if it is in the snapshot and if so remove it from the snapshot.
+   *  After processing all replicas in the report, the containers
+   *  remaining in this set are now missing on the Datanode, and must be removed
+   *  from both NodeManager and ContainerManager.
+   *
+   *  Another case which must be handled is when a datanode reports a replica
+   *  which is not present in SCM. The default Ozone behaviour is to log a
+   *  warning and allow the replica to remain on the datanode. This can be
+   *  changed to have a command sent to the datanode to delete the replica via
+   *  the hdds.scm.unknown-container.action setting.
+   *
+   *  Note that the datanode also sends smaller Incremental Container Reports
+   *  more frequently, but the processing is synchronized on the datanode to
+   *  prevent full and incremental reports from being processed in parallel
+   *  for the same datanode on SCM.
    *
    * @param reportFromDatanode Container Report
    * @param publisher EventPublisher reference
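To make the snapshot-and-remove flow described in the Javadoc above concrete, here is a minimal, hypothetical sketch. It assumes a NodeManager with the getContainers/addContainer/removeContainer methods used elsewhere in this patch; the method name and structure are illustrative only:

    // Sketch: detect replicas that disappeared from a datanode between full
    // reports.
    static void reconcile(NodeManager nodeManager, DatanodeDetails dn,
        List<ContainerReplicaProto> report) throws NodeNotFoundException {
      // Snapshot of what SCM currently believes is stored on this datanode.
      Set<ContainerID> expected = new HashSet<>(nodeManager.getContainers(dn));
      for (ContainerReplicaProto replica : report) {
        ContainerID cid = ContainerID.valueOf(replica.getContainerID());
        if (!expected.remove(cid)) {
          // Reported but not previously known on this DN: record the mapping.
          nodeManager.addContainer(dn, cid);
        }
        // ... update stats and state for this replica here ...
      }
      // Whatever is left in the snapshot was not reported, so it is now
      // missing on the datanode and must be removed from SCM's view of the DN.
      for (ContainerID missing : expected) {
        nodeManager.removeContainer(dn, missing);
      }
    }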
@@ -120,25 +161,37 @@
       synchronized (datanodeDetails) {
         final List<ContainerReplicaProto> replicas =
             containerReport.getReportsList();
-        final Set<ContainerID> containersInSCM =
+        final Set<ContainerID> expectedContainersInDatanode =
             nodeManager.getContainers(datanodeDetails);
 
-        final Set<ContainerID> containersInDn = replicas.parallelStream()
-            .map(ContainerReplicaProto::getContainerID)
-            .map(ContainerID::valueOf).collect(Collectors.toSet());
+        for (ContainerReplicaProto replica : replicas) {
+          ContainerID cid = ContainerID.valueOf(replica.getContainerID());
+          ContainerInfo container = null;
+          try {
+            // We look up the container using the ContainerID object we
+            // obtained from protobuf. However, we don't want to store that
+            // object if there is already an existing instance for the same
+            // ContainerID that we can reuse.
+            container = containerManager.getContainer(cid);
+            cid = container.containerID();
+          } catch (ContainerNotFoundException e) {
+            // Ignore this for now. It will be handled later with a null check
+            // and the code will either log a warning or remove this replica
+            // from the datanode, depending on the cluster setting for handling
+            // unexpected containers.
+          }
 
-        final Set<ContainerID> missingReplicas = new HashSet<>(containersInSCM);
-        missingReplicas.removeAll(containersInDn);
-
-        processContainerReplicas(datanodeDetails, replicas, publisher);
-        processMissingReplicas(datanodeDetails, missingReplicas);
-
-        /*
-         * Update the latest set of containers for this datanode in
-         * NodeManager
-         */
-        nodeManager.setContainers(datanodeDetails, containersInDn);
-
+          boolean alreadyInDn = expectedContainersInDatanode.remove(cid);
+          if (!alreadyInDn) {
+            // This is a new Container not in the nodeManager -> dn map yet
+            nodeManager.addContainer(datanodeDetails, cid);
+          }
+          processSingleReplica(datanodeDetails, container, replica, publisher);
+        }
+        // Anything left in expectedContainersInDatanode was not in the full
+        // report, so it is now missing on the DN and must be removed from
+        // both NodeManager and ContainerManager.
+        processMissingReplicas(datanodeDetails, expectedContainersInDatanode);
         containerManager.notifyContainerReportProcessing(true, true);
       }
     } catch (NodeNotFoundException ex) {
@@ -154,32 +207,34 @@
    * that will be deleted by SCM.
    *
    * @param datanodeDetails Datanode from which this report was received
-   * @param replicas list of ContainerReplicaProto
+   * @param container ContainerInfo representing the container
+   * @param replicaProto Proto message for the replica
    * @param publisher EventPublisher reference
    */
-  private void processContainerReplicas(final DatanodeDetails datanodeDetails,
-      final List<ContainerReplicaProto> replicas,
+  private void processSingleReplica(final DatanodeDetails datanodeDetails,
+      final ContainerInfo container, final ContainerReplicaProto replicaProto,
       final EventPublisher publisher) {
-    for (ContainerReplicaProto replicaProto : replicas) {
-      try {
-        processContainerReplica(datanodeDetails, replicaProto, publisher);
-      } catch (ContainerNotFoundException e) {
-        if(unknownContainerHandleAction.equals(
-            UNKNOWN_CONTAINER_ACTION_WARN)) {
-          LOG.error("Received container report for an unknown container" +
-              " {} from datanode {}.", replicaProto.getContainerID(),
-              datanodeDetails, e);
-        } else if (unknownContainerHandleAction.equals(
-            UNKNOWN_CONTAINER_ACTION_DELETE)) {
-          final ContainerID containerId = ContainerID
-              .valueOf(replicaProto.getContainerID());
-          deleteReplica(containerId, datanodeDetails, publisher, "unknown");
-        }
-      } catch (IOException | InvalidStateTransitionException e) {
-        LOG.error("Exception while processing container report for container" +
+    if (container == null) {
+      if (unknownContainerHandleAction.equals(
+          UNKNOWN_CONTAINER_ACTION_WARN)) {
+        LOG.error("Received container report for an unknown container" +
                 " {} from datanode {}.", replicaProto.getContainerID(),
-            datanodeDetails, e);
+            datanodeDetails);
+      } else if (unknownContainerHandleAction.equals(
+          UNKNOWN_CONTAINER_ACTION_DELETE)) {
+        final ContainerID containerId = ContainerID
+            .valueOf(replicaProto.getContainerID());
+        deleteReplica(containerId, datanodeDetails, publisher, "unknown");
       }
+      return;
+    }
+    try {
+      processContainerReplica(
+          datanodeDetails, container, replicaProto, publisher);
+    } catch (IOException | InvalidStateTransitionException e) {
+      LOG.error("Exception while processing container report for container" +
+              " {} from datanode {}.", replicaProto.getContainerID(),
+          datanodeDetails, e);
     }
   }
 
@@ -193,6 +248,12 @@
                                       final Set<ContainerID> missingReplicas) {
     for (ContainerID id : missingReplicas) {
       try {
+        nodeManager.removeContainer(datanodeDetails, id);
+      } catch (NodeNotFoundException e) {
+        LOG.warn("Failed to remove container {} from a node which does not " +
+            "exist {}", id, datanodeDetails, e);
+      }
+      try {
         containerManager.getContainerReplicas(id).stream()
             .filter(replica -> replica.getDatanodeDetails()
                 .equals(datanodeDetails)).findFirst()
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index dbd10a3..9f74106 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -100,7 +100,7 @@
   /**
    *
    */
-  boolean contains(HddsProtos.ContainerID containerID);
+  boolean contains(ContainerID containerID);
 
   /**
    * Returns the ID of all the managed containers.
@@ -117,23 +117,23 @@
   /**
    *
    */
-  ContainerInfo getContainer(HddsProtos.ContainerID id);
+  ContainerInfo getContainer(ContainerID id);
 
   /**
    *
    */
-  Set<ContainerReplica> getContainerReplicas(HddsProtos.ContainerID id);
+  Set<ContainerReplica> getContainerReplicas(ContainerID id);
 
   /**
    *
    */
-  void updateContainerReplica(HddsProtos.ContainerID id,
+  void updateContainerReplica(ContainerID id,
                               ContainerReplica replica);
 
   /**
    *
    */
-  void removeContainerReplica(HddsProtos.ContainerID id,
+  void removeContainerReplica(ContainerID id,
                               ContainerReplica replica);
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
index 25c962c..5941ac3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -277,10 +277,10 @@
   }
 
   @Override
-  public ContainerInfo getContainer(final HddsProtos.ContainerID id) {
+  public ContainerInfo getContainer(final ContainerID id) {
     lock.readLock().lock();
     try {
-      return containers.getContainerInfo(ContainerID.getFromProtobuf(id));
+      return containers.getContainerInfo(id);
     } finally {
       lock.readLock().unlock();
     }
@@ -326,11 +326,10 @@
   }
 
   @Override
-  public boolean contains(final HddsProtos.ContainerID id) {
+  public boolean contains(ContainerID id) {
     lock.readLock().lock();
     try {
-      // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
-      return containers.contains(ContainerID.getFromProtobuf(id));
+      return containers.contains(id);
     } finally {
       lock.readLock().unlock();
     }
@@ -370,35 +369,32 @@
 
 
   @Override
-  public Set<ContainerReplica> getContainerReplicas(
-      final HddsProtos.ContainerID id) {
+  public Set<ContainerReplica> getContainerReplicas(final ContainerID id) {
     lock.readLock().lock();
     try {
-      return containers.getContainerReplicas(
-          ContainerID.getFromProtobuf(id));
+      return containers.getContainerReplicas(id);
     } finally {
       lock.readLock().unlock();
     }
   }
 
   @Override
-  public void updateContainerReplica(final HddsProtos.ContainerID id,
+  public void updateContainerReplica(final ContainerID id,
                                      final ContainerReplica replica) {
     lock.writeLock().lock();
     try {
-      containers.updateContainerReplica(ContainerID.getFromProtobuf(id),
-          replica);
+      containers.updateContainerReplica(id, replica);
     } finally {
       lock.writeLock().unlock();
     }
   }
 
   @Override
-  public void removeContainerReplica(final HddsProtos.ContainerID id,
+  public void removeContainerReplica(final ContainerID id,
                                      final ContainerReplica replica) {
     lock.writeLock().lock();
     try {
-      containers.removeContainerReplica(ContainerID.getFromProtobuf(id),
+      containers.removeContainerReplica(id,
           replica);
     } finally {
       lock.writeLock().unlock();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index 73afcd2..c494e2d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -78,14 +78,20 @@
     synchronized (dd) {
       for (ContainerReplicaProto replicaProto :
           report.getReport().getReportList()) {
+        ContainerID id = ContainerID.valueOf(replicaProto.getContainerID());
+        ContainerInfo container = null;
         try {
-          final ContainerID id = ContainerID.valueOf(
-              replicaProto.getContainerID());
-          if (!replicaProto.getState().equals(
-              ContainerReplicaProto.State.DELETED)) {
-            nodeManager.addContainer(dd, id);
+          try {
+            container = getContainerManager().getContainer(id);
+            // Ensure we reuse the same ContainerID instance in containerInfo
+            id = container.containerID();
+          } finally {
+            if (!replicaProto.getState().equals(
+                ContainerReplicaProto.State.DELETED)) {
+              nodeManager.addContainer(dd, id);
+            }
           }
-          processContainerReplica(dd, replicaProto, publisher);
+          processContainerReplica(dd, container, replicaProto, publisher);
         } catch (ContainerNotFoundException e) {
           success = false;
           LOG.warn("Container {} not found!", replicaProto.getContainerID());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 7f52a06..59e73a3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -487,7 +487,7 @@
         updateInflightAction(container, inflightReplication,
             action -> replicas.stream()
                 .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode)),
-            ()-> metrics.incrNumReplicationCmdsTimeout(),
+            () -> metrics.incrNumReplicationCmdsTimeout(),
             action -> updateCompletedReplicationMetrics(container, action));
 
         updateInflightAction(container, inflightDeletion,
@@ -624,7 +624,7 @@
       final List<InflightAction> actions = inflightActions.get(id);
 
       Iterator<InflightAction> iter = actions.iterator();
-      while(iter.hasNext()) {
+      while (iter.hasNext()) {
         try {
           InflightAction a = iter.next();
           NodeStatus status = nodeManager.getNodeStatus(a.datanode);
@@ -919,7 +919,7 @@
    */
   private boolean isPolicySatisfiedAfterMove(ContainerInfo cif,
                     DatanodeDetails srcDn, DatanodeDetails targetDn,
-                    final List<ContainerReplica> replicas){
+                    final List<ContainerReplica> replicas) {
     Set<ContainerReplica> movedReplicas =
         replicas.stream().collect(Collectors.toSet());
     movedReplicas.removeIf(r -> r.getDatanodeDetails().equals(srcDn));
@@ -1157,7 +1157,7 @@
 
       if (replicaSet.isSufficientlyReplicated()
           && placementStatus.isPolicySatisfied()) {
-        LOG.info("The container {} with replicas {} is sufficiently "+
+        LOG.info("The container {} with replicas {} is sufficiently " +
             "replicated and is not mis-replicated",
             container.getContainerID(), replicaSet);
         return;
@@ -1348,8 +1348,8 @@
     ContainerReplicaCount replicaCount =
         getContainerReplicaCount(cif, replicaSet);
 
-    if(!replicaSet.stream()
-        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))){
+    if (!replicaSet.stream()
+        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) {
       // if the target is present but source disappears somehow,
       // we can consider move is successful.
       compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED);
@@ -1654,7 +1654,7 @@
     try {
       return nodeManager.getNodeStatus(dn);
     } catch (NodeNotFoundException e) {
-      throw new IllegalStateException("Unable to find NodeStatus for "+dn, e);
+      throw new IllegalStateException("Unable to find NodeStatus for " + dn, e);
     }
   }
 
@@ -1944,7 +1944,7 @@
       try {
         cid = ContainerID.getFromProtobuf(contianerIDProto);
         mp = MoveDataNodePair.getFromProtobuf(mdnpp);
-        if(!inflightMove.containsKey(cid)) {
+        if (!inflightMove.containsKey(cid)) {
           transactionBuffer.addToBuffer(moveTable, cid, mp);
           inflightMove.putIfAbsent(cid, mp);
         }
@@ -2055,8 +2055,8 @@
       boolean isTgtExist = replicas.stream()
           .anyMatch(r -> r.getDatanodeDetails().equals(v.getTgt()));
 
-      if(isSrcExist) {
-        if(isTgtExist) {
+      if (isSrcExist) {
+        if (isTgtExist) {
           //the former scm leader may or may not send the deletion command
           //before reelection.here, we just try to send the command again.
           deleteSrcDnForMove(cif, replicas);
@@ -2081,8 +2081,8 @@
    * complete the CompletableFuture of the container in the given Map with
    * a given MoveResult.
    */
-  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr){
-    if(inflightMoveFuture.containsKey(cid)) {
+  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr) {
+    if (inflightMoveFuture.containsKey(cid)) {
       inflightMoveFuture.get(cid).complete(mr);
       inflightMoveFuture.remove(cid);
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
index a975f04..018f0df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
@@ -70,7 +70,7 @@
     potentialTargets = pt;
   }
 
-  private void setUpperLimit(Double upperLimit){
+  private void setUpperLimit(Double upperLimit) {
     this.upperLimit = upperLimit;
   }
 
@@ -199,12 +199,12 @@
    */
   @Override
   public void increaseSizeEntering(DatanodeDetails target, long size) {
-    if(sizeEnteringNode.containsKey(target)) {
+    if (sizeEnteringNode.containsKey(target)) {
       long totalEnteringSize = sizeEnteringNode.get(target) + size;
       sizeEnteringNode.put(target, totalEnteringSize);
       potentialTargets.removeIf(
           c -> c.getDatanodeDetails().equals(target));
-      if(totalEnteringSize < config.getMaxSizeEnteringTarget()) {
+      if (totalEnteringSize < config.getMaxSizeEnteringTarget()) {
         //reorder
         potentialTargets.add(nodeManager.getUsageInfo(target));
       }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
index ea32cfa..bd6d3cc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -54,6 +55,9 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL_DEFAULT;
+
 /**
  * Container balancer is a service in SCM to move containers between over- and
  * under-utilized datanodes.
@@ -108,35 +112,24 @@
    * new ContainerBalancerConfiguration and ContainerBalancerMetrics.
    * Container Balancer does not start on construction.
    *
-   * @param nodeManager        NodeManager
-   * @param containerManager   ContainerManager
-   * @param replicationManager ReplicationManager
-   * @param ozoneConfiguration OzoneConfiguration
+   * @param scm the storage container manager
    */
-  public ContainerBalancer(
-      NodeManager nodeManager,
-      ContainerManager containerManager,
-      ReplicationManager replicationManager,
-      OzoneConfiguration ozoneConfiguration,
-      final SCMContext scmContext,
-      NetworkTopology networkTopology,
-      PlacementPolicy placementPolicy) {
-    this.nodeManager = nodeManager;
-    this.containerManager = containerManager;
-    this.replicationManager = replicationManager;
-    this.ozoneConfiguration = ozoneConfiguration;
-    this.config = ozoneConfiguration.
-        getObject(ContainerBalancerConfiguration.class);
+  public ContainerBalancer(StorageContainerManager scm) {
+    this.nodeManager = scm.getScmNodeManager();
+    this.containerManager = scm.getContainerManager();
+    this.replicationManager = scm.getReplicationManager();
+    this.ozoneConfiguration = scm.getConfiguration();
+    this.config = ozoneConfiguration.getObject(
+        ContainerBalancerConfiguration.class);
     this.metrics = ContainerBalancerMetrics.create();
-    this.scmContext = scmContext;
-
+    this.scmContext = scm.getScmContext();
     this.selectedContainers = new HashSet<>();
     this.overUtilizedNodes = new ArrayList<>();
     this.underUtilizedNodes = new ArrayList<>();
     this.withinThresholdUtilizedNodes = new ArrayList<>();
     this.unBalancedNodes = new ArrayList<>();
-    this.placementPolicy = placementPolicy;
-    this.networkTopology = networkTopology;
+    this.placementPolicy = scm.getContainerPlacementPolicy();
+    this.networkTopology = scm.getClusterMap();
 
     this.lock = new ReentrantLock();
     findSourceStrategy = new FindSourceGreedy(nodeManager);
@@ -180,11 +173,41 @@
    */
   private void balance() {
     this.iterations = config.getIterations();
-    if(this.iterations == -1) {
+    if (this.iterations == -1) {
       //run balancer infinitely
       this.iterations = Integer.MAX_VALUE;
     }
+
     for (int i = 0; i < iterations && balancerRunning; i++) {
+      if (config.getTriggerDuEnable()) {
+        // Before starting a new iteration, we trigger all the datanodes
+        // to run `du`. This is an aggressive action which gives us more
+        // precise usage info for all datanodes before moving, helping
+        // Container Balancer make more appropriate decisions. It will
+        // also increase the disk IO load on datanodes, so please enable
+        // it with caution.
+        nodeManager.refreshAllHealthyDnUsageInfo();
+        synchronized (this) {
+          try {
+            long nodeReportInterval =
+                ozoneConfiguration.getTimeDuration(HDDS_NODE_REPORT_INTERVAL,
+                HDDS_NODE_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
+            // One interval for sending the command, one for running du, and
+            // one for reporting back. Keep it at three intervals for now; a
+            // more suitable value can be set in the future if needed.
+            wait(3 * nodeReportInterval);
+          } catch (InterruptedException e) {
+            LOG.info("Container Balancer was interrupted while waiting for" +
+                "datanodes refreshing volume usage info");
+            Thread.currentThread().interrupt();
+            return;
+          }
+        }
+        if (!isBalancerRunning()) {
+          return;
+        }
+      }
+
       // stop balancing if iteration is not initialized
       if (!initializeIteration()) {
         stop();
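A condensed view of the wait logic added in the hunk above, as a sketch only. It assumes the surrounding ContainerBalancer fields and methods shown in this patch (ozoneConfiguration, isBalancerRunning()); the three-interval multiplier is the heuristic this patch uses:

    // Sketch: block until datanodes have had time to run `du` and report back.
    private boolean waitForFreshUsageInfo() {
      long nodeReportInterval = ozoneConfiguration.getTimeDuration(
          HDDS_NODE_REPORT_INTERVAL, HDDS_NODE_REPORT_INTERVAL_DEFAULT,
          TimeUnit.MILLISECONDS);
      synchronized (this) {
        try {
          // One interval to deliver the command, one to run du, one to report.
          wait(3 * nodeReportInterval);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return false;
        }
      }
      return isBalancerRunning();
    }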
@@ -194,6 +217,7 @@
       //if no new move option is generated, it means the cluster can
       //not be balanced any more , so just stop
       IterationResult iR = doIteration();
+      metrics.incrementNumIterations(1);
       LOG.info("Result of this iteration of Container Balancer: {}", iR);
       if (iR == IterationResult.CAN_NOT_BALANCE_ANY_MORE) {
         stop();
@@ -267,15 +291,9 @@
         datanodeUsageInfo.getDatanodeDetails()));
 
     this.totalNodesInCluster = datanodeUsageInfos.size();
-    this.clusterCapacity = 0L;
-    this.clusterUsed = 0L;
-    this.clusterRemaining = 0L;
-    this.selectedContainers.clear();
-    this.overUtilizedNodes.clear();
-    this.underUtilizedNodes.clear();
-    this.unBalancedNodes.clear();
-    this.countDatanodesInvolvedPerIteration = 0;
-    this.sizeMovedPerIteration = 0;
+
+    // reset some variables and metrics for this iteration
+    resetState();
 
     clusterAvgUtilisation = calculateAvgUtilization(datanodeUsageInfos);
     if (LOG.isDebugEnabled()) {
@@ -313,11 +331,7 @@
       }
       if (Double.compare(utilization, upperLimit) > 0) {
         overUtilizedNodes.add(datanodeUsageInfo);
-        metrics.incrementDatanodesNumToBalance(1);
-
-        metrics.setMaxDatanodeUtilizedPercentage(Math.max(
-            metrics.getMaxDatanodeUtilizedPercentage(),
-            ratioToPercent(utilization)));
+        metrics.incrementNumDatanodesUnbalanced(1);
 
         // amount of bytes greater than upper limit in this node
         Long overUtilizedBytes = ratioToBytes(
@@ -328,7 +342,7 @@
         totalOverUtilizedBytes += overUtilizedBytes;
       } else if (Double.compare(utilization, lowerLimit) < 0) {
         underUtilizedNodes.add(datanodeUsageInfo);
-        metrics.incrementDatanodesNumToBalance(1);
+        metrics.incrementNumDatanodesUnbalanced(1);
 
         // amount of bytes lesser than lower limit in this node
         Long underUtilizedBytes = ratioToBytes(
@@ -341,7 +355,7 @@
         withinThresholdUtilizedNodes.add(datanodeUsageInfo);
       }
     }
-    metrics.setDataSizeToBalanceGB(
+    metrics.incrementDataSizeUnbalancedGB(
         Math.max(totalOverUtilizedBytes, totalUnderUtilizedBytes) /
             OzoneConsts.GB);
     Collections.reverse(underUtilizedNodes);
@@ -451,7 +465,7 @@
             ContainerInfo container =
                 containerManager.getContainer(moveSelection.getContainerID());
             this.sizeMovedPerIteration += container.getUsedBytes();
-            metrics.incrementMovedContainersNum(1);
+            metrics.incrementNumMovedContainersInLatestIteration(1);
             LOG.info("Move completed for container {} to target {}",
                 container.containerID(),
                 moveSelection.getTargetNode().getUuidString());
@@ -462,7 +476,8 @@
           }
         }
       } catch (InterruptedException e) {
-        LOG.warn("Container move for container {} was interrupted.",
+        LOG.warn("Interrupted while waiting for container move result for " +
+                "container {}.",
             moveSelection.getContainerID(), e);
         Thread.currentThread().interrupt();
       } catch (ExecutionException e) {
@@ -475,7 +490,9 @@
     }
     countDatanodesInvolvedPerIteration =
         sourceToTargetMap.size() + selectedTargets.size();
-    metrics.incrementDataSizeMovedGB(
+    metrics.incrementNumDatanodesInvolvedInLatestIteration(
+        countDatanodesInvolvedPerIteration);
+    metrics.incrementDataSizeMovedGBInLatestIteration(
         sizeMovedPerIteration / OzoneConsts.GB);
     LOG.info("Number of datanodes involved in this iteration: {}. Size moved " +
             "in this iteration: {}B.",
@@ -741,6 +758,26 @@
   }
 
   /**
+   * Resets some variables and metrics for this iteration.
+   */
+  private void resetState() {
+    this.clusterCapacity = 0L;
+    this.clusterUsed = 0L;
+    this.clusterRemaining = 0L;
+    this.selectedContainers.clear();
+    this.overUtilizedNodes.clear();
+    this.underUtilizedNodes.clear();
+    this.unBalancedNodes.clear();
+    this.countDatanodesInvolvedPerIteration = 0;
+    this.sizeMovedPerIteration = 0;
+    metrics.resetDataSizeMovedGBInLatestIteration();
+    metrics.resetNumMovedContainersInLatestIteration();
+    metrics.resetNumDatanodesInvolvedInLatestIteration();
+    metrics.resetDataSizeUnbalancedGB();
+    metrics.resetNumDatanodesUnbalanced();
+  }
+
+  /**
    * Stops ContainerBalancer.
    */
   public void stop() {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
index 8582347..4e994c8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
@@ -97,9 +97,8 @@
   private long moveTimeout = Duration.ofMinutes(30).toMillis();
 
   @Config(key = "balancing.iteration.interval", type = ConfigType.TIME,
-      defaultValue = "70m", tags = {
-      ConfigTag.BALANCER}, description = "The interval period between each " +
-      "iteration of Container Balancer.")
+      defaultValue = "70m", tags = {ConfigTag.BALANCER}, description =
+      "The interval period between each iteration of Container Balancer.")
   private long balancingInterval = Duration.ofMinutes(70).toMillis();
 
   @Config(key = "include.datanodes", type = ConfigType.STRING, defaultValue =
@@ -123,6 +122,15 @@
           "This configuration is false by default.")
   private boolean networkTopologyEnable = false;
 
+  @Config(key = "trigger.du.before.move.enable", type = ConfigType.BOOLEAN,
+      defaultValue = "false", tags = {ConfigTag.BALANCER},
+      description = "whether to send command to all the healthy and " +
+          "in-service data nodes to run du immediately before starting" +
+          "a balance iteration. note that running du is very time " +
+          "consuming , especially when the disk usage rate of a " +
+          "data node is very high")
+  private boolean triggerDuEnable = false;
+
   /**
    * Gets the threshold value for Container Balancer.
    *
@@ -193,6 +201,15 @@
   }
 
   /**
+   * Get the triggerDuEnable value for Container Balancer.
+   *
+   * @return the boolean value of triggerDuEnable
+   */
+  public Boolean getTriggerDuEnable() {
+    return triggerDuEnable;
+  }
+
+  /**
    * Set the NetworkTopologyEnable value for Container Balancer.
    *
    * @param enable the boolean value to be set to networkTopologyEnable
@@ -361,8 +378,8 @@
             "%-50s %s%n" +
             "%-50s %s%n" +
             "%-50s %d%n" +
-            "%-50s %dGB%n"+
-            "%-50s %dGB%n"+
+            "%-50s %dGB%n" +
+            "%-50s %dGB%n" +
             "%-50s %dGB%n", "Key", "Value", "Threshold",
         threshold, "Max Datanodes to Involve per Iteration(percent)",
         maxDatanodesPercentageToInvolvePerIteration,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
index 984787f..0799844 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
@@ -23,8 +23,7 @@
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 
 /**
  * Metrics related to Container Balancer running in SCM.
@@ -37,27 +36,26 @@
 
   private final MetricsSystem ms;
 
-  @Metric(about = "The total amount of used space in GigaBytes that needs to " +
-      "be balanced.")
-  private MutableGaugeLong dataSizeToBalanceGB;
+  @Metric(about = "Amount of Gigabytes that Container Balancer moved" +
+      " in the latest iteration.")
+  private MutableCounterLong dataSizeMovedGBInLatestIteration;
 
-  @Metric(about = "The amount of Giga Bytes that have been moved to achieve " +
-      "balance.")
-  private MutableGaugeLong dataSizeMovedGB;
+  @Metric(about = "Number of containers that Container Balancer moved" +
+      " in the latest iteration.")
+  private MutableCounterLong numMovedContainersInLatestIteration;
 
-  @Metric(about = "Number of containers that Container Balancer has moved" +
-      " until now.")
-  private MutableGaugeLong movedContainersNum;
+  @Metric(about = "Number of iterations that Container Balancer has run for.")
+  private MutableCounterLong numIterations;
 
-  @Metric(about = "The total number of datanodes that need to be balanced.")
-  private MutableGaugeLong datanodesNumToBalance;
+  @Metric(about = "Number of datanodes that were involved in balancing in the" +
+      " latest iteration.")
+  private MutableCounterLong numDatanodesInvolvedInLatestIteration;
 
-  @Metric(about = "Number of datanodes that Container Balancer has balanced " +
-      "until now.")
-  private MutableGaugeLong datanodesNumBalanced;
+  @Metric(about = "Amount of data in Gigabytes that is causing unbalance.")
+  private MutableCounterLong dataSizeUnbalancedGB;
 
-  @Metric(about = "Utilisation value of the current maximum utilised datanode.")
-  private MutableGaugeInt maxDatanodeUtilizedPercentage;
+  @Metric(about = "Number of unbalanced datanodes.")
+  private MutableCounterLong numDatanodesUnbalanced;
 
   /**
    * Create and register metrics named {@link ContainerBalancerMetrics#NAME}
@@ -75,82 +73,101 @@
     this.ms = ms;
   }
 
-  public long getDataSizeToBalanceGB() {
-    return dataSizeToBalanceGB.value();
+  /**
+   * Gets the amount of data moved by Container Balancer in the latest
+   * iteration.
+   * @return size in GB
+   */
+  public long getDataSizeMovedGBInLatestIteration() {
+    return dataSizeMovedGBInLatestIteration.value();
   }
 
-  public void setDataSizeToBalanceGB(long size) {
-    this.dataSizeToBalanceGB.set(size);
+  public void incrementDataSizeMovedGBInLatestIteration(long valueToAdd) {
+    this.dataSizeMovedGBInLatestIteration.incr(valueToAdd);
   }
 
-  public long getDataSizeMovedGB() {
-    return dataSizeMovedGB.value();
-  }
-
-  public void setDataSizeMovedGB(long dataSizeMovedGB) {
-    this.dataSizeMovedGB.set(dataSizeMovedGB);
-  }
-
-  public long incrementDataSizeMovedGB(long valueToAdd) {
-    this.dataSizeMovedGB.incr(valueToAdd);
-    return this.dataSizeMovedGB.value();
-  }
-
-  public long getMovedContainersNum() {
-    return movedContainersNum.value();
-  }
-
-  public void setMovedContainersNum(long movedContainersNum) {
-    this.movedContainersNum.set(movedContainersNum);
-  }
-
-  public long incrementMovedContainersNum(long valueToAdd) {
-    this.movedContainersNum.incr(valueToAdd);
-    return this.movedContainersNum.value();
-  }
-
-  public long getDatanodesNumToBalance() {
-    return datanodesNumToBalance.value();
-  }
-
-  public void setDatanodesNumToBalance(long datanodesNumToBalance) {
-    this.datanodesNumToBalance.set(datanodesNumToBalance);
+  public void resetDataSizeMovedGBInLatestIteration() {
+    dataSizeMovedGBInLatestIteration.incr(
+        -getDataSizeMovedGBInLatestIteration());
   }
 
   /**
-   * Add specified valueToAdd to the number of datanodes that need to be
-   * balanced.
-   *
-   * @param valueToAdd number of datanodes to add
+   * Gets the number of containers moved by Container Balancer in the latest
+   * iteration.
+   * @return number of containers
    */
-  public void incrementDatanodesNumToBalance(long valueToAdd) {
-    this.datanodesNumToBalance.incr(valueToAdd);
+  public long getNumMovedContainersInLatestIteration() {
+    return numMovedContainersInLatestIteration.value();
   }
 
-  public long getDatanodesNumBalanced() {
-    return datanodesNumBalanced.value();
+  public void incrementNumMovedContainersInLatestIteration(long valueToAdd) {
+    this.numMovedContainersInLatestIteration.incr(valueToAdd);
   }
 
-  public void setDatanodesNumBalanced(long datanodesNumBalanced) {
-    this.datanodesNumBalanced.set(datanodesNumBalanced);
+  public void resetNumMovedContainersInLatestIteration() {
+    numMovedContainersInLatestIteration.incr(
+        -getNumMovedContainersInLatestIteration());
   }
 
   /**
-   * Add specified valueToAdd to datanodesNumBalanced.
-   *
-   * @param valueToAdd The value to add.
-   * @return The result after addition.
+   * Gets the number of iterations that Container Balancer has run for.
+   * @return number of iterations
    */
-  public long incrementDatanodesNumBalanced(long valueToAdd) {
-    datanodesNumBalanced.incr(valueToAdd);
-    return datanodesNumBalanced.value();
+  public long getNumIterations() {
+    return numIterations.value();
   }
 
-  public int getMaxDatanodeUtilizedPercentage() {
-    return maxDatanodeUtilizedPercentage.value();
+  public void incrementNumIterations(long valueToAdd) {
+    numIterations.incr(valueToAdd);
   }
 
-  public void setMaxDatanodeUtilizedPercentage(int percentage) {
-    this.maxDatanodeUtilizedPercentage.set(percentage);
+  /**
+   * Gets number of datanodes that were involved in balancing in the latest
+   * iteration.
+   * @return number of datanodes
+   */
+  public long getNumDatanodesInvolvedInLatestIteration() {
+    return numDatanodesInvolvedInLatestIteration.value();
+  }
+
+  public void incrementNumDatanodesInvolvedInLatestIteration(long valueToAdd) {
+    numDatanodesInvolvedInLatestIteration.incr(valueToAdd);
+  }
+
+  public void resetNumDatanodesInvolvedInLatestIteration() {
+    numDatanodesInvolvedInLatestIteration.incr(
+        -getNumDatanodesInvolvedInLatestIteration());
+  }
+
+  /**
+   * Gets the amount of data in Gigabytes that is causing unbalance.
+   * @return size of data as a long value
+   */
+  public long getDataSizeUnbalancedGB() {
+    return dataSizeUnbalancedGB.value();
+  }
+
+  public void incrementDataSizeUnbalancedGB(long valueToAdd) {
+    dataSizeUnbalancedGB.incr(valueToAdd);
+  }
+
+  public void resetDataSizeUnbalancedGB() {
+    dataSizeUnbalancedGB.incr(-getDataSizeUnbalancedGB());
+  }
+
+  /**
+   * Gets the number of datanodes that are unbalanced.
+   * @return number of unbalanced datanodes
+   */
+  public long getNumDatanodesUnbalanced() {
+    return numDatanodesUnbalanced.value();
+  }
+
+  public void incrementNumDatanodesUnbalanced(long valueToAdd) {
+    numDatanodesUnbalanced.incr(valueToAdd);
+  }
+
+  public void resetNumDatanodesUnbalanced() {
+    numDatanodesUnbalanced.incr(-getNumDatanodesUnbalanced());
   }
 }
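The new metrics use MutableCounterLong, which (unlike a gauge) has no set() method, so the reset*() methods above clear a counter by incrementing it with the negative of its current value. A small, self-contained sketch of that pattern; the registry and metric names here are made up for illustration:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class CounterResetSketch {
  private final MetricsRegistry registry =
      new MetricsRegistry("ContainerBalancerSketch");
  private final MutableCounterLong moved =
      registry.newCounter("numMovedContainersInLatestIteration",
          "containers moved in the latest iteration", 0L);

  void recordMove() {
    moved.incr();                 // counters only go up...
  }

  void startNewIteration() {
    moved.incr(-moved.value());   // ...so "reset" = add the negative value
  }
}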
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
index 591461d..540d263 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
@@ -33,7 +33,7 @@
  * The selection criteria for selecting source datanodes , the containers of
  * which will be moved out.
  */
-public class FindSourceGreedy implements FindSourceStrategy{
+public class FindSourceGreedy implements FindSourceStrategy {
   private static final Logger LOG =
       LoggerFactory.getLogger(FindSourceGreedy.class);
   private Map<DatanodeDetails, Long> sizeLeavingNode;
@@ -84,7 +84,7 @@
   @Override
   public void increaseSizeLeaving(DatanodeDetails dui, long size) {
     Long currentSize = sizeLeavingNode.get(dui);
-    if(currentSize != null) {
+    if (currentSize != null) {
       sizeLeavingNode.put(dui, currentSize + size);
       //reorder according to the latest sizeLeavingNode
       potentialSources.add(nodeManager.getUsageInfo(dui));
@@ -114,7 +114,7 @@
    * data nodes.
    */
   @Override
-  public void removeCandidateSourceDataNode(DatanodeDetails dui){
+  public void removeCandidateSourceDataNode(DatanodeDetails dui) {
     potentialSources.removeIf(a -> a.getDatanodeDetails().equals(dui));
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
index c799b02..bf0ea7c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
@@ -47,7 +47,7 @@
   public static PlacementPolicy getPolicy(
       ConfigurationSource conf, final NodeManager nodeManager,
       NetworkTopology clusterMap, final boolean fallback,
-      SCMContainerPlacementMetrics metrics) throws SCMException{
+      SCMContainerPlacementMetrics metrics) throws SCMException {
     final Class<? extends PlacementPolicy> placementClass = conf
         .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
             OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index 1ca68bd..22bdf21 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -33,7 +33,7 @@
 /**
  * This class is for maintaining Topology aware container placement statistics.
  */
-@Metrics(about="SCM Container Placement Metrics", context = OzoneConsts.OZONE)
+@Metrics(about = "SCM Container Placement Metrics", context = OzoneConsts.OZONE)
 public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index d46713b..2631a1d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -55,7 +55,7 @@
   private final NetworkTopology networkTopology;
   private boolean fallback;
   private static final int RACK_LEVEL = 1;
-  private static final int MAX_RETRY= 3;
+  private static final int MAX_RETRY = 3;
   private final SCMContainerPlacementMetrics metrics;
   // Used to check the placement policy is validated in the parent class
   private static final int REQUIRED_RACKS = 2;
@@ -118,7 +118,7 @@
       mutableFavoredNodes.addAll(favoredNodes);
       mutableFavoredNodes.removeAll(excludedNodes);
     }
-    int favoredNodeNum = mutableFavoredNodes == null? 0 :
+    int favoredNodeNum = mutableFavoredNodes == null ? 0 :
         mutableFavoredNodes.size();
 
     List<DatanodeDetails> chosenNodes = new ArrayList<>();
@@ -195,7 +195,7 @@
       // in the same rack, then choose nodes on different racks, otherwise,
       // choose one on the same rack as one of excluded nodes, remaining chosen
       // are on different racks.
-      for(int i = 0; i < excludedNodesCount; i++) {
+      for (int i = 0; i < excludedNodesCount; i++) {
         for (int j = i + 1; j < excludedNodesCount; j++) {
           if (networkTopology.isSameParent(
               excludedNodes.get(i), excludedNodes.get(j))) {
@@ -257,7 +257,7 @@
     int maxRetry = MAX_RETRY;
     List<String> excludedNodesForCapacity = null;
     boolean isFallbacked = false;
-    while(true) {
+    while (true) {
       metrics.incrDatanodeChooseAttemptCount();
       DatanodeDetails node = null;
       if (affinityNodes != null) {
@@ -348,8 +348,8 @@
     Preconditions.checkArgument(chosenNodes != null);
     List<DatanodeDetails> excludedNodeList = excludedNodes != null ?
         excludedNodes : chosenNodes;
-    int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size();
-    while(true) {
+    int favoredNodeNum = favoredNodes == null ? 0 : favoredNodes.size();
+    while (true) {
       DatanodeDetails favoredNode = favoredNodeNum > favorIndex ?
           favoredNodes.get(favorIndex) : null;
       DatanodeDetails chosenNode;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
index 5194829..f9d2ade 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
@@ -28,7 +28,7 @@
 /**
  * This class is for maintaining StorageContainerManager statistics.
  */
-@Metrics(about="Storage Container Manager Metrics", context="dfs")
+@Metrics(about = "Storage Container Manager Metrics", context = "dfs")
 public class SCMMetrics {
   public static final String SOURCE_NAME =
       SCMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index bbf1c70..d7a434b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.container.states;
 
+import java.util.HashSet;
 import java.util.Set;
 import java.util.Collections;
 import java.util.Map;
@@ -119,7 +120,7 @@
       ownerMap.insert(info.getOwner(), id);
       repConfigMap.insert(info.getReplicationConfig(), id);
       typeMap.insert(info.getReplicationType(), id);
-      replicaMap.put(id, ConcurrentHashMap.newKeySet());
+      replicaMap.put(id, Collections.emptySet());
 
       // Flush the cache of this container type, will be added later when
       // get container queries are executed.
@@ -147,6 +148,7 @@
       ownerMap.remove(info.getOwner(), id);
       repConfigMap.remove(info.getReplicationConfig(), id);
       typeMap.remove(info.getReplicationType(), id);
+      replicaMap.remove(id);
       // Flush the cache of this container type.
       flushCache(info);
       LOG.trace("Container {} removed from ContainerStateMap.", id);
@@ -173,8 +175,7 @@
   public Set<ContainerReplica> getContainerReplicas(
       final ContainerID containerID) {
     Preconditions.checkNotNull(containerID);
-    final Set<ContainerReplica> replicas = replicaMap.get(containerID);
-    return replicas == null ? null : Collections.unmodifiableSet(replicas);
+    return replicaMap.get(containerID);
   }
 
   /**
@@ -189,9 +190,10 @@
       final ContainerReplica replica) {
     Preconditions.checkNotNull(containerID);
     if (contains(containerID)) {
-      final Set<ContainerReplica> replicas = replicaMap.get(containerID);
-      replicas.remove(replica);
-      replicas.add(replica);
+      final Set<ContainerReplica> newSet = createNewReplicaSet(containerID);
+      newSet.remove(replica);
+      newSet.add(replica);
+      replaceReplicaSet(containerID, newSet);
     }
   }
 
@@ -207,10 +209,22 @@
     Preconditions.checkNotNull(containerID);
     Preconditions.checkNotNull(replica);
     if (contains(containerID)) {
-      replicaMap.get(containerID).remove(replica);
+      final Set<ContainerReplica> newSet = createNewReplicaSet(containerID);
+      newSet.remove(replica);
+      replaceReplicaSet(containerID, newSet);
     }
   }
 
+  private Set<ContainerReplica> createNewReplicaSet(ContainerID containerID) {
+    Set<ContainerReplica> existingSet = replicaMap.get(containerID);
+    return existingSet == null ? new HashSet<>() : new HashSet<>(existingSet);
+  }
+
+  private void replaceReplicaSet(ContainerID containerID,
+      Set<ContainerReplica> newSet) {
+    replicaMap.put(containerID, Collections.unmodifiableSet(newSet));
+  }
+
   /**
    * Just update the container State.
    * @param info ContainerInfo.
@@ -358,7 +372,7 @@
 
     final ContainerQueryKey queryKey =
         new ContainerQueryKey(state, owner, repConfig);
-    if(resultCache.containsKey(queryKey)){
+    if (resultCache.containsKey(queryKey)) {
       return resultCache.get(queryKey);
     }
 
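Note on the hunks above: ContainerStateMap moves to a copy-on-write scheme for replica sets — the map only ever holds empty or unmodifiable sets, and every update builds a fresh HashSet, mutates the copy, and publishes it back, so getContainerReplicas can hand out the stored set directly. A minimal sketch of that pattern, using illustrative names rather than the Ozone types:

import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Copy-on-write "set per key" store; a sketch, not the Ozone class. */
final class CopyOnWriteSetMap<K, V> {
  private final Map<K, Set<V>> map = new ConcurrentHashMap<>();

  /** Readers always get the current immutable snapshot. */
  Set<V> get(K key) {
    return map.getOrDefault(key, Collections.emptySet());
  }

  /** Writers copy, modify the copy, then publish it as an unmodifiable set. */
  void add(K key, V value) {
    Set<V> copy = new HashSet<>(get(key));
    copy.add(value);
    map.put(key, Collections.unmodifiableSet(copy));
  }

  void remove(K key, V value) {
    Set<V> copy = new HashSet<>(get(key));
    copy.remove(value);
    map.put(key, Collections.unmodifiableSet(copy));
  }
}
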
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 5ff4fe7..09381df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -58,7 +58,7 @@
       "Node_Registration_Container_Report");
 
   /**
-   * ContainerReports are send out by Datanodes. This report is received by
+   * ContainerReports are sent out by Datanodes. This report is received by
    * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated.
    */
   public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
index 54ed61e..03733a1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
@@ -216,7 +216,7 @@
       // Persist scm cert serial ID.
       scmStorageConfig.setScmCertSerialId(subSCMCertHolder.getSerialNumber()
           .toString());
-    } catch (InterruptedException | ExecutionException| IOException |
+    } catch (InterruptedException | ExecutionException | IOException |
         CertificateException  e) {
       LOG.error("Error while fetching/storing SCM signed certificate.", e);
       Thread.currentThread().interrupt();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
index f565ac0..336b7e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolServiceGrpc;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
@@ -59,7 +59,7 @@
 
   public InterSCMGrpcClient(final String host,
       int port, final ConfigurationSource conf,
-      SCMCertificateClient scmCertificateClient) throws IOException {
+      CertificateClient scmCertificateClient) throws IOException {
     Preconditions.checkNotNull(conf);
     timeout = conf.getTimeDuration(
             ScmConfigKeys.OZONE_SCM_HA_GRPC_DEADLINE_INTERVAL,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
index e949850..b99b6f6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
@@ -40,6 +40,7 @@
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_CERTIFICATE_FAILED;
 import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_DN_CERTIFICATE_FAILED;
 import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_OM_CERTIFICATE_FAILED;
 import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_SCM_CERTIFICATE_FAILED;
@@ -133,7 +134,7 @@
                 ScmConfigKeys.OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT,
                 ScmConfigKeys.
                         OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT_DEFAULT,
-                TimeUnit.MILLISECONDS)+200L,
+                TimeUnit.MILLISECONDS) + 200L,
             TimeUnit.MILLISECONDS));
     Rpc.setSlownessTimeout(properties, TimeDuration.valueOf(
             ozoneConf.getTimeDuration(
@@ -258,7 +259,8 @@
         throw new ServiceException(new RetriableWithFailOverException(e));
       } else if (ex.getErrorCode().equals(GET_SCM_CERTIFICATE_FAILED) ||
           ex.getErrorCode().equals(GET_OM_CERTIFICATE_FAILED) ||
-          ex.getErrorCode().equals(GET_DN_CERTIFICATE_FAILED)) {
+          ex.getErrorCode().equals(GET_DN_CERTIFICATE_FAILED) ||
+          ex.getErrorCode().equals(GET_CERTIFICATE_FAILED)) {
         throw new ServiceException(new RetriableWithNoFailoverException(e));
       }
     }
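Note on the hunk above: it adds GET_CERTIFICATE_FAILED to the security error codes that are rewrapped as RetriableWithNoFailoverException. One compact way to express that kind of "which codes are retriable" decision is a shared EnumSet; the enum and class below are illustrative only, not the Ozone types:

import java.util.EnumSet;
import java.util.Set;

/** Sketch: map error codes to a retry-without-failover decision. */
final class RetrySelector {
  enum ErrorCode {
    GET_CERTIFICATE_FAILED, GET_DN_CERTIFICATE_FAILED,
    GET_OM_CERTIFICATE_FAILED, GET_SCM_CERTIFICATE_FAILED, OTHER
  }

  private static final Set<ErrorCode> RETRIABLE_NO_FAILOVER =
      EnumSet.of(ErrorCode.GET_CERTIFICATE_FAILED,
          ErrorCode.GET_DN_CERTIFICATE_FAILED,
          ErrorCode.GET_OM_CERTIFICATE_FAILED,
          ErrorCode.GET_SCM_CERTIFICATE_FAILED);

  static boolean retryWithoutFailover(ErrorCode code) {
    return RETRIABLE_NO_FAILOVER.contains(code);
  }
}
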
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
index 22ed92c..7a0becd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
@@ -37,7 +37,7 @@
 
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMDBCheckpointProvider.class);
-  private transient DBStore scmDbStore;;
+  private transient DBStore scmDbStore;
 
   public SCMDBCheckpointProvider(DBStore scmDbStore) {
     this.scmDbStore = scmDbStore;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
index b07ee54..bb12df6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
@@ -68,7 +68,7 @@
               invokeLocal(method, args);
       LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime);
       return result;
-    } catch(InvocationTargetException iEx) {
+    } catch (InvocationTargetException iEx) {
       throw iEx.getCause();
     }
   }
@@ -88,7 +88,8 @@
    */
   private Object invokeRatis(Method method, Object[] args)
       throws Exception {
-    long startTime = Time.monotonicNowNanos();
+    LOG.trace("Invoking method {} on target {}", method, ratisHandler);
+    // TODO: Add metric here to track time taken by Ratis
     Preconditions.checkNotNull(ratisHandler);
     SCMRatisRequest scmRatisRequest = SCMRatisRequest.of(requestType,
         method.getName(), method.getParameterTypes(), args);
@@ -99,7 +100,7 @@
     // via ratis. So, in this special scenario we use RaftClient.
     final SCMRatisResponse response;
     if (method.getName().equals("storeValidCertificate") &&
-        args[args.length -1].equals(HddsProtos.NodeType.SCM)) {
+        args[args.length - 1].equals(HddsProtos.NodeType.SCM)) {
       response =
           HASecurityUtils.submitScmCertsToRatis(
               ratisHandler.getDivision().getGroup(),
@@ -110,8 +111,6 @@
       response = ratisHandler.submitRequest(
           scmRatisRequest);
     }
-    LOG.info("Invoking method {} on target {}, cost {}us",
-        method, ratisHandler, (Time.monotonicNowNanos() - startTime) / 1000.0);
 
     if (response.isSuccess()) {
       return response.getResult();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
index 4fbd811..1b75c4f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
@@ -17,11 +17,10 @@
 package org.apache.hadoop.hdds.scm.ha;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hdds.scm.ha.SCMService.*;
-
 import java.util.ArrayList;
 import java.util.List;
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
index 95d906e..9fb771b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
@@ -47,7 +47,7 @@
     codecs.put(X509Certificate.class, new X509CertificateCodec());
   }
 
-  private CodecFactory() {}
+  private CodecFactory() { }
 
   public static Codec getCodec(Class<?> type)
       throws InvalidProtocolBufferException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index 799e128..de7fcb0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -238,11 +238,11 @@
 
   @Override
   public TableIterator getAllCerts(CertificateStore.CertType certType) {
-    if(certType == CertificateStore.CertType.VALID_CERTS) {
+    if (certType == CertificateStore.CertType.VALID_CERTS) {
       return validCertsTable.iterator();
     }
 
-    if(certType == CertificateStore.CertType.REVOKED_CERTS) {
+    if (certType == CertificateStore.CertType.REVOKED_CERTS) {
       return revokedCertsTable.iterator();
     }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
index 9bfa7d6..bf2559b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
@@ -44,7 +44,7 @@
   @Override
   public X509Certificate fromPersistedFormat(byte[] rawData)
       throws IOException {
-    try{
+    try {
       String s = new String(rawData, StandardCharsets.UTF_8);
       return CertificateCodec.getX509Certificate(s);
     } catch (CertificateException exp) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index eb6dc0d..aa93025 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -91,7 +91,7 @@
     try {
       Commands cmds = commandMap.remove(datanodeUuid);
       List<SCMCommand> cmdList = null;
-      if(cmds != null) {
+      if (cmds != null) {
         cmdList = cmds.getCommands();
         commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0;
         // A post condition really.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index afb33d9..a0fb6c9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -34,7 +34,14 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.*;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index 27a84de..47d7c534 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -85,20 +85,20 @@
       return port;
     }
 
-    private void parseHostname() throws InvalidHostStringException{
+    private void parseHostname() throws InvalidHostStringException {
       try {
         // A URI *must* have a scheme, so just create a fake one
-        URI uri = new URI("empty://"+rawHostname.trim());
+        URI uri = new URI("empty://" + rawHostname.trim());
         this.hostname = uri.getHost();
         this.port = uri.getPort();
 
         if (this.hostname == null) {
-          throw new InvalidHostStringException("The string "+rawHostname+
+          throw new InvalidHostStringException("The string " + rawHostname +
               " does not contain a value hostname or hostname:port definition");
         }
       } catch (URISyntaxException e) {
         throw new InvalidHostStringException(
-            "Unable to parse the hoststring "+rawHostname, e);
+            "Unable to parse the hoststring " + rawHostname, e);
       }
     }
   }
@@ -138,7 +138,7 @@
         results.add(found.get(0));
       } else if (found.size() > 1) {
         DatanodeDetails match = null;
-        for(DatanodeDetails dn : found) {
+        for (DatanodeDetails dn : found) {
           if (validateDNPortMatch(host.getPort(), dn)) {
             match = dn;
             break;
@@ -231,7 +231,7 @@
        // NodeNotFoundException is not expected here, except if the node is
        // removed in the very short window between validation and starting
        // decom. Therefore log a warning and ignore the exception.
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
             "decommission it", dn.getHostName());
         errors.add(new DatanodeAdminError(dn.getHostName(),
             "The host was not found in SCM"));
@@ -274,12 +274,12 @@
           dn, NodeOperationalState.DECOMMISSIONING);
       monitor.startMonitoring(dn);
     } else if (nodeStatus.isDecommission()) {
-      LOG.info("Start Decommission called on node {} in state {}. Nothing to "+
+      LOG.info("Start Decommission called on node {} in state {}. Nothing to " +
           "do.", dn, opState);
     } else {
       LOG.error("Cannot decommission node {} in state {}", dn, opState);
-      throw new InvalidNodeStateException("Cannot decommission node "+
-          dn +" in state "+ opState);
+      throw new InvalidNodeStateException("Cannot decommission node " +
+          dn + " in state " + opState);
     }
   }
 
@@ -296,7 +296,7 @@
        // NodeNotFoundException is not expected here, except if the node is
        // removed in the very short window between validation and starting
        // decom. Therefore log a warning and ignore the exception.
-        LOG.warn("Host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("Host {} was not found in SCM. Ignoring the request to " +
             "recommission it.", dn.getHostName());
         errors.add(new DatanodeAdminError(dn.getHostName(),
             "The host was not found in SCM"));
@@ -306,7 +306,7 @@
   }
 
   public synchronized void recommission(DatanodeDetails dn)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     NodeStatus nodeStatus = getNodeStatus(dn);
     NodeOperationalState opState = nodeStatus.getOperationalState();
     if (opState != NodeOperationalState.IN_SERVICE) {
@@ -315,7 +315,7 @@
       monitor.stopMonitoring(dn);
       LOG.info("Queued node {} for recommission", dn);
     } else {
-      LOG.info("Recommission called on node {} with state {}. "+
+      LOG.info("Recommission called on node {} with state {}. " +
           "Nothing to do.", dn, opState);
     }
   }
@@ -333,7 +333,7 @@
        // NodeNotFoundException is not expected here, except if the node is
        // removed in the very short window between validation and starting
        // decom. Therefore log a warning and ignore the exception.
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
             "start maintenance on it", dn.getHostName());
       } catch (InvalidNodeStateException e) {
         errors.add(new DatanodeAdminError(dn.getHostName(), e.getMessage()));
@@ -360,12 +360,12 @@
       monitor.startMonitoring(dn);
       LOG.info("Starting Maintenance for node {}", dn);
     } else if (nodeStatus.isMaintenance()) {
-      LOG.info("Starting Maintenance called on node {} with state {}. "+
+      LOG.info("Starting Maintenance called on node {} with state {}. " +
           "Nothing to do.", dn, opState);
     } else {
       LOG.error("Cannot start maintenance on node {} in state {}", dn, opState);
-      throw new InvalidNodeStateException("Cannot start maintenance on node "+
-          dn +" in state "+ opState);
+      throw new InvalidNodeStateException("Cannot start maintenance on node " +
+          dn + " in state " + opState);
     }
   }
 
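Note on the NodeDecommissionManager hunks above: parseHostname relies on java.net.URI to split "host[:port]" strings — a URI must have a scheme, so a dummy one is prepended before parsing. The same trick in isolation (HostPort is a made-up helper, not part of Ozone):

import java.net.URI;
import java.net.URISyntaxException;

/** Sketch of the fake-scheme URI trick for splitting "host[:port]" strings. */
final class HostPort {
  private HostPort() { }

  /** Returns {host, port}; port is "-1" when no ":port" suffix was given. */
  static String[] parse(String raw) throws URISyntaxException {
    // A URI must have a scheme, so prepend a dummy one before parsing.
    URI uri = new URI("empty://" + raw.trim());
    if (uri.getHost() == null) {
      throw new URISyntaxException(raw, "no valid hostname found");
    }
    return new String[] {uri.getHost(), Integer.toString(uri.getPort())};
  }

  public static void main(String[] args) throws URISyntaxException {
    String[] hp = parse("dn1.example.com:9858");
    System.out.println(hp[0] + " -> port " + hp[1]); // dn1.example.com -> port 9858
  }
}
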
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4e1a964..0edd4df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -235,6 +235,17 @@
                     ContainerID containerId) throws NodeNotFoundException;
 
   /**
+   * Removes the given container from the specified datanode.
+   *
+   * @param datanodeDetails - DatanodeDetails
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if the datanode is not known. For a new
+   *                        datanode, use the addDatanodeInContainerMap call.
+   */
+  void removeContainer(DatanodeDetails datanodeDetails,
+      ContainerID containerId) throws NodeNotFoundException;
+
+  /**
    * Remaps datanode to containers mapping to the new set of containers.
    * @param datanodeDetails - DatanodeDetails
    * @param containerIds - Set of containerIDs
@@ -260,6 +271,13 @@
    */
   void addDatanodeCommand(UUID dnId, SCMCommand command);
 
+
+  /**
+   * Sends a refresh command to all healthy datanodes so that they refresh
+   * their volume usage info immediately.
+   */
+  void refreshAllHealthyDnUsageInfo();
+
   /**
    * Process node report.
    *
@@ -324,7 +342,7 @@
     return null;
   }
 
-  default HDDSLayoutVersionManager getLayoutVersionManager(){
+  default HDDSLayoutVersionManager getLayoutVersionManager() {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index e752454..ec8b848 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -602,6 +602,20 @@
   }
 
   /**
+   * Removes the given container from the specified datanode.
+   *
+   * @param uuid - datanode uuid
+   * @param containerId - containerID
+   * @throws NodeNotFoundException - if the datanode is not known. For a new
+   *                        datanode, use the addDatanodeInContainerMap call.
+   */
+  public void removeContainer(final UUID uuid,
+                           final ContainerID containerId)
+      throws NodeNotFoundException {
+    nodeStateMap.removeContainer(uuid, containerId);
+  }
+
+  /**
    * Update set of containers available on a datanode.
    * @param uuid - DatanodeID
    * @param containerIds - Set of containerIDs
@@ -613,7 +627,9 @@
   }
 
   /**
-   * Return set of containerIDs available on a datanode.
+   * Return set of containerIDs available on a datanode. This is a copy of the
+   * set which resides inside NodeStateMap and hence can be modified without
+   * synchronization or side effects.
    * @param uuid - DatanodeID
    * @return - set of containerIDs
    */
@@ -728,7 +744,7 @@
         (lastHbTime) -> lastHbTime < staleNodeDeadline;
 
     try {
-      for(DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
+      for (DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
         NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid());
         switch (status.getHealth()) {
         case HEALTHY:
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
index a9164c7..03dd2e2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
@@ -209,8 +209,8 @@
 
   @Override
   public String toString() {
-    return "OperationalState: "+operationalState+" Health: "+health+
-        " OperationStateExpiry: "+opStateExpiryEpochSeconds;
+    return "OperationalState: " + operationalState + " Health: " + health +
+        " OperationStateExpiry: " + opStateExpiryEpochSeconds;
   }
 
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 68c2697..bef7ab2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
+import org.apache.hadoop.ozone.protocol.commands.RefreshVolumeUsageCommand;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
@@ -85,6 +86,7 @@
 import java.util.concurrent.ScheduledFuture;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY;
 
@@ -268,7 +270,7 @@
    */
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
-      NodeOperationalState newState) throws NodeNotFoundException{
+      NodeOperationalState newState) throws NodeNotFoundException {
     setNodeOperationalState(datanodeDetails, newState, 0);
   }
 
@@ -283,7 +285,7 @@
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
       NodeOperationalState newState, long opStateExpiryEpocSec)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     nodeStateManager.setNodeOperationalState(
         datanodeDetails, newState, opStateExpiryEpocSec);
   }
@@ -612,7 +614,7 @@
         // send Finalize command multiple times.
         scmNodeEventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
             new CommandForDatanode<>(datanodeDetails.getUuid(), finalizeCmd));
-      } catch(NotLeaderException ex) {
+      } catch (NotLeaderException ex) {
         LOG.warn("Skip sending finalize upgrade command since current SCM is" +
             "not leader.", ex);
       }
@@ -680,7 +682,7 @@
   public List<DatanodeUsageInfo> getMostOrLeastUsedDatanodes(
       boolean mostUsed) {
     List<DatanodeDetails> healthyNodes =
-        getNodes(NodeOperationalState.IN_SERVICE, NodeState.HEALTHY);
+        getNodes(IN_SERVICE, NodeState.HEALTHY);
 
     List<DatanodeUsageInfo> datanodeUsageInfoList =
         new ArrayList<>(healthyNodes.size());
@@ -764,7 +766,7 @@
     for (DatanodeInfo dni : nodeStateManager.getAllNodes()) {
       NodeStatus status = dni.getNodeStatus();
       nodes.get(status.getOperationalState().name())
-          .compute(status.getHealth().name(), (k, v) -> v+1);
+          .compute(status.getHealth().name(), (k, v) -> v + 1);
     }
     return nodes;
   }
@@ -972,6 +974,13 @@
     nodeStateManager.addContainer(datanodeDetails.getUuid(), containerId);
   }
 
+  @Override
+  public void removeContainer(final DatanodeDetails datanodeDetails,
+                           final ContainerID containerId)
+      throws NodeNotFoundException {
+    nodeStateManager.removeContainer(datanodeDetails.getUuid(), containerId);
+  }
+
   /**
    * Update set of containers available on a datanode.
    *
@@ -988,7 +997,9 @@
   }
 
   /**
-   * Return set of containerIDs available on a datanode.
+   * Return set of containerIDs available on a datanode. This is a copy of the
+   * set which resides inside NodeManager and hence can be modified without
+   * synchronization or side effects.
    *
    * @param datanodeDetails - DatanodeID
    * @return - set of containerIDs
@@ -1009,6 +1020,25 @@
   }
 
   /**
+   * Sends a refresh command to all healthy datanodes so that they refresh
+   * their volume usage info immediately.
+   */
+  @Override
+  public void refreshAllHealthyDnUsageInfo() {
+    RefreshVolumeUsageCommand refreshVolumeUsageCommand =
+        new RefreshVolumeUsageCommand();
+    try {
+      refreshVolumeUsageCommand.setTerm(scmContext.getTermOfLeader());
+    } catch (NotLeaderException nle) {
+      LOG.warn("Skip sending refreshVolumeUsage command,"
+          + " since current SCM is not leader.", nle);
+      return;
+    }
+    getNodes(IN_SERVICE, HEALTHY).forEach(datanode ->
+        addDatanodeCommand(datanode.getUuid(), refreshVolumeUsageCommand));
+  }
+
+  /**
    * This method is called by EventQueue whenever someone adds a new
    * DATANODE_COMMAND to the Queue.
    *
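Note on the hunks above: refreshAllHealthyDnUsageInfo stamps the command with the current leader term and quietly returns when this SCM is not the leader, then fans the command out to every IN_SERVICE, HEALTHY datanode. The same leader-guarded broadcast shape, sketched with illustrative types in place of ScmContext and SCMCommand:

import java.util.List;
import java.util.UUID;
import java.util.function.BiConsumer;

/** Sketch: broadcast a command only while holding leadership. */
final class LeaderGuardedBroadcast {

  /** Stand-in for the leader-aware context; not the Ozone ScmContext API. */
  interface LeaderContext {
    /** Returns the current term, or throws if this instance is not leader. */
    long currentTerm() throws IllegalStateException;
  }

  static void broadcast(LeaderContext context, List<UUID> healthyNodes,
      BiConsumer<UUID, Long> sendCommand) {
    final long term;
    try {
      term = context.currentTerm();
    } catch (IllegalStateException notLeader) {
      // Not the leader: skip quietly, another SCM will issue the command.
      return;
    }
    healthyNodes.forEach(node -> sendCommand.accept(node, term));
  }
}
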
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 6eb7359..b727580 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -129,12 +129,12 @@
      *     ...
      */
     MetricsRecordBuilder metrics = collector.addRecord(registry.info());
-    for(Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
-      for(Map.Entry<String, Integer> h : e.getValue().entrySet()) {
+    for (Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
+      for (Map.Entry<String, Integer> h : e.getValue().entrySet()) {
         metrics.addGauge(
             Interns.info(
-                StringUtils.camelize(e.getKey()+"_"+h.getKey()+"_nodes"),
-                "Number of "+e.getKey()+" "+h.getKey()+" datanodes"),
+                StringUtils.camelize(e.getKey() + "_" + h.getKey() + "_nodes"),
+                "Number of " + e.getKey() + " " + h.getKey() + " datanodes"),
             h.getValue());
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 1b0e5b5..ed45ed0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -136,7 +136,7 @@
 
   //TODO: Unregister call should happen as a part of SCMNodeManager shutdown.
   private void unregisterMXBean() {
-    if(this.scmNodeStorageInfoBean != null) {
+    if (this.scmNodeStorageInfoBean != null) {
       MBeans.unregister(this.scmNodeStorageInfoBean);
       this.scmNodeStorageInfoBean = null;
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
index 57a377d..5269a7a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
@@ -99,7 +99,7 @@
   Set<T> getObjects(UUID datanode) {
     Preconditions.checkNotNull(datanode);
     final Set<T> s = dn2ObjectMap.get(datanode);
-    return s != null? Collections.unmodifiableSet(s): Collections.emptySet();
+    return s != null ? new HashSet<>(s) : Collections.emptySet();
   }
 
   public ReportResult.ReportResultBuilder<T> newBuilder() {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index 0a3e137..0d8580d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdds.scm.node.states;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -330,7 +329,7 @@
   }
 
   public void setContainers(UUID uuid, Set<ContainerID> containers)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     lock.writeLock().lock();
     try {
       checkIfNodeExist(uuid);
@@ -345,8 +344,7 @@
     lock.readLock().lock();
     try {
       checkIfNodeExist(uuid);
-      return Collections
-          .unmodifiableSet(new HashSet<>(nodeToContainer.get(uuid)));
+      return new HashSet<>(nodeToContainer.get(uuid));
     } finally {
       lock.readLock().unlock();
     }
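Note on the hunks above: both Node2ObjectsMap.getObjects and NodeStateMap.getContainers now return a plain HashSet copy instead of an unmodifiable wrapper, so callers may add or remove entries on their copy without holding the lock or disturbing internal state. The difference between the two styles in one small sketch (Views is illustrative only):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

/** Sketch: unmodifiable view versus defensive copy of an internal set. */
final class Views {
  private final Set<String> internal = new HashSet<>();

  /** Read-only view: callers cannot mutate, and they see later changes. */
  Set<String> view() {
    return Collections.unmodifiableSet(internal);
  }

  /** Defensive copy: callers may mutate freely; internal state is untouched. */
  Set<String> copy() {
    return new HashSet<>(internal);
  }
}
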
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 9b9e206..954d212 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -562,7 +562,7 @@
       backgroundPipelineCreator.stop();
     }
 
-    if(pmInfoBean != null) {
+    if (pmInfoBean != null) {
       MBeans.unregister(this.pmInfoBean);
       pmInfoBean = null;
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 85ea5a5..64815a9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -90,12 +90,12 @@
     for (PipelineReport report : pipelineReport.getPipelineReportList()) {
       try {
         processPipelineReport(report, dn, publisher);
-      } catch(NotLeaderException ex) {
+      } catch (NotLeaderException ex) {
         // Avoid NotLeaderException logging which happens when processing
         // pipeline report on followers.
       } catch (PipelineNotFoundException e) {
         LOGGER.error("Could not find pipeline {}", report.getPipelineID());
-      } catch(IOException e) {
+      } catch (IOException e) {
         LOGGER.error("Could not process pipeline report={} from dn={}.",
             report, dn, e);
       }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
index 9589eb9..0b93f4d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java
@@ -258,10 +258,8 @@
       HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState)
       throws IOException {
     PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineIDProto);
-    Pipeline.PipelineState oldState = null;
     lock.writeLock().lock();
     try {
-      oldState = getPipeline(pipelineID).getPipelineState();
       // null check is here to prevent the case where SCM store
       // is closed but the staleNode handlers/pipeline creations
       // still try to access it.
@@ -275,9 +273,8 @@
       LOG.warn("Pipeline {} is not found in the pipeline Map. Pipeline"
           + " may have been deleted already.", pipelineID);
     } catch (IOException ex) {
-      LOG.warn("Pipeline {} state update failed", pipelineID);
-      // revert back to old state in memory
-      pipelineStateMap.updatePipelineState(pipelineID, oldState);
+      LOG.error("Pipeline {} state update failed", pipelineID);
+      throw ex;
     } finally {
       lock.writeLock().unlock();
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
index 0e98925..8b9d913 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
@@ -26,7 +26,16 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 847b50e..bbdabf0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -91,7 +91,7 @@
             ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
     final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
     final RaftPeer p = RatisHelper.toRaftPeer(dn);
-    try(RaftClient client = RatisHelper
+    try (RaftClient client = RatisHelper
         .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
             retryPolicy, grpcTlsConfig, ozoneConf)) {
       client.getGroupManagementApi(p.getId())
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
index 356d047..f130eed 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
@@ -44,7 +44,7 @@
   public ContainerInfo getContainer(final long size,
       ReplicationConfig repConfig, String owner, ExcludeList excludeList)
       throws IOException {
-    switch(repConfig.getReplicationType()) {
+    switch (repConfig.getReplicationType()) {
     case STAND_ALONE:
       return standaloneProvider
           .getContainer(size, repConfig, owner, excludeList);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
index 4b97444..9d5d72a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCrlsRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCrlsResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetLatestCrlIdRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetLatestCrlIdResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
@@ -149,6 +150,11 @@
             .setRevokeCertificatesResponseProto(revokeCertificates(
                 request.getRevokeCertificatesRequest()))
             .build();
+      case GetCert:
+        return scmSecurityResponse.setGetCertResponseProto(
+            getCertificate(request.getGetCertRequest()))
+            .build();
+
       default:
         throw new IllegalArgumentException(
             "Unknown request type: " + request.getCmdType());
@@ -212,6 +218,28 @@
   }
 
   /**
+   * Get SCM signed certificate.
+   *
+   * @param request
+   * @return SCMGetCertResponseProto.
+   */
+  public SCMGetCertResponseProto getCertificate(
+      SCMGetCertRequestProto request) throws IOException {
+    String certificate = impl
+        .getCertificate(request.getNodeDetails(),
+            request.getCSR());
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate)
+            .setX509CACertificate(impl.getCACertificate());
+    setRootCAIfNeeded(builder);
+
+    return builder.build();
+  }
+
+  /**
    * Get signed certificate for SCM.
    *
    * @param request - SCMGetSCMCertRequestProto
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 0370623..5d6ee5b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -758,7 +758,7 @@
     Optional<Long> maxSizeEnteringTargetInGB = Optional.empty();
     Optional<Long> maxSizeLeavingSourceInGB = Optional.empty();
 
-    if(request.hasThreshold()) {
+    if (request.hasThreshold()) {
       threshold = Optional.of(request.getThreshold());
     }
 
@@ -778,15 +778,15 @@
                   100));
     }
 
-    if(request.hasMaxSizeToMovePerIterationInGB()) {
+    if (request.hasMaxSizeToMovePerIterationInGB()) {
       maxSizeToMovePerIterationInGB =
           Optional.of(request.getMaxSizeToMovePerIterationInGB());
     }
-    if(request.hasMaxSizeEnteringTargetInGB()) {
+    if (request.hasMaxSizeEnteringTargetInGB()) {
       maxSizeEnteringTargetInGB =
           Optional.of(request.getMaxSizeEnteringTargetInGB());
     }
-    if(request.hasMaxSizeLeavingSourceInGB()) {
+    if (request.hasMaxSizeLeavingSourceInGB()) {
       maxSizeLeavingSourceInGB =
           Optional.of(request.getMaxSizeLeavingSourceInGB());
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
index ef9d6f4..2a1c895 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
@@ -42,7 +42,7 @@
  * Class defining Safe mode exit criteria for Containers.
  */
 public class ContainerSafeModeRule extends
-    SafeModeExitRule<NodeRegistrationContainerReport>{
+    SafeModeExitRule<NodeRegistrationContainerReport> {
 
   public static final Logger LOG =
       LoggerFactory.getLogger(ContainerSafeModeRule.class);
@@ -115,7 +115,7 @@
 
     reportsProto.getReport().getReportsList().forEach(c -> {
       if (containerMap.containsKey(c.getContainerID())) {
-        if(containerMap.remove(c.getContainerID()) != null) {
+        if (containerMap.remove(c.getContainerID()) != null) {
           containerWithMinReplicas.getAndAdd(1);
           getSafeModeMetrics()
               .incCurrentContainersWithOneReplicaReportedCount();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
index 0c4ce84..b03fedb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
@@ -33,7 +33,7 @@
  * registered with SCM.
  */
 public class DataNodeSafeModeRule extends
-    SafeModeExitRule<NodeRegistrationContainerReport>{
+    SafeModeExitRule<NodeRegistrationContainerReport> {
 
   // Min DataNodes required to exit safe mode.
   private int requiredDns;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index c7b831bf..fb4ba7d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -204,7 +204,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.ALLOCATE_BLOCK, auditMap)
         );
@@ -274,7 +274,7 @@
   @Override
   public ScmInfo getScmInfo() throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       ScmInfo.Builder builder =
           new ScmInfo.Builder()
               .setClusterId(scm.getScmStorageConfig().getClusterID())
@@ -287,7 +287,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
         );
@@ -305,7 +305,7 @@
     auditMap.put("cluster", String.valueOf(request.getClusterId()));
     auditMap.put("addr", String.valueOf(request.getRatisAddr()));
     boolean auditSuccess = true;
-    try{
+    try {
       return scm.getScmHAManager().addSCM(request);
     } catch (Exception ex) {
       auditSuccess = false;
@@ -314,7 +314,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.ADD_SCM, auditMap)
         );
@@ -326,12 +326,12 @@
   public List<DatanodeDetails> sortDatanodes(List<String> nodes,
       String clientMachine) throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       NodeManager nodeManager = scm.getScmNodeManager();
       Node client = null;
       List<DatanodeDetails> possibleClients =
           nodeManager.getNodesByAddress(clientMachine);
-      if (possibleClients.size()>0){
+      if (possibleClients.size() > 0) {
         client = possibleClients.get(0);
       }
       List<Node> nodeList = new ArrayList();
@@ -353,7 +353,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.SORT_DATANODE, null)
         );
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 59e4b2b..9388a98 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -222,7 +222,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_CONTAINER, auditMap)
         );
@@ -363,7 +363,7 @@
    * replication factor.
    */
   private boolean hasRequiredReplicas(ContainerInfo contInfo) {
-    try{
+    try {
       return getScm().getContainerManager()
           .getContainerReplicas(contInfo.containerID())
           .size() >= contInfo.getReplicationConfig().getRequiredNodes();
@@ -458,7 +458,7 @@
           buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
       }
@@ -483,7 +483,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.DELETE_CONTAINER, auditMap)
         );
@@ -643,7 +643,7 @@
   @Override
   public ScmInfo getScmInfo() throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       ScmInfo.Builder builder =
           new ScmInfo.Builder()
               .setClusterId(scm.getScmStorageConfig().getClusterID())
@@ -667,7 +667,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
         );
@@ -936,7 +936,8 @@
    */
   @Override
   public List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
-      boolean mostUsed, int count) throws IOException, IllegalArgumentException{
+      boolean mostUsed, int count)
+      throws IOException, IllegalArgumentException {
 
     // check admin authorisation
     try {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 745be9d..a93d7ae 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -255,6 +255,16 @@
         ContainerReportsProto report) {
       super(datanodeDetails, report);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      return this == o;
+    }
+
+    @Override
+    public int hashCode() {
+      return this.getDatanodeDetails().getUuid().hashCode();
+    }
   }
 
   /**
@@ -268,6 +278,16 @@
         IncrementalContainerReportProto report) {
       super(datanodeDetails, report);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      return this == o;
+    }
+
+    @Override
+    public int hashCode() {
+      return this.getDatanodeDetails().getUuid().hashCode();
+    }
   }
 
   /**
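Note on the hunks above: the report events gain reference-equality equals plus a UUID-derived hashCode — distinct report objects are never collapsed as duplicates, yet every report from one datanode hashes to the same value, which helps when, for example, an executor partitions events by hash. The pattern in isolation (DatanodeReportEvent is an illustrative class, not the Ozone event type):

import java.util.Objects;
import java.util.UUID;

/** Sketch: identity equality plus a stable, key-derived hash code. */
final class DatanodeReportEvent {
  private final UUID datanodeUuid;
  private final byte[] payload;

  DatanodeReportEvent(UUID datanodeUuid, byte[] payload) {
    this.datanodeUuid = Objects.requireNonNull(datanodeUuid);
    this.payload = payload;
  }

  @Override
  public boolean equals(Object o) {
    return this == o; // distinct reports are never "equal"
  }

  @Override
  public int hashCode() {
    return datanodeUuid.hashCode(); // all reports from one node hash alike
  }
}
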
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 248c90c..0b5bee8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -70,6 +70,7 @@
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.FinalizeNewLayoutVersionCommand;
+import org.apache.hadoop.ozone.protocol.commands.RefreshVolumeUsageCommand;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -89,6 +90,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.refreshVolumeUsageInfo;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.setNodeOperationalStateCommand;
@@ -204,7 +206,7 @@
           buildAuditMessageForFailure(SCMAction.GET_VERSION, null, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_VERSION, null));
       }
@@ -249,7 +251,7 @@
           buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap));
       }
@@ -284,7 +286,7 @@
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap)
         );
@@ -330,7 +332,8 @@
       scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs);
       return builder
           .setCommandType(deleteBlocksCommand)
-          .setDeleteBlocksCommandProto(((DeleteBlocksCommand) cmd).getProto())
+          .setDeleteBlocksCommandProto(
+              ((DeleteBlocksCommand) cmd).getProto())
           .build();
     case closeContainerCommand:
       return builder
@@ -373,6 +376,13 @@
             .setFinalizeNewLayoutVersionCommandProto(
                 ((FinalizeNewLayoutVersionCommand)cmd).getProto())
             .build();
+    case refreshVolumeUsageInfo:
+      return builder
+          .setCommandType(refreshVolumeUsageInfo)
+          .setRefreshVolumeUsageCommandProto(
+              ((RefreshVolumeUsageCommand)cmd).getProto())
+          .build();
+
     default:
       throw new IllegalArgumentException("Scm command " +
           cmd.getType().toString() + " is not implemented");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
index 15f72e1..0602ba2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
@@ -31,7 +31,10 @@
 
 import java.util.concurrent.atomic.AtomicReference;
 
-import static org.apache.hadoop.hdds.HddsConfigKeys.*;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL;
 
 /**
  * {@link PolicyProvider} for SCM protocols.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index ede522d..60f1099 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
@@ -159,6 +160,17 @@
     return getEncodedCertToString(certSignReq, NodeType.DATANODE);
   }
 
+  @Override
+  public String getCertificate(
+      NodeDetailsProto nodeDetails,
+      String certSignReq) throws IOException {
+    Objects.requireNonNull(nodeDetails);
+    LOGGER.info("Processing CSR for {} {}, UUID: {}",
+        nodeDetails.getNodeType(), nodeDetails.getHostName(),
+        nodeDetails.getUuid());
+    return getEncodedCertToString(certSignReq, nodeDetails.getNodeType());
+  }
+
   /**
    * Get SCM signed certificate for OM.
    *
@@ -261,8 +273,10 @@
       errorCode = SCMSecurityException.ErrorCode.GET_SCM_CERTIFICATE_FAILED;
     } else if (role == NodeType.OM) {
       errorCode = SCMSecurityException.ErrorCode.GET_OM_CERTIFICATE_FAILED;
-    } else {
+    } else if (role == NodeType.DATANODE) {
       errorCode = SCMSecurityException.ErrorCode.GET_DN_CERTIFICATE_FAILED;
+    } else {
+      errorCode = SCMSecurityException.ErrorCode.GET_CERTIFICATE_FAILED;
     }
     return new SCMSecurityException("generate " + role.toString() +
         " Certificate operation failed", ex, errorCode);
@@ -305,7 +319,7 @@
       throw new SCMSecurityException("Get CA Certificate is not supported " +
           "when custom CA is enabled.");
     }
-    if(LOGGER.isDebugEnabled()) {
+    if (LOGGER.isDebugEnabled()) {
       LOGGER.debug("Getting CA certificate.");
     }
     try {
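The earlier hunk in this file extends the role-to-error-code mapping with an explicit DATANODE branch and a generic fallback code. An alternative shape for the same mapping is a lookup table; the sketch below uses plain JDK enums with hypothetical names (not the Ozone types) to illustrate the idea, and is not the project's implementation.

import java.util.EnumMap;
import java.util.Map;

final class RoleErrorCodeSketch {
  enum Role { SCM, OM, DATANODE, RECON }
  enum ErrorCode {
    GET_SCM_CERTIFICATE_FAILED, GET_OM_CERTIFICATE_FAILED,
    GET_DN_CERTIFICATE_FAILED, GET_CERTIFICATE_FAILED
  }

  private static final Map<Role, ErrorCode> CODES = new EnumMap<>(Role.class);
  static {
    CODES.put(Role.SCM, ErrorCode.GET_SCM_CERTIFICATE_FAILED);
    CODES.put(Role.OM, ErrorCode.GET_OM_CERTIFICATE_FAILED);
    CODES.put(Role.DATANODE, ErrorCode.GET_DN_CERTIFICATE_FAILED);
  }

  // Unmapped roles fall back to the generic code, mirroring the new else branch.
  static ErrorCode codeFor(Role role) {
    return CODES.getOrDefault(role, ErrorCode.GET_CERTIFICATE_FAILED);
  }

  public static void main(String[] args) {
    System.out.println(codeFor(Role.DATANODE)); // GET_DN_CERTIFICATE_FAILED
    System.out.println(codeFor(Role.RECON));    // GET_CERTIFICATE_FAILED
  }
}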
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
index 6b8f192..3048ece 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
@@ -133,6 +133,6 @@
   }
 
   public boolean checkPrimarySCMIdInitialized() {
-    return getPrimaryScmNodeId() != null ? true : false;
+    return getPrimaryScmNodeId() != null;
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 8d8abdd..7eb5a3d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -64,7 +64,8 @@
 import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
 import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
-import org.apache.hadoop.hdds.server.events.FixedThreadPoolExecutor;
+import org.apache.hadoop.hdds.server.events.EventExecutor;
+import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor;
 import org.apache.hadoop.hdds.server.http.RatisDropwizardExports;
 import org.apache.hadoop.hdds.utils.HAUtils;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
@@ -110,6 +111,8 @@
 import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizer;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.security.OzoneSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
@@ -160,6 +163,7 @@
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
@@ -250,6 +254,7 @@
   private final OzoneConfiguration configuration;
   private SCMContainerMetrics scmContainerMetrics;
   private SCMContainerPlacementMetrics placementMetrics;
+  private PlacementPolicy containerPlacementPolicy;
   private MetricsSystem ms;
   private final Map<String, RatisDropwizardExports> ratisMetricsMap =
       new ConcurrentHashMap<>();
@@ -380,11 +385,7 @@
 
     initializeEventHandlers();
 
-    containerBalancer = new ContainerBalancer(scmNodeManager, containerManager,
-        replicationManager, configuration, scmContext, clusterMap,
-        ContainerPlacementPolicyFactory
-            .getPolicy(conf, scmNodeManager, clusterMap, true,
-                placementMetrics));
+    containerBalancer = new ContainerBalancer(this);
     LOG.info(containerBalancer.toString());
 
     // Emit initial safe mode status, as now handlers are registered.
@@ -438,11 +439,34 @@
     eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
     eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
     eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT,
-        new FixedThreadPoolExecutor<>(SCMEvents.CONTAINER_REPORT.getName(),
+
+    // Use the same executor for both ICR and FCR.
+    // The executor maps each event to a thread based on the datanode,
+    // and the dispatcher should always dispatch the FCR first,
+    // followed by the ICR.
+    List<ThreadPoolExecutor> executors =
+        FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(
+            SCMEvents.CONTAINER_REPORT.getName()
+                + "_OR_"
+                + SCMEvents.INCREMENTAL_CONTAINER_REPORT.getName());
+
+    EventExecutor<ContainerReportFromDatanode>
+        containerReportExecutors =
+        new FixedThreadPoolWithAffinityExecutor<>(
             EventQueue.getExecutorName(SCMEvents.CONTAINER_REPORT,
-                containerReportHandler)), containerReportHandler);
+                containerReportHandler),
+            executors);
+    EventExecutor<IncrementalContainerReportFromDatanode>
+        incrementalReportExecutors =
+        new FixedThreadPoolWithAffinityExecutor<>(
+            EventQueue.getExecutorName(
+                SCMEvents.INCREMENTAL_CONTAINER_REPORT,
+                incrementalContainerReportHandler),
+            executors);
+
+    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportExecutors,
+        containerReportHandler);
     eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT,
+        incrementalReportExecutors,
         incrementalContainerReportHandler);
     eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
     eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
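The hunk above routes full and incremental container reports through the same FixedThreadPoolWithAffinityExecutor pool so that reports from one datanode are handled in order. Below is a minimal, self-contained sketch of that affinity idea using only the JDK; the class and method names are hypothetical and are not part of the Ozone API.

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class AffinityDispatcherSketch {
  private final List<ExecutorService> workers = new ArrayList<>();

  AffinityDispatcherSketch(int poolSize) {
    // One single-threaded executor per slot preserves per-key ordering.
    for (int i = 0; i < poolSize; i++) {
      workers.add(Executors.newSingleThreadExecutor());
    }
  }

  // Events carrying the same key (e.g. a datanode UUID) always land on the
  // same worker, so a full report queued before an incremental report is
  // also executed before it.
  void dispatch(UUID datanodeId, Runnable event) {
    int slot = Math.floorMod(datanodeId.hashCode(), workers.size());
    workers.get(slot).execute(event);
  }

  void shutdown() {
    workers.forEach(ExecutorService::shutdown);
  }

  public static void main(String[] args) {
    AffinityDispatcherSketch dispatcher = new AffinityDispatcherSketch(4);
    UUID dn = UUID.randomUUID();
    dispatcher.dispatch(dn, () -> System.out.println("full report for " + dn));
    dispatcher.dispatch(dn, () -> System.out.println("incremental report for " + dn));
    dispatcher.shutdown();
  }
}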
@@ -560,7 +584,7 @@
           .build();
     }
 
-    if(configurator.getScmNodeManager() != null) {
+    if (configurator.getScmNodeManager() != null) {
       scmNodeManager = configurator.getScmNodeManager();
     } else {
       scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue,
@@ -568,7 +592,7 @@
     }
 
     placementMetrics = SCMContainerPlacementMetrics.create();
-    PlacementPolicy containerPlacementPolicy =
+    containerPlacementPolicy =
         ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager,
             clusterMap, true, placementMetrics);
 
@@ -619,7 +643,7 @@
           scmHAManager,
           getScmMetadataStore().getMoveTable());
     }
-    if(configurator.getScmSafeModeManager() != null) {
+    if (configurator.getScmSafeModeManager() != null) {
       scmSafeModeManager = configurator.getScmSafeModeManager();
     } else {
       scmSafeModeManager = new SCMSafeModeManager(conf,
@@ -645,7 +669,7 @@
 
     // TODO: Support Certificate Server loading via Class Name loader.
     // So it is easy to use different Certificate Servers if needed.
-    if(this.scmMetadataStore == null) {
+    if (this.scmMetadataStore == null) {
       LOG.error("Cannot initialize Certificate Server without a valid meta " +
           "data layer.");
       throw new SCMException("Cannot initialize CA without a valid metadata " +
@@ -812,7 +836,7 @@
   private void initializeMetadataStore(OzoneConfiguration conf,
                                        SCMConfigurator configurator)
       throws IOException {
-    if(configurator.getMetadataStore() != null) {
+    if (configurator.getMetadataStore() != null) {
       scmMetadataStore = configurator.getMetadataStore();
     } else {
       scmMetadataStore = new SCMMetadataStoreImpl(conf);
@@ -993,7 +1017,7 @@
         // will be persisted into the version file once this node gets added
         // to existing SCM ring post node regular start up.
 
-        if(OzoneSecurityUtil.isSecurityEnabled(conf)) {
+        if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
           HASecurityUtils.initializeSecurity(scmStorageConfig, config,
               getScmAddress(scmhaNodeDetails, conf), false);
         }
@@ -1624,12 +1648,15 @@
     return scmSafeModeManager;
   }
 
-  @VisibleForTesting
   @Override
   public ReplicationManager getReplicationManager() {
     return replicationManager;
   }
 
+  public PlacementPolicy getContainerPlacementPolicy() {
+    return containerPlacementPolicy;
+  }
+
   @VisibleForTesting
   @Override
   public ContainerBalancer getContainerBalancer() {
@@ -1779,7 +1806,7 @@
     Map<String, Integer> nodeStateCount = new HashMap<>();
     for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
       nodeStateCount.put(state.toString(),
-          containerManager.getContainers(state).size());
+          containerManager.getContainerStateCount(state));
     }
     return nodeStateCount;
   }
@@ -1848,7 +1875,7 @@
   }
 
   public StatusAndMessages finalizeUpgrade(String upgradeClientID)
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.finalize(upgradeClientID, this);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
index 1d8859f..030601a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
@@ -179,7 +179,7 @@
 
     @Override
     public boolean init(OzoneConfiguration conf, String clusterId)
-        throws IOException{
+        throws IOException {
       return StorageContainerManager.scmInit(conf, clusterId);
     }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 1fc4f76..feb58fc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -613,7 +613,7 @@
     conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
     conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if(scmStore.getState() != Storage.StorageState.INITIALIZED) {
+    if (scmStore.getState() != Storage.StorageState.INITIALIZED) {
       String clusterId = UUID.randomUUID().toString();
       String scmId = UUID.randomUUID().toString();
       scmStore.setClusterId(clusterId);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
index 20c0468..0c9222d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
@@ -43,7 +43,7 @@
   public Timeout timeout = Timeout.seconds(300);
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   /**
    * Verify that the datanode endpoint is parsed correctly.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
index f738650..face383 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
@@ -31,11 +31,14 @@
 import org.apache.hadoop.test.PathUtils;
 
 import org.apache.commons.io.FileUtils;
+
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import org.junit.Rule;
@@ -53,10 +56,10 @@
       TestHddsServerUtils.class);
 
   @Rule
-  public Timeout timeout = Timeout.seconds(300);;
+  public Timeout timeout = Timeout.seconds(300);
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   /**
    * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
@@ -219,4 +222,34 @@
     // the min limit value will be returned
     assertEquals(90000, HddsServerUtil.getStaleNodeInterval(conf));
   }
+
+  @Test
+  public void testGetDatanodeIdFilePath() {
+    final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
+    final File metaDir = new File(testDir, "metaDir");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
+
+    try {
+      // test fallback if not set
+      assertEquals(new File(metaDir,
+              OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString(),
+          HddsServerUtil.getDatanodeIdFilePath(conf));
+
+      // test fallback if set empty
+      conf.set(OZONE_SCM_DATANODE_ID_DIR, "");
+      assertEquals(new File(metaDir,
+              OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString(),
+          HddsServerUtil.getDatanodeIdFilePath(conf));
+
+      // test use specific value if set
+      final File dnIdDir = new File(testDir, "datanodeIDDir");
+      conf.set(OZONE_SCM_DATANODE_ID_DIR, dnIdDir.getPath());
+      assertEquals(new File(dnIdDir,
+              OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString(),
+          HddsServerUtil.getDatanodeIdFilePath(conf));
+    } finally {
+      FileUtils.deleteQuietly(metaDir);
+    }
+  }
 }
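The new test exercises the fallback logic for the datanode ID file location: a dedicated directory wins if configured and non-empty, otherwise the path falls back under the metadata directory. A minimal sketch of that resolution order follows; the helper and the key strings here are illustrative only, not the HddsServerUtil API.

import java.io.File;
import java.util.Map;

final class DatanodeIdPathSketch {
  // Returns dedicatedDirKey + fileName if the key is set and non-empty,
  // otherwise falls back to metadataDirKey + fileName.
  static String resolve(Map<String, String> conf, String dedicatedDirKey,
      String metadataDirKey, String fileName) {
    String dir = conf.getOrDefault(dedicatedDirKey, "");
    if (dir.isEmpty()) {
      dir = conf.get(metadataDirKey);
    }
    return new File(dir, fileName).toString();
  }

  public static void main(String[] args) {
    Map<String, String> conf = Map.of("ozone.metadata.dirs", "/var/ozone/meta");
    // Falls back to the metadata directory because no dedicated dir is set.
    System.out.println(resolve(conf, "ozone.scm.datanode.id.dir",
        "ozone.metadata.dirs", "datanode.id"));
  }
}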
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index e078b1f..e8bd07b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -103,7 +103,7 @@
   public ExpectedException thrown = ExpectedException.none();
 
   @Rule
-  public TemporaryFolder folder= new TemporaryFolder();
+  public TemporaryFolder folder = new TemporaryFolder();
   private SCMMetadataStore scmMetadataStore;
   private ReplicationConfig replicationConfig;
 
@@ -452,7 +452,7 @@
       // the pipeline per raft log disk config is set to 1 by default
       int numContainers = (int)Math.ceil((double)
               (numContainerPerOwnerInPipeline *
-                  numContainerPerOwnerInPipeline)/numMetaDataVolumes);
+                  numContainerPerOwnerInPipeline) / numMetaDataVolumes);
       Assert.assertTrue(numContainers == pipelineManager.
           getNumberOfContainers(pipeline.getId()));
       Assert.assertTrue(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index f6cab87..6836218 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -531,8 +531,20 @@
   }
 
   @Override
+  public void removeContainer(DatanodeDetails dd,
+      ContainerID containerId) {
+    try {
+      Set<ContainerID> set = node2ContainerMap.getContainers(dd.getUuid());
+      set.remove(containerId);
+      node2ContainerMap.setContainersForDatanode(dd.getUuid(), set);
+    } catch (SCMException e) {
+      e.printStackTrace();
+    }
+  }
+
+  @Override
   public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    if(commandMap.containsKey(dnId)) {
+    if (commandMap.containsKey(dnId)) {
       List<SCMCommand> commandList = commandMap.get(dnId);
       Preconditions.checkNotNull(commandList);
       commandList.add(command);
@@ -544,6 +556,15 @@
   }
 
   /**
+   * Send refresh command to all healthy datanodes to refresh
+   * volume usage info immediately.
+   */
+  @Override
+  public void refreshAllHealthyDnUsageInfo() {
+    //no op
+  }
+
+  /**
    * Empty implementation for processNodeReport.
    *
    * @param dnUuid
@@ -601,7 +622,7 @@
   }
 
   public void clearCommandQueue(UUID dnId) {
-    if(commandMap.containsKey(dnId)) {
+    if (commandMap.containsKey(dnId)) {
       commandMap.put(dnId, new LinkedList<>());
     }
   }
@@ -799,7 +820,7 @@
     if (uuids == null) {
       return results;
     }
-    for(String uuid : uuids) {
+    for (String uuid : uuids) {
       DatanodeDetails dn = getNodeByUuid(uuid);
       if (dn != null) {
         results.add(dn);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index c015f18..1dc4fb4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -41,7 +41,11 @@
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 
 /**
@@ -82,7 +86,7 @@
    */
   public void setPipelines(DatanodeDetails dd, int count) {
     Set<PipelineID> pipelines = new HashSet<>();
-    for (int i=0; i<count; i++) {
+    for (int i = 0; i < count; i++) {
       pipelines.add(PipelineID.randomId());
     }
     pipelineMap.put(dd.getUuid(), pipelines);
@@ -257,12 +261,24 @@
       throws NodeNotFoundException {
   }
 
-
+  @Override
+  public void removeContainer(DatanodeDetails datanodeDetails,
+                           ContainerID containerId) {
+  }
 
   @Override
   public void addDatanodeCommand(UUID dnId, SCMCommand command) {
   }
 
+  /**
+   * Send refresh command to all healthy datanodes to refresh
+   * volume usage info immediately.
+   */
+  @Override
+  public void refreshAllHealthyDnUsageInfo() {
+    //no op
+  }
+
   @Override
   public void processNodeReport(DatanodeDetails datanodeDetails,
                                 NodeReportProto nodeReport) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index c6bb279..ca5df90 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -79,7 +79,7 @@
 
   @After
   public void cleanup() throws Exception {
-    if(containerManager != null) {
+    if (containerManager != null) {
       containerManager.close();
     }
 
@@ -125,12 +125,12 @@
   }
 
   @Test
-  public void testGetContainers() throws Exception{
+  public void testGetContainers() throws Exception {
     Assert.assertTrue(
         containerManager.getContainers().isEmpty());
 
     ContainerID[] cidArray = new ContainerID[10];
-    for(int i = 0; i < 10; i++){
+    for (int i = 0; i < 10; i++) {
       ContainerInfo container = containerManager.allocateContainer(
           new RatisReplicationConfig(
               ReplicationFactor.THREE), "admin");
@@ -154,5 +154,19 @@
         containerManager.getContainers(HddsProtos.LifeCycleState.OPEN).size());
     Assert.assertEquals(2, containerManager
         .getContainers(HddsProtos.LifeCycleState.CLOSING).size());
+    containerManager.updateContainerState(cidArray[1],
+        HddsProtos.LifeCycleEvent.QUASI_CLOSE);
+    containerManager.updateContainerState(cidArray[2],
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    containerManager.updateContainerState(cidArray[2],
+        HddsProtos.LifeCycleEvent.CLOSE);
+    Assert.assertEquals(7, containerManager.
+        getContainerStateCount(HddsProtos.LifeCycleState.OPEN));
+    Assert.assertEquals(1, containerManager
+        .getContainerStateCount(HddsProtos.LifeCycleState.CLOSING));
+    Assert.assertEquals(1, containerManager
+        .getContainerStateCount(HddsProtos.LifeCycleState.QUASI_CLOSED));
+    Assert.assertEquals(1, containerManager
+        .getContainerStateCount(HddsProtos.LifeCycleState.CLOSED));
   }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index b4cbf27..d25dc54 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -103,13 +103,13 @@
     Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainer(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     Mockito.when(containerManager.getContainerReplicas(
         Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainerReplicas(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     Mockito.doAnswer(invocation -> {
       containerStateManager
@@ -123,7 +123,7 @@
 
     Mockito.doAnswer(invocation -> {
       containerStateManager.updateContainerReplica(
-          ((ContainerID)invocation.getArguments()[0]).getProtobuf(),
+          ((ContainerID)invocation.getArguments()[0]),
           (ContainerReplica) invocation.getArguments()[1]);
       return null;
     }).when(containerManager).updateContainerReplica(
@@ -131,7 +131,7 @@
 
     Mockito.doAnswer(invocation -> {
       containerStateManager.removeContainerReplica(
-          ((ContainerID)invocation.getArguments()[0]).getProtobuf(),
+          ((ContainerID)invocation.getArguments()[0]),
           (ContainerReplica) invocation.getArguments()[1]);
       return null;
     }).when(containerManager).removeContainerReplica(
@@ -177,13 +177,13 @@
         ContainerReplicaProto.State.CLOSED,
         datanodeOne, datanodeTwo, datanodeThree)
         .forEach(r -> containerStateManager.updateContainerReplica(
-            containerOne.containerID().getProtobuf(), r));
+            containerOne.containerID(), r));
 
     getReplicas(containerTwo.containerID(),
         ContainerReplicaProto.State.CLOSED,
         datanodeOne, datanodeTwo, datanodeThree)
         .forEach(r -> containerStateManager.updateContainerReplica(
-            containerTwo.containerID().getProtobuf(), r));
+            containerTwo.containerID(), r));
 
 
     // SCM expects both containerOne and containerTwo to be in all the three
@@ -236,13 +236,13 @@
         ContainerReplicaProto.State.CLOSED,
         datanodeOne, datanodeTwo, datanodeThree)
         .forEach(r -> containerStateManager.updateContainerReplica(
-            containerOne.containerID().getProtobuf(), r));
+            containerOne.containerID(), r));
 
     getReplicas(containerTwo.containerID(),
         ContainerReplicaProto.State.CLOSED,
         datanodeOne, datanodeTwo, datanodeThree)
         .forEach(r -> containerStateManager.updateContainerReplica(
-            containerTwo.containerID().getProtobuf(), r));
+            containerTwo.containerID(), r));
 
 
     // SCM expects both containerOne and containerTwo to be in all the three
@@ -315,11 +315,11 @@
 
     containerOneReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
     containerTwoReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
 
     final ContainerReportsProto containerReport = getContainerReportsProto(
@@ -383,11 +383,11 @@
 
     containerOneReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
     containerTwoReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
 
     final ContainerReportsProto containerReport = getContainerReportsProto(
@@ -454,11 +454,11 @@
 
     containerOneReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
     containerTwoReplicas.forEach(r ->
         containerStateManager.updateContainerReplica(
-        containerTwo.containerID().getProtobuf(), r));
+        containerTwo.containerID(), r));
 
 
     final ContainerReportsProto containerReport = getContainerReportsProto(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 1bd07f7..bb64fa6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -120,7 +120,7 @@
 
     //WHEN
     Set<ContainerReplica> replicas = containerStateManager
-        .getContainerReplicas(c1.containerID().getProtobuf());
+        .getContainerReplicas(c1.containerID());
 
     //THEN
     Assert.assertEquals(3, replicas.size());
@@ -140,7 +140,7 @@
 
     //WHEN
     Set<ContainerReplica> replicas = containerStateManager
-        .getContainerReplicas(c1.containerID().getProtobuf());
+        .getContainerReplicas(c1.containerID());
 
     Assert.assertEquals(2, replicas.size());
     Assert.assertEquals(3, c1.getReplicationConfig().getRequiredNodes());
@@ -153,7 +153,7 @@
         .setDatanodeDetails(node)
         .build();
     containerStateManager
-        .updateContainerReplica(cont.containerID().getProtobuf(), replica);
+        .updateContainerReplica(cont.containerID(), replica);
   }
 
   private ContainerInfo allocateContainer() throws IOException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 29cae36..25ead76 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -131,18 +131,18 @@
     Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainer(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     Mockito.when(containerManager.getContainerReplicas(
         Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainerReplicas(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     Mockito.doAnswer(invocation -> {
       containerStateManager
           .removeContainerReplica(((ContainerID)invocation
-                  .getArguments()[0]).getProtobuf(),
+                  .getArguments()[0]),
               (ContainerReplica)invocation.getArguments()[1]);
       return null;
     }).when(containerManager).removeContainerReplica(
@@ -162,7 +162,7 @@
     Mockito.doAnswer(invocation -> {
       containerStateManager
           .updateContainerReplica(((ContainerID)invocation
-                  .getArguments()[0]).getProtobuf(),
+                  .getArguments()[0]),
               (ContainerReplica) invocation.getArguments()[1]);
       return null;
     }).when(containerManager).updateContainerReplica(
@@ -201,7 +201,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(
-        container.containerID().getProtobuf(), r));
+        container.containerID(), r));
 
     final IncrementalContainerReportProto containerReport =
         getIncrementalContainerReportProto(container.containerID(),
@@ -234,7 +234,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(
-        container.containerID().getProtobuf(), r));
+        container.containerID(), r));
 
 
     final IncrementalContainerReportProto containerReport =
@@ -272,7 +272,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(
-        container.containerID().getProtobuf(), r));
+        container.containerID(), r));
 
     final IncrementalContainerReportProto containerReport =
         getIncrementalContainerReportProto(container.containerID(),
@@ -305,9 +305,9 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(
-        container.containerID().getProtobuf(), r));
+        container.containerID(), r));
     Assert.assertEquals(3, containerStateManager
-        .getContainerReplicas(container.containerID().getProtobuf()).size());
+        .getContainerReplicas(container.containerID()).size());
     final IncrementalContainerReportProto containerReport =
         getIncrementalContainerReportProto(container.containerID(),
             ContainerReplicaProto.State.DELETED,
@@ -317,7 +317,7 @@
             datanodeOne, containerReport);
     reportHandler.onMessage(icr, publisher);
     Assert.assertEquals(2, containerStateManager
-        .getContainerReplicas(container.containerID().getProtobuf()).size());
+        .getContainerReplicas(container.containerID()).size());
   }
 
   @Test
@@ -352,7 +352,7 @@
     final ContainerReportsProto fullReport = TestContainerReportHandler
         .getContainerReportsProto(containerTwo.containerID(), CLOSED,
             datanode.getUuidString());
-    final ContainerReportFromDatanode fcr =new ContainerReportFromDatanode(
+    final ContainerReportFromDatanode fcr = new ContainerReportFromDatanode(
         datanode, fullReport);
 
     // We need to run the FCR and ICR at the same time via the executor so we
@@ -363,7 +363,7 @@
       // Running this test 10 times to ensure the race condition we are testing
       // for does not occur. In local tests, before the code was fixed, this
       // test failed consistently every time (reproducing the issue).
-      for (int i=0; i<10; i++) {
+      for (int i = 0; i < 10; i++) {
         Future<?> t1 =
             executor.submit(() -> fullReportHandler.onMessage(fcr, publisher));
         Future<?> t2 =
@@ -376,7 +376,7 @@
           // If we find "container" in the NM, then we must also have it in
           // Container Manager.
           Assert.assertEquals(1, containerStateManager
-              .getContainerReplicas(container.containerID().getProtobuf())
+              .getContainerReplicas(container.containerID())
               .size());
           Assert.assertEquals(2, nmContainers.size());
         } else {
@@ -385,12 +385,12 @@
           // NM, but have found something for it in ContainerManager, and that
           // should not happen. It should be in both, or neither.
           Assert.assertEquals(0, containerStateManager
-              .getContainerReplicas(container.containerID().getProtobuf())
+              .getContainerReplicas(container.containerID())
               .size());
           Assert.assertEquals(1, nmContainers.size());
         }
         Assert.assertEquals(1, containerStateManager
-            .getContainerReplicas(containerTwo.containerID().getProtobuf())
+            .getContainerReplicas(containerTwo.containerID())
             .size());
       }
     } finally {
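The test above drives a full container report and an incremental report through the handlers at the same time, and repeats the attempt several times so a rare ordering race has a chance to surface. A minimal, pure-JDK sketch of that repetition pattern is shown below; the task bodies and the invariant are hypothetical stand-ins, not the Ozone handlers.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

final class RaceRepetitionSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
      // Repeat several times: a single run can easily miss a rare interleaving.
      for (int i = 0; i < 10; i++) {
        AtomicInteger sharedState = new AtomicInteger();
        Future<?> fullReport =
            executor.submit(() -> sharedState.incrementAndGet());
        Future<?> incrementalReport =
            executor.submit(() -> sharedState.incrementAndGet());
        fullReport.get();
        incrementalReport.get();
        // The invariant expected to hold regardless of interleaving.
        if (sharedState.get() != 2) {
          throw new AssertionError("lost update in iteration " + i);
        }
      }
    } finally {
      executor.shutdown();
    }
  }
}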
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
index f6c47d3..c26ebf7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
@@ -156,7 +156,7 @@
           List<ContainerInfo> containers = new ArrayList<>();
           for (ContainerID id : ids) {
             containers.add(containerStateManager.getContainer(
-                id.getProtobuf()));
+                id));
           }
           return containers;
         });
@@ -164,13 +164,13 @@
     Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainer(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     Mockito.when(containerManager.getContainerReplicas(
         Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainerReplicas(((ContainerID)invocation
-                .getArguments()[0]).getProtobuf()));
+                .getArguments()[0])));
 
     containerPlacementPolicy = Mockito.mock(PlacementPolicy.class);
 
@@ -289,7 +289,7 @@
     replicas.addAll(getReplicas(id, State.OPEN, datanode));
 
     for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     final int currentCloseCommandCount = datanodeCommandHandler
@@ -302,7 +302,7 @@
 
     // Update the OPEN to CLOSING
     for (ContainerReplica replica : getReplicas(id, State.CLOSING, datanode)) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     replicationManager.processAll();
@@ -333,10 +333,10 @@
         id, State.OPEN, 1000L, datanodeDetails.getUuid(), datanodeDetails);
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
+        id, replicaThree);
 
     final int currentCloseCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.closeContainerCommand);
@@ -373,10 +373,10 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
+        id, replicaThree);
 
     // All the QUASI_CLOSED replicas have same originNodeId, so the
     // container will not be closed. ReplicationManager should take no action.
@@ -413,10 +413,10 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
+        id, replicaThree);
 
     final int currentDeleteCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
@@ -434,7 +434,7 @@
         id, State.UNHEALTHY, 1000L, originNodeId,
         replicaOne.getDatanodeDetails());
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), unhealthyReplica);
+        id, unhealthyReplica);
 
     replicationManager.processAll();
     eventQueue.processAll(1000);
@@ -447,7 +447,7 @@
         replicationManager.getMetrics().getNumDeletionCmdsSent());
 
     // Now we will delete the unhealthy replica from in-memory.
-    containerStateManager.removeContainerReplica(id.getProtobuf(), replicaOne);
+    containerStateManager.removeContainerReplica(id, replicaOne);
 
     final long currentBytesToReplicate = replicationManager.getMetrics()
         .getNumReplicationBytesTotal();
@@ -482,7 +482,7 @@
     final ContainerReplica replicatedReplicaOne = getReplicas(
         id, State.CLOSED, 1000L, originNodeId, targetDn);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicatedReplicaOne);
+        id, replicatedReplicaOne);
 
     final long currentReplicationCommandCompleted = replicationManager
         .getMetrics().getNumReplicationCmdsCompleted();
@@ -528,11 +528,11 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFour);
+        id, replicaThree);
+    containerStateManager.updateContainerReplica(id, replicaFour);
 
     final int currentDeleteCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
@@ -559,16 +559,16 @@
         .get(id).get(0).getDatanode();
     if (targetDn.equals(replicaOne.getDatanodeDetails())) {
       containerStateManager.removeContainerReplica(
-          id.getProtobuf(), replicaOne);
+          id, replicaOne);
     } else if (targetDn.equals(replicaTwo.getDatanodeDetails())) {
       containerStateManager.removeContainerReplica(
-          id.getProtobuf(), replicaTwo);
+          id, replicaTwo);
     } else if (targetDn.equals(replicaThree.getDatanodeDetails())) {
       containerStateManager.removeContainerReplica(
-          id.getProtobuf(), replicaThree);
+          id, replicaThree);
     } else if (targetDn.equals(replicaFour.getDatanodeDetails())) {
       containerStateManager.removeContainerReplica(
-          id.getProtobuf(), replicaFour);
+          id, replicaFour);
     }
 
     final long currentDeleteCommandCompleted = replicationManager.getMetrics()
@@ -616,11 +616,11 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFour);
+        id, replicaThree);
+    containerStateManager.updateContainerReplica(id, replicaFour);
 
     final int currentDeleteCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand);
@@ -648,7 +648,7 @@
     final long currentDeleteCommandCompleted = replicationManager.getMetrics()
         .getNumDeletionCmdsCompleted();
     // Now we remove the replica to simulate deletion complete
-    containerStateManager.removeContainerReplica(id.getProtobuf(), replicaOne);
+    containerStateManager.removeContainerReplica(id, replicaOne);
 
     replicationManager.processAll();
     eventQueue.processAll(1000);
@@ -683,8 +683,8 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
 
     final int currentReplicateCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand);
@@ -722,7 +722,7 @@
     final ContainerReplica replicatedReplicaThree = getReplicas(
         id, State.CLOSED, 1000L, originNodeId, targetDn);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicatedReplicaThree);
+        id, replicatedReplicaThree);
 
     replicationManager.processAll();
     eventQueue.processAll(1000);
@@ -776,8 +776,8 @@
         id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
 
     final int currentReplicateCommandCount = datanodeCommandHandler
         .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand);
@@ -804,7 +804,7 @@
         replicateCommand.get().getDatanodeId());
     ContainerReplica newReplica = getReplicas(
         id, State.QUASI_CLOSED, 1000L, originNodeId, newNode);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), newReplica);
+    containerStateManager.updateContainerReplica(id, newReplica);
 
     ReplicationManagerReport report = replicationManager.getContainerReport();
     Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED));
@@ -836,7 +836,7 @@
     Assert.assertEquals(1, replicationManager.getMetrics()
         .getInflightDeletion());
 
-    containerStateManager.removeContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.removeContainerReplica(id, replicaTwo);
 
     final long currentDeleteCommandCompleted = replicationManager.getMetrics()
         .getNumDeletionCmdsCompleted();
@@ -900,7 +900,7 @@
         randomDatanodeDetails());
     containerStateManager.addContainer(container.getProtobuf());
     for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     final int currentCloseCommandCount = datanodeCommandHandler
@@ -935,7 +935,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     replicationManager.processAll();
@@ -964,7 +964,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     final CloseContainerEventHandler closeContainerHandler =
@@ -996,7 +996,7 @@
 
     containerStateManager.addContainer(container.getProtobuf());
     for (ContainerReplica replica : replicas) {
-      containerStateManager.updateContainerReplica(id.getProtobuf(), replica);
+      containerStateManager.updateContainerReplica(id, replica);
     }
 
     replicationManager.processAll();
@@ -1031,10 +1031,10 @@
         id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
+        id, replicaThree);
 
     // Ensure a mis-replicated status is returned for any containers in this
     // test where there are 3 replicas. When there are 2 or 4 replicas
@@ -1116,12 +1116,12 @@
         id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFour);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFive);
+        id, replicaThree);
+    containerStateManager.updateContainerReplica(id, replicaFour);
+    containerStateManager.updateContainerReplica(id, replicaFive);
 
     // Ensure a mis-replicated status is returned for any containers in this
     // test where there are exactly 3 replicas checked.
@@ -1168,11 +1168,11 @@
         id, State.CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFour);
+        id, replicaThree);
+    containerStateManager.updateContainerReplica(id, replicaFour);
 
     Mockito.when(containerPlacementPolicy.validateContainerPlacement(
         Mockito.argThat(list -> list.size() == 3),
@@ -1214,12 +1214,12 @@
         id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails());
 
     containerStateManager.addContainer(container.getProtobuf());
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
     containerStateManager.updateContainerReplica(
-        id.getProtobuf(), replicaThree);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFour);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaFive);
+        id, replicaThree);
+    containerStateManager.updateContainerReplica(id, replicaFour);
+    containerStateManager.updateContainerReplica(id, replicaFive);
 
     Mockito.when(containerPlacementPolicy.validateContainerPlacement(
         Mockito.argThat(list -> list != null && list.size() <= 4),
@@ -1431,7 +1431,7 @@
     // for removal
     Set<ContainerReplica> decom =
         containerStateManager.getContainerReplicas(
-            container.containerID().getProtobuf())
+            container.containerID())
         .stream()
         .filter(r -> r.getDatanodeDetails().getPersistedOpState() != IN_SERVICE)
         .collect(Collectors.toSet());
@@ -1496,7 +1496,7 @@
         SCMCommandProto.Type.deleteContainerCommand, dn1.getDatanodeDetails()));
     Assert.assertEquals(1, datanodeCommandHandler.getInvocationCount(
         SCMCommandProto.Type.deleteContainerCommand));
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn1);
+    containerStateManager.removeContainerReplica(id, dn1);
 
     replicationManager.processAll();
     eventQueue.processAll(1000);
@@ -1579,10 +1579,10 @@
     //deleteContainerCommand will be sent again
     Assert.assertEquals(2, datanodeCommandHandler.getInvocationCount(
         SCMCommandProto.Type.deleteContainerCommand));
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn1);
+    containerStateManager.removeContainerReplica(id, dn1);
 
     //replica in src datanode is deleted now
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn1);
+    containerStateManager.removeContainerReplica(id, dn1);
     replicationManager.processAll();
     eventQueue.processAll(1000);
 
@@ -1630,7 +1630,7 @@
     //now, replication succeeds, but replica in dn2 lost,
     //and there are only tree replicas totally, so rm should
     //not delete the replica on dn1
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn2);
+    containerStateManager.removeContainerReplica(id, dn2);
     replicationManager.processAll();
     eventQueue.processAll(1000);
 
@@ -1741,7 +1741,7 @@
     containerStateManager.updateContainerState(id.getProtobuf(),
         LifeCycleEvent.FORCE_CLOSE);
     Assert.assertTrue(LifeCycleState.CLOSED ==
-        containerStateManager.getContainer(id.getProtobuf()).getState());
+        containerStateManager.getContainer(id).getState());
 
     //Node is not in healthy state
     for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
@@ -1805,8 +1805,8 @@
 
     //make the replica num be 2 to test the case
     //that container is in inflightReplication
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn5);
-    containerStateManager.removeContainerReplica(id.getProtobuf(), dn4);
+    containerStateManager.removeContainerReplica(id, dn5);
+    containerStateManager.removeContainerReplica(id, dn4);
     //replication manager should generate inflightReplication
     replicationManager.processAll();
     //waiting for inflightReplication generation
@@ -1902,7 +1902,7 @@
     final ContainerReplica replica = getReplicas(
         container.containerID(), replicaState, 1000L, originNodeId, dn);
     containerStateManager
-        .updateContainerReplica(container.containerID().getProtobuf(), replica);
+        .updateContainerReplica(container.containerID(), replica);
     return replica;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
index e6f32e0..6068c31 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java
@@ -37,10 +37,11 @@
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.Assert;
@@ -53,6 +54,7 @@
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -63,6 +65,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.stream.Collectors;
 
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 /**
@@ -77,6 +80,7 @@
   private ContainerManager containerManager;
   private ContainerBalancer containerBalancer;
   private MockNodeManager mockNodeManager;
+  private StorageContainerManager scm;
   private OzoneConfiguration conf;
   private PlacementPolicy placementPolicy;
   private ContainerBalancerConfiguration balancerConfiguration;
@@ -100,6 +104,7 @@
   @Before
   public void setup() throws SCMException, NodeNotFoundException {
     conf = new OzoneConfiguration();
+    scm = Mockito.mock(StorageContainerManager.class);
     containerManager = Mockito.mock(ContainerManager.class);
     replicationManager = Mockito.mock(ReplicationManager.class);
 
@@ -146,9 +151,16 @@
     when(containerManager.getContainers())
         .thenReturn(new ArrayList<>(cidToInfoMap.values()));
 
-    containerBalancer = new ContainerBalancer(mockNodeManager, containerManager,
-        replicationManager, conf, SCMContext.emptyContext(),
-        new NetworkTopologyImpl(conf), placementPolicy);
+    when(scm.getScmNodeManager()).thenReturn(mockNodeManager);
+    when(scm.getContainerPlacementPolicy()).thenReturn(placementPolicy);
+    when(scm.getContainerManager()).thenReturn(containerManager);
+    when(scm.getReplicationManager()).thenReturn(replicationManager);
+    when(scm.getScmContext()).thenReturn(SCMContext.emptyContext());
+    when(scm.getClusterMap()).thenReturn(null);
+    when(scm.getEventQueue()).thenReturn(mock(EventPublisher.class));
+    when(scm.getConfiguration()).thenReturn(conf);
+
+    containerBalancer = new ContainerBalancer(scm);
   }
 
   @Test
@@ -186,7 +198,7 @@
       // modify this after balancer is fully completed
       try {
         Thread.sleep(100);
-      } catch (InterruptedException e) {}
+      } catch (InterruptedException e) { }
 
       expectedUnBalancedNodes =
           determineExpectedUnBalancedNodes(randomThreshold);
@@ -214,15 +226,12 @@
     balancerConfiguration.setThreshold(99.99);
     containerBalancer.start(balancerConfiguration);
 
-    // waiting for balance completed.
-    // TODO: this is a temporary implementation for now
-    // modify this after balancer is fully completed
-    try {
-      Thread.sleep(100);
-    } catch (InterruptedException e) {}
+    sleepWhileBalancing(100);
 
     containerBalancer.stop();
+    ContainerBalancerMetrics metrics = containerBalancer.getMetrics();
     Assert.assertEquals(0, containerBalancer.getUnBalancedNodes().size());
+    Assert.assertEquals(0, metrics.getNumDatanodesUnbalanced());
   }
 
   /**
@@ -239,16 +248,15 @@
     balancerConfiguration.setIterations(1);
     containerBalancer.start(balancerConfiguration);
 
-    // waiting for balance completed.
-    // TODO: this is a temporary implementation for now
-    // modify this after balancer is fully completed
-    try {
-      Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    sleepWhileBalancing(500);
 
+    int number = percent * numberOfNodes / 100;
+    ContainerBalancerMetrics metrics = containerBalancer.getMetrics();
     Assert.assertFalse(
-        containerBalancer.getCountDatanodesInvolvedPerIteration() >
-            (percent * numberOfNodes / 100));
+        containerBalancer.getCountDatanodesInvolvedPerIteration() > number);
+    Assert.assertTrue(metrics.getNumDatanodesInvolvedInLatestIteration() > 0);
+    Assert.assertFalse(
+        metrics.getNumDatanodesInvolvedInLatestIteration() > number);
     containerBalancer.stop();
   }
 
@@ -266,7 +274,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
 
@@ -286,7 +294,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     // check whether all selected containers are closed
@@ -305,16 +313,16 @@
     balancerConfiguration.setIterations(1);
     containerBalancer.start(balancerConfiguration);
 
-    // waiting for balance completed.
-    // TODO: this is a temporary implementation for now
-    // modify this after balancer is fully completed
-    try {
-      Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    sleepWhileBalancing(500);
 
     // balancer should not have moved more size than the limit
     Assert.assertFalse(containerBalancer.getSizeMovedPerIteration() >
         10 * OzoneConsts.GB);
+
+    long size =
+        containerBalancer.getMetrics().getDataSizeMovedGBInLatestIteration();
+    Assert.assertTrue(size > 0);
+    Assert.assertFalse(size > 10);
     containerBalancer.stop();
   }
 
@@ -330,7 +338,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     Map<DatanodeDetails, ContainerMoveSelection> sourceToTargetMap =
@@ -357,7 +365,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     Map<DatanodeDetails, ContainerMoveSelection> sourceToTargetMap =
@@ -428,7 +436,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     Set<ContainerID> containers = new HashSet<>();
@@ -455,7 +463,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(1000);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     Set<ContainerID> excludeContainers =
@@ -490,7 +498,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(500);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     containerBalancer.stop();
     // balancer should have identified unbalanced nodes
@@ -500,29 +508,25 @@
 
   @Test
   public void testMetrics() {
+    conf.set("hdds.datanode.du.refresh.period", "1ms");
+    balancerConfiguration.setBalancingInterval(Duration.ofMillis(2));
     balancerConfiguration.setThreshold(10);
     balancerConfiguration.setIterations(1);
-    balancerConfiguration.setMaxSizeEnteringTarget(10 * OzoneConsts.GB);
-    balancerConfiguration.setMaxSizeToMovePerIteration(100 * OzoneConsts.GB);
+    balancerConfiguration.setMaxSizeEnteringTarget(6 * OzoneConsts.GB);
+    // deliberately set max size per iteration to a low value, 6GB
+    balancerConfiguration.setMaxSizeToMovePerIteration(6 * OzoneConsts.GB);
     balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100);
 
     containerBalancer.start(balancerConfiguration);
+    sleepWhileBalancing(500);
 
-    // waiting for balance completed.
-    // TODO: this is a temporary implementation for now
-    // modify this after balancer is fully completed
-    try {
-      Thread.sleep(500);
-    } catch (InterruptedException e) {}
-
-    containerBalancer.stop();
     ContainerBalancerMetrics metrics = containerBalancer.getMetrics();
     Assert.assertEquals(determineExpectedUnBalancedNodes(
             balancerConfiguration.getThreshold()).size(),
-        metrics.getDatanodesNumToBalance());
-    Assert.assertEquals(ContainerBalancer.ratioToPercent(
-            nodeUtilizations.get(nodeUtilizations.size() - 1)),
-        metrics.getMaxDatanodeUtilizedPercentage());
+        metrics.getNumDatanodesUnbalanced());
+    Assert.assertTrue(metrics.getDataSizeMovedGBInLatestIteration() <= 6);
+    Assert.assertEquals(1, metrics.getNumIterations());
+    containerBalancer.stop();
   }
 
   /**
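
The TestContainerBalancer changes above wire the balancer from a mocked StorageContainerManager and replace the inline Thread.sleep blocks with a sleepWhileBalancing helper that is referenced but not shown in these hunks. A minimal sketch of what such a helper could look like, purely an assumption for illustration (the real helper may poll balancer state instead of sleeping unconditionally):

    // Hypothetical helper assumed by the updated tests.
    private void sleepWhileBalancing(long millis) {
      try {
        Thread.sleep(millis);
      } catch (InterruptedException e) {
        // Restore the interrupt flag rather than swallowing it.
        Thread.currentThread().interrupt();
      }
    }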
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 37f4594..a830a71 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -550,7 +550,7 @@
       dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
     }
 
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       // Set a random DN to in_service and ensure it is always picked
       int index = new Random().nextInt(dnInfos.size());
       dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
index 405da0f..a1dd8a8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
@@ -26,7 +26,10 @@
 import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
 import org.junit.Before;
 import org.junit.Test;
-import java.util.*;
+
+import java.util.HashSet;
+import java.util.Set;
+
 import static junit.framework.TestCase.assertEquals;
 import static junit.framework.TestCase.assertTrue;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
index 18d46c6..c64615f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
@@ -107,8 +107,9 @@
 
     ContainerStateManager proxy =
         (ContainerStateManager) Proxy.newProxyInstance(
-        SCMHAInvocationHandler.class.getClassLoader(),
-        new Class<?>[]{ContainerStateManager.class}, scmhaInvocationHandler);
+            SCMHAInvocationHandler.class.getClassLoader(),
+            new Class<?>[]{ContainerStateManager.class},
+            scmhaInvocationHandler);
 
     try {
       proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance());
@@ -124,8 +125,9 @@
 
     CertificateStore certificateStore =
         (CertificateStore) Proxy.newProxyInstance(
-        SCMHAInvocationHandler.class.getClassLoader(),
-        new Class<?>[]{CertificateStore.class}, scmhaInvocationHandler);
+            SCMHAInvocationHandler.class.getClassLoader(),
+            new Class<?>[]{CertificateStore.class},
+            scmhaInvocationHandler);
 
     KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
     try {
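
The re-indented blocks above build dynamic proxies over SCMHAInvocationHandler. As a reminder of the plain JDK mechanism involved (a generic sketch, not project code):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Proxy;

    public final class ProxySketch {
      // Stand-in interface; in the test this role is played by
      // ContainerStateManager and CertificateStore.
      interface Greeter {
        String greet(String name);
      }

      public static void main(String[] args) {
        InvocationHandler handler = (proxy, method, callArgs) ->
            // Every call on the proxied interface lands here.
            "intercepted " + method.getName() + "(" + callArgs[0] + ")";
        Greeter greeter = (Greeter) Proxy.newProxyInstance(
            Greeter.class.getClassLoader(),
            new Class<?>[]{Greeter.class},
            handler);
        System.out.println(greeter.greet("scm"));
      }
    }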
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
index 9ec10fc..5f82cce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
@@ -64,7 +64,7 @@
     conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
 
     String[] nodes = new String[] {"scm1", "scm2", "scm3"};
-    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId,
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId,
         "scm1,scm2,scm3");
     conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1");
 
@@ -72,14 +72,14 @@
     int i = 1;
     for (String nodeId : nodes) {
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-          scmServiceId, nodeId), "localhost:"+port++);
+          scmServiceId, nodeId), "localhost:" + port++);
       conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
           scmServiceId, nodeId), port);
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY,
           scmServiceId, nodeId), "172.28.9.1");
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
-          scmServiceId, nodeId), "localhost:"+port++);
+          scmServiceId, nodeId), "localhost:" + port++);
       conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
           scmServiceId, nodeId), port);
       conf.set(ConfUtils.addKeySuffixes(
@@ -87,26 +87,26 @@
           "172.28.9.1");
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY,
-          scmServiceId, nodeId), "localhost:"+port++);
+          scmServiceId, nodeId), "localhost:" + port++);
       conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY,
           scmServiceId, nodeId), port);
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY,
           scmServiceId, nodeId), "172.28.9.1");
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY,
-          scmServiceId, nodeId), "localhost:"+port++);
+          scmServiceId, nodeId), "localhost:" + port++);
       conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
           scmServiceId, nodeId), port);
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_BIND_HOST_KEY,
           scmServiceId, nodeId), "172.28.9.1");
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY,
-          scmServiceId, nodeId), "localhost:"+port++);
+          scmServiceId, nodeId), "localhost:" + port++);
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_BIND_HOST_KEY,
           scmServiceId, nodeId), "172.28.9.1");
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DB_DIRS,
-          scmServiceId, nodeId), "/var/scm-metadata"+ i++);
+          scmServiceId, nodeId), "/var/scm-metadata" + i++);
 
       conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
           scmServiceId, nodeId), "localhost");
@@ -121,7 +121,7 @@
     port = 9880;
 
     // Validate configs.
-    Assert.assertEquals("localhost:"+port++,
+    Assert.assertEquals("localhost:" + port++,
         conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
         scmServiceId, "scm1")));
     Assert.assertEquals(port,
@@ -132,7 +132,7 @@
             scmServiceId, "scm1")));
 
 
-    Assert.assertEquals("localhost:"+port++,
+    Assert.assertEquals("localhost:" + port++,
         conf.get(ConfUtils.addKeySuffixes(
             OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, scmServiceId, "scm1")));
     Assert.assertEquals(port, conf.getInt(ConfUtils.addKeySuffixes(
@@ -142,7 +142,7 @@
             OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY, scmServiceId, "scm1")));
 
 
-    Assert.assertEquals("localhost:"+port++,
+    Assert.assertEquals("localhost:" + port++,
         conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY,
             scmServiceId, "scm1")));
     Assert.assertEquals(port,
@@ -152,7 +152,7 @@
         ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId,
         "scm1")));
 
-    Assert.assertEquals("localhost:"+port++,
+    Assert.assertEquals("localhost:" + port++,
         conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY,
             scmServiceId, "scm1")));
     Assert.assertEquals(port,
@@ -163,7 +163,7 @@
         "scm1")));
 
 
-    Assert.assertEquals("localhost:"+port++,
+    Assert.assertEquals("localhost:" + port++,
         conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY,
         scmServiceId, "scm1")));
     Assert.assertEquals("172.28.9.1",
@@ -192,7 +192,7 @@
     conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
 
     String[] nodes = new String[] {"scm1", "scm2", "scm3"};
-    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId,
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId,
         "scm1,scm2,scm3");
     conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1");
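
The hunks above are whitespace-only, but the code they touch builds per-node SCM HA keys via ConfUtils.addKeySuffixes. A hedged sketch of the assumed key shape (the exact join behaviour of addKeySuffixes is not shown in this diff):

    // Assumption for illustration: suffixes are appended with '.' separators,
    // producing keys such as ozone.scm.client.address.<serviceId>.<nodeId>.
    String key = OZONE_SCM_CLIENT_ADDRESS_KEY + "." + scmServiceId + "." + nodeId;
    conf.set(key, "localhost:" + port++);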
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
index f5913aa..c352ca4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
@@ -49,7 +49,7 @@
   }
 
   @Test(expected = InvalidProtocolBufferException.class)
-  public void testEncodeWithNonProto() throws Exception{
+  public void testEncodeWithNonProto() throws Exception {
     PipelineID pipelineID = PipelineID.randomId();
     // Non proto args
     Object[] args = new Object[] {pipelineID};
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java
index 5543be5..834b539 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java
@@ -57,7 +57,7 @@
 
   @Test
   public void testPersistingARandomUUID() throws Exception {
-    for (int i=0; i<100; i++) {
+    for (int i = 0; i < 100; i++) {
       UUID uuid = UUID.randomUUID();
 
       long mask = 0x0000_0000_0000_00FFL;
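
The loop above persists random UUIDs and checks their byte layout with bit masks. As a generic illustration of turning a UUID into a fixed 16-byte key (not the project's PipelineIDCodec, whose layout is asserted by the test itself):

    import java.nio.ByteBuffer;
    import java.util.UUID;

    // Generic sketch: big-endian, most-significant long first.
    static byte[] uuidToBytes(UUID uuid) {
      ByteBuffer buf = ByteBuffer.allocate(16);
      buf.putLong(uuid.getMostSignificantBits());
      buf.putLong(uuid.getLeastSignificantBits());
      return buf.array();
    }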
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index b307b57..63ecb29 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -212,7 +212,7 @@
       assertEquals(remaining * nodeCount,
           (long) scmNodeManager.getStats().getRemaining().get());
 
-      xceiverClientManager= new XceiverClientManager(conf);
+      xceiverClientManager = new XceiverClientManager(conf);
 
       ContainerInfo container = containerManager
           .allocateContainer(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 80b7240..82822a1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -23,8 +23,13 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.container.*;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
+import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -105,7 +110,7 @@
    */
   @Test
   public void testClosePipelinesEventFiredWhenAdminStarted()
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
     nodeManager.register(dn1,
         new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
@@ -425,7 +430,7 @@
    */
   private Set<ContainerID> generateContainers(int count) {
     Set<ContainerID> containers = new HashSet<>();
-    for (int i=0; i<count; i++) {
+    for (int i = 0; i < count; i++) {
       containers.add(ContainerID.valueOf(i));
     }
     return containers;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 4373238..792d62f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -94,12 +94,12 @@
 
   @Test
   public void testAnyInvalidHostThrowsException()
-      throws InvalidHostStringException{
+      throws InvalidHostStringException {
     List<DatanodeDetails> dns = generateDatanodes();
 
     // Try to decommission a host that does exist, but give incorrect port
     try {
-      decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10"));
+      decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress() + ":10"));
       fail("InvalidHostStringException expected");
     } catch (InvalidHostStringException e) {
     }
@@ -131,7 +131,7 @@
     // that does not exist
     try {
       decom.decommissionNodes(Arrays.asList(
-          dns.get(0).getIpAddress()+":10"));
+          dns.get(0).getIpAddress() + ":10"));
       fail("InvalidHostStringException expected");
     } catch (InvalidHostStringException e) {
     }
@@ -159,7 +159,7 @@
     // and we hardcoded ports to 3456, 4567, 5678
     DatanodeDetails multiDn = dns.get(10);
     String multiAddr =
-        multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+        multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue();
     decom.decommissionNodes(Arrays.asList(multiAddr));
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(multiDn).getOperationalState());
@@ -202,7 +202,7 @@
     // and we hardcoded ports to 3456, 4567, 5678
     DatanodeDetails multiDn = dns.get(10);
     String multiAddr =
-        multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+        multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue();
     decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100);
     assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
         nodeManager.getNodeStatus(multiDn).getOperationalState());
@@ -296,7 +296,7 @@
    */
   private List<DatanodeDetails> generateDatanodes() {
     List<DatanodeDetails> dns = new ArrayList<>();
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < 10; i++) {
       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
       dns.add(dn);
       nodeManager.register(dn, null, null);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
index da87d18..7dfcbeb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -41,6 +43,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Set;
 import java.util.UUID;
 
 import static junit.framework.TestCase.assertEquals;
@@ -233,6 +236,27 @@
   }
 
   @Test
+  public void testContainerCanBeAddedAndRemovedFromDN()
+      throws NodeAlreadyExistsException, NodeNotFoundException {
+    DatanodeDetails dn = generateDatanode();
+    nsm.addNode(dn, UpgradeUtils.defaultLayoutVersionProto());
+
+    nsm.addContainer(dn.getUuid(), ContainerID.valueOf(1));
+    nsm.addContainer(dn.getUuid(), ContainerID.valueOf(2));
+
+    Set<ContainerID> containerSet = nsm.getContainers(dn.getUuid());
+    assertEquals(2, containerSet.size());
+    Assert.assertTrue(containerSet.contains(ContainerID.valueOf(1)));
+    Assert.assertTrue(containerSet.contains(ContainerID.valueOf(2)));
+
+    nsm.removeContainer(dn.getUuid(), ContainerID.valueOf(2));
+    containerSet = nsm.getContainers(dn.getUuid());
+    assertEquals(1, containerSet.size());
+    Assert.assertTrue(containerSet.contains(ContainerID.valueOf(1)));
+    Assert.assertFalse(containerSet.contains(ContainerID.valueOf(2)));
+  }
+
+  @Test
   public void testHealthEventsFiredWhenOpStateChanged()
       throws NodeAlreadyExistsException, NodeNotFoundException {
     DatanodeDetails dn = generateDatanode();
@@ -304,7 +328,7 @@
       if (events.size() == 0) {
         return null;
       } else {
-        return events.get(events.size()-1);
+        return events.get(events.size() - 1);
       }
     }
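
The new testContainerCanBeAddedAndRemovedFromDN exercises per-datanode container tracking on the NodeStateManager. A minimal sketch of the bookkeeping such an API implies, assuming a simple map from datanode UUID to a set of container IDs (the real implementation may differ, e.g. by throwing NodeNotFoundException for unknown nodes):

    // Hypothetical backing state for addContainer/removeContainer/getContainers.
    private final Map<UUID, Set<ContainerID>> containersByNode =
        new ConcurrentHashMap<>();

    public void addContainer(UUID dnUuid, ContainerID id) {
      containersByNode
          .computeIfAbsent(dnUuid, k -> ConcurrentHashMap.newKeySet())
          .add(id);
    }

    public void removeContainer(UUID dnUuid, ContainerID id) {
      Set<ContainerID> set = containersByNode.get(dnUuid);
      if (set != null) {
        set.remove(id);
      }
    }

    public Set<ContainerID> getContainers(UUID dnUuid) {
      return containersByNode.getOrDefault(dnUuid, Collections.emptySet());
    }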
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index ab87a59..509eba4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -97,10 +97,15 @@
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
 import static org.apache.hadoop.hdds.scm.HddsTestUtils.getRandomPipelineReports;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.NEW_NODE;
 import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -457,7 +462,7 @@
       // these pipelines use nodes outside of allowedDNs.
       if (success) {
         for (Pipeline pipeline: pipelines) {
-          for(DatanodeDetails pipelineDN: pipeline.getNodes()) {
+          for (DatanodeDetails pipelineDN: pipeline.getNodes()) {
             // Do not wait for this condition to be true. Disallowed DNs should
             // never be used once we have the expected number of pipelines.
             if (!allowedDnIds.contains(pipelineDN.getUuidString())) {
@@ -1762,7 +1767,7 @@
     final int nodeCount = 6;
     SCMNodeManager nodeManager = createNodeManager(conf);
 
-    for (int i=0; i<nodeCount; i++) {
+    for (int i = 0; i < nodeCount; i++) {
       DatanodeDetails datanodeDetails =
           MockDatanodeDetails.randomDatanodeDetails();
       final long capacity = 2000;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 39a72db..89d45e0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -29,8 +29,11 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
 import java.util.Arrays;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
index 4de5eea..303d368 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
@@ -98,8 +98,8 @@
       throws NodeAlreadyExistsException {
     // Add one node for all possible states
     int nodeCount = 0;
-    for(NodeOperationalState op : NodeOperationalState.values()) {
-      for(NodeState health : NodeState.values()) {
+    for (NodeOperationalState op : NodeOperationalState.values()) {
+      for (NodeState health : NodeState.values()) {
         addRandomNodeWithState(op, health);
         nodeCount++;
       }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index fec2f2e..9b4d0a8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -110,7 +110,7 @@
 
   @Test
   public void testPipelineDatanodesIntersection() throws IOException {
-    NodeManager nodeManager= new MockNodeManager(true, nodeCount);
+    NodeManager nodeManager = new MockNodeManager(true, nodeCount);
     conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
     conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
     SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
@@ -157,7 +157,7 @@
           }
         }
         createdPipelineCount++;
-      } catch(SCMException e) {
+      } catch (SCMException e) {
         end = true;
       } catch (IOException e) {
         end = true;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index efb391e..cad4f48 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -250,7 +250,7 @@
           new ArrayList<>(datanodes.size()), nodesRequired,
           0, 10 * OzoneConsts.TB);
       Assert.fail("SCMException should have been thrown.");
-    } catch(SCMException ex) {
+    } catch (SCMException ex) {
       Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring));
     }
 
@@ -260,13 +260,13 @@
           new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB,
           0);
       Assert.fail("SCMException should have been thrown.");
-    } catch(SCMException ex) {
+    } catch (SCMException ex) {
       Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring));
     }
   }
 
   @Test
-  public void testPickLowestLoadAnchor() throws IOException{
+  public void testPickLowestLoadAnchor() throws IOException {
     List<DatanodeDetails> healthyNodes = nodeManager
         .getNodes(NodeStatus.inServiceHealthy());
 
@@ -343,7 +343,7 @@
   }
 
   @Test
-  public void testRackAwarenessNotEnabledWithFallBack() throws SCMException{
+  public void testRackAwarenessNotEnabledWithFallBack() throws SCMException {
     DatanodeDetails anchor = placementPolicy
         .chooseNode(nodesWithOutRackAwareness);
     DatanodeDetails randomNode = placementPolicy
@@ -425,12 +425,12 @@
   }
 
   @Test
-  public void testHeavyNodeShouldBeExcluded() throws SCMException{
+  public void testHeavyNodeShouldBeExcluded() throws SCMException {
     List<DatanodeDetails> healthyNodes =
         nodeManager.getNodes(NodeStatus.inServiceHealthy());
     int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber();
     // only minority of healthy NODES are heavily engaged in pipelines.
-    int minorityHeavy = healthyNodes.size()/2 - 1;
+    int minorityHeavy = healthyNodes.size() / 2 - 1;
     List<DatanodeDetails> pickedNodes1 = placementPolicy.chooseDatanodes(
         new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT),
         new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT),
@@ -443,7 +443,7 @@
     Assert.assertTrue(checkDuplicateNodesUUID(pickedNodes1));
 
     // majority of healthy NODES are heavily engaged in pipelines.
-    int majorityHeavy = healthyNodes.size()/2 + 2;
+    int majorityHeavy = healthyNodes.size() / 2 + 2;
     insertHeavyNodesIntoNodeManager(healthyNodes, majorityHeavy);
     boolean thrown = false;
     List<DatanodeDetails> pickedNodes2 = null;
@@ -627,7 +627,7 @@
   }
 
   private void insertHeavyNodesIntoNodeManager(
-      List<DatanodeDetails> nodes, int heavyNodeCount) throws SCMException{
+      List<DatanodeDetails> nodes, int heavyNodeCount) throws SCMException {
     if (nodes == null) {
       throw new SCMException("",
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index d1f383c..3d1a707 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -289,7 +289,7 @@
         provider.create(new RatisReplicationConfig(factor));
         Assert.fail("Expected SCMException for large container size with " +
             "replication factor " + factor.toString());
-      } catch(SCMException ex) {
+      } catch (SCMException ex) {
         Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring));
       }
     }
@@ -302,7 +302,7 @@
         provider.create(new RatisReplicationConfig(factor));
         Assert.fail("Expected SCMException for large metadata size with " +
             "replication factor " + factor.toString());
-      } catch(SCMException ex) {
+      } catch (SCMException ex) {
         Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring));
       }
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
index c9840e7..83419e6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
@@ -133,7 +133,7 @@
             LoggerFactory.getLogger(SCMSafeModeManager.class));
 
     List<Pipeline> pipelines = pipelineManager.getPipelines();
-    firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount -1));
+    firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount - 1));
 
     // As 90% of 7 with ceil is 7, if we send 6 pipeline reports, rule
     // validate should be still false.
@@ -144,7 +144,7 @@
     Assert.assertFalse(rule.validate());
 
     //Fire last pipeline event from datanode.
-    firePipelineEvent(pipelines.subList(pipelineFactorThreeCount -1,
+    firePipelineEvent(pipelines.subList(pipelineFactorThreeCount - 1,
             pipelineFactorThreeCount));
 
     GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000);
@@ -181,13 +181,13 @@
         pipelineManager.getPipelines(
             new RatisReplicationConfig(ReplicationFactor.THREE));
 
-    firePipelineEvent(pipelines.subList(0, pipelineCountThree -1));
+    firePipelineEvent(pipelines.subList(0, pipelineCountThree - 1));
 
     GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
         "reported count is 6"), 1000, 5000);
 
     //Fire last pipeline event from datanode.
-    firePipelineEvent(pipelines.subList(pipelineCountThree -1,
+    firePipelineEvent(pipelines.subList(pipelineCountThree - 1,
             pipelineCountThree));
 
     GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index ef6345e..cb200f2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -435,7 +435,7 @@
     }, 100, 1000 * 5);
   }
 
-  private void checkHealthy(int expectedCount) throws Exception{
+  private void checkHealthy(int expectedCount) throws Exception {
     GenericTestUtils.waitFor(() -> scmSafeModeManager
             .getHealthyPipelineSafeModeRule()
             .getCurrentHealthyPipelineCount() == expectedCount,
@@ -548,14 +548,14 @@
     assertTrue(scmSafeModeManager.getInSafeMode());
 
     // Register all DataNodes except last one and assert SCM is in safe mode.
-    for (int i = 0; i < numOfDns-1; i++) {
+    for (int i = 0; i < numOfDns - 1; i++) {
       queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
           HddsTestUtils.createNodeRegistrationContainerReport(containers));
       assertTrue(scmSafeModeManager.getInSafeMode());
       assertTrue(scmSafeModeManager.getCurrentContainerThreshold() == 1);
     }
 
-    if(numOfDns == 0){
+    if (numOfDns == 0) {
       GenericTestUtils.waitFor(() -> {
         return scmSafeModeManager.getInSafeMode();
       }, 10, 1000 * 10);
@@ -586,7 +586,7 @@
     containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
     String storageDir = GenericTestUtils.getTempPath(
         TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
-    try{
+    try {
       MockNodeManager nodeManager = new MockNodeManager(true, 3);
       config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
       // enable pipeline check
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
index 5ddbe55..7ad118c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
@@ -193,7 +193,7 @@
 
     // Generate 3 more certificates and revoke 2 of them
     List<BigInteger> newSerialIDs = new ArrayList<>();
-    for (int i = 0; i<3; i++) {
+    for (int i = 0; i < 3; i++) {
       X509Certificate cert = generateX509Cert();
       scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM);
       newSerialIDs.add(cert.getSerialNumber());
@@ -250,7 +250,7 @@
     scmCertStore.storeValidCertificate(serialID, x509Certificate, SCM);
     Date now = new Date();
     // Set revocation time in the future
-    Date revocationTime = new Date(now.getTime()+500);
+    Date revocationTime = new Date(now.getTime() + 500);
 
     X509CertificateHolder caCertificateHolder =
         new X509CertificateHolder(generateX509Cert().getEncoded());
@@ -282,7 +282,7 @@
   private long getTableSize(Iterator iterator) {
     long size = 0;
 
-    while(iterator.hasNext()) {
+    while (iterator.hasNext()) {
       size++;
       iterator.next();
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
index 0f0425e..efa9244 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -70,16 +70,16 @@
     SCMDatanodeHeartbeatDispatcher dispatcher =
         new SCMDatanodeHeartbeatDispatcher(mockNodeManager,
             new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event, NODE_REPORT);
-            eventReceived.incrementAndGet();
-            Assert.assertEquals(nodeReport,
-                ((NodeReportFromDatanode)payload).getReport());
+              @Override
+              public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(
+                  EVENT event, PAYLOAD payload) {
+                Assert.assertEquals(event, NODE_REPORT);
+                eventReceived.incrementAndGet();
+                Assert.assertEquals(nodeReport,
+                    ((NodeReportFromDatanode)payload).getReport());
 
-          }
-        });
+              }
+            });
 
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
 
@@ -113,24 +113,24 @@
         new SCMDatanodeHeartbeatDispatcher(
             mockNodeManager,
             new EventPublisher() {
-          @Override
-          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
-              EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertTrue(
-                event.equals(CONTAINER_REPORT)
-                    || event.equals(CMD_STATUS_REPORT));
+              @Override
+              public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(
+                  EVENT event, PAYLOAD payload) {
+                Assert.assertTrue(
+                    event.equals(CONTAINER_REPORT)
+                        || event.equals(CMD_STATUS_REPORT));
 
-            if (payload instanceof ContainerReportFromDatanode) {
-              Assert.assertEquals(containerReport,
-                  ((ContainerReportFromDatanode) payload).getReport());
-            }
-            if (payload instanceof CommandStatusReportFromDatanode) {
-              Assert.assertEquals(commandStatusReport,
-                  ((CommandStatusReportFromDatanode) payload).getReport());
-            }
-            eventReceived.incrementAndGet();
-          }
-        });
+                if (payload instanceof ContainerReportFromDatanode) {
+                  Assert.assertEquals(containerReport,
+                      ((ContainerReportFromDatanode) payload).getReport());
+                }
+                if (payload instanceof CommandStatusReportFromDatanode) {
+                  Assert.assertEquals(commandStatusReport,
+                      ((CommandStatusReportFromDatanode) payload).getReport());
+                }
+                eventReceived.incrementAndGet();
+              }
+            });
 
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
index 09dd796..8e1ebb7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
@@ -28,7 +28,11 @@
 import java.nio.charset.StandardCharsets;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import static org.junit.Assert.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 
 /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java
index 47495c9..e7b2c57 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java
@@ -57,7 +57,7 @@
   public Timeout timeout = Timeout.seconds(300);
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   @Rule
   public final TemporaryFolder tempDir = new TemporaryFolder();
@@ -129,13 +129,13 @@
       }
       server.notifyCrlUpdate();
 
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()==4, 100, 2000);
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4, 100, 2000);
       Assert.assertEquals(4, client.getUpdateCount());
       Assert.assertEquals(0, client.getErrorCount());
 
       revokeCertNow(certIds.get(5));
       server.notifyCrlUpdate();
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()>4, 100, 2000);
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4, 100, 2000);
       Assert.assertEquals(5, client.getUpdateCount());
       Assert.assertEquals(0, client.getErrorCount());
     } catch (Exception e) {
@@ -178,7 +178,7 @@
       revokeCertNow((certIds.get(0)));
       server.notifyCrlUpdate();
 
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()==1,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() == 1,
           100, 2000);
       Assert.assertEquals(1, client.getUpdateCount());
       Assert.assertEquals(0, client.getErrorCount());
@@ -186,14 +186,14 @@
       // revoke cert 5 with 10 seconds delay
       revokeCert(certIds.get(5), Instant.now().plus(Duration.ofSeconds(5)));
       server.notifyCrlUpdate();
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()>1,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() > 1,
           100, 2000);
       Assert.assertTrue(2 <= client.getUpdateCount());
       Assert.assertEquals(0, client.getErrorCount());
       Assert.assertTrue(1 >= client.getClientCRLStore()
           .getPendingCrlIds().size());
 
-      GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount()==1,
+      GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount() == 1,
           100, 20_000);
       Assert.assertTrue(client.getClientCRLStore()
           .getPendingCrlIds().isEmpty());
@@ -243,7 +243,7 @@
         revokeCertNow((certIds.get(i)));
       }
       server.notifyCrlUpdate();
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()==4,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4,
           100, 2000);
       Assert.assertEquals(4, client.getUpdateCount());
 
@@ -257,7 +257,7 @@
       // client retry connect to the server. The client will handle that.
       server.stop();
       server.start();
-      GenericTestUtils.waitFor(() -> client.getErrorCount()==1,
+      GenericTestUtils.waitFor(() -> client.getErrorCount() == 1,
           100, 2000);
       Assert.assertEquals(4, client.getUpdateCount());
       Assert.assertEquals(1, client.getErrorCount());
@@ -266,7 +266,7 @@
 
       revokeCertNow(certIds.get(5));
       server.notifyCrlUpdate();
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()>4,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4,
           100, 5000);
       Assert.assertEquals(5, client.getUpdateCount());
       Assert.assertEquals(1, client.getErrorCount());
@@ -282,16 +282,16 @@
       client.createChannel();
       client.start();
       Assert.assertEquals(5, clientCRLStore.getLatestCrlId());
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()>5,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() > 5,
           100, 2000);
       revokeCertNow(certIds.get(6));
       // mostly noop
       server.notifyCrlUpdate();
       LOG.info("Test client restart end.");
 
-      GenericTestUtils.waitFor(() -> client.getUpdateCount()>6,
+      GenericTestUtils.waitFor(() -> client.getUpdateCount() > 6,
           100, 2000);
-      Assert.assertTrue(client.getUpdateCount()>=6);
+      Assert.assertTrue(client.getUpdateCount() >= 6);
       Assert.assertEquals(2, client.getErrorCount());
       Assert.assertEquals(6, clientCRLStore.getLatestCrlId());
     } catch (Exception e) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
index 9cdad81..762f946 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
@@ -66,7 +66,7 @@
     try {
       new StorageContainerManager(conf);
       Assert.fail("Expected IOException due to incorrect MLV on SCM creation.");
-    } catch(IOException e) {
+    } catch (IOException e) {
       String expectedMessage = String.format("Metadata layout version (%s) > " +
           "software layout version (%s)", mlv, largestSlv);
       GenericTestUtils.assertExceptionContains(expectedMessage, e);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 33fb355..77bb579 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -183,7 +183,7 @@
    * @return DatanodeUsageInfo of the specified datanode
    */
   @Override
-  public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn){
+  public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) {
     return null;
   }
 
@@ -285,6 +285,12 @@
     throw new UnsupportedOperationException("Not yet implemented");
   }
 
+  @Override
+  public void removeContainer(DatanodeDetails datanodeDetails,
+                           ContainerID containerId) {
+    throw new UnsupportedOperationException("Not yet implemented");
+  }
+
   /**
    * Update set of containers available on a datanode.
    * @param uuid - DatanodeID
@@ -395,6 +401,14 @@
   }
 
   /**
+   * Send a refresh command to all healthy datanodes to refresh
+   * volume usage info immediately.
+   */
+  @Override
+  public void refreshAllHealthyDnUsageInfo() {
+    //no op
+  }
+  /**
    * Empty implementation for processNodeReport.
    * @param dnUuid
    * @param nodeReport
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index cf8b22c..eb76c5e 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -253,21 +253,29 @@
 
     public static LogCapturer captureLogs(Log l) {
       Logger logger = ((Log4JLogger) l).getLogger();
-      return new LogCapturer(logger);
+      return new LogCapturer(logger, getDefaultLayout());
     }
 
     public static LogCapturer captureLogs(org.slf4j.Logger logger) {
-      return new LogCapturer(toLog4j(logger));
+      return new LogCapturer(toLog4j(logger), getDefaultLayout());
     }
 
-    private LogCapturer(Logger logger) {
-      this.logger = logger;
+    public static LogCapturer captureLogs(org.slf4j.Logger logger,
+        Layout layout) {
+      return new LogCapturer(toLog4j(logger), layout);
+    }
+
+    private static Layout getDefaultLayout() {
       Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
       if (defaultAppender == null) {
         defaultAppender = Logger.getRootLogger().getAppender("console");
       }
-      final Layout layout = (defaultAppender == null) ? new PatternLayout() :
+      return (defaultAppender == null) ? new PatternLayout() :
           defaultAppender.getLayout();
+    }
+
+    private LogCapturer(Logger logger, Layout layout) {
+      this.logger = logger;
       this.appender = new WriterAppender(layout, sw);
       logger.addAppender(this.appender);
     }
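
The LogCapturer refactor above adds a captureLogs overload that accepts an explicit log4j Layout instead of always inheriting the root appender's layout. A small usage sketch under that assumption (the pattern string and target logger are illustrative):

    // Capture a class's slf4j logger with a custom pattern; getOutput() is the
    // same accessor the safe-mode tests above use for assertions.
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(
            LoggerFactory.getLogger(SCMSafeModeManager.class),
            new PatternLayout("%d{ISO8601} [%t] %-5p %c{1}: %m%n"));
    // ... exercise code that logs ...
    Assert.assertTrue(capturer.getOutput().contains("expected message"));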
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
index a1e4740..61dd5bf 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -521,7 +521,7 @@
       throws Exception {
     return intercept(clazz, contained,
         "Expecting " + clazz.getName()
-        + (contained != null? (" with text " + contained) : "")
+        + (contained != null ? (" with text " + contained) : "")
         + " but got ",
         () -> {
           eval.call();
@@ -589,7 +589,7 @@
       T expected,
       Optional<T> actual) {
     Assert.assertNotNull(message, actual);
-    Assert.assertTrue(message +" -not present", actual.isPresent());
+    Assert.assertTrue(message + " -not present", actual.isPresent());
     Assert.assertEquals(message, expected, actual.get());
   }
 
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java
index e0cd436bd..44e4d4c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java
@@ -36,7 +36,7 @@
   @Override
   public void execute(ScmClient scmClient) throws IOException {
     boolean execReturn = scmClient.getContainerBalancerStatus();
-    if(execReturn){
+    if (execReturn) {
       System.out.println("ContainerBalancer is Running.");
     } else {
       System.out.println("ContainerBalancer is Not Running.");
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
index c6800be..9bc3649 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
@@ -43,7 +43,7 @@
     boolean execReturn = scmClient.getReplicationManagerStatus();
 
     // Output data list
-    if(execReturn){
+    if (execReturn) {
       LOG.info("ReplicationManager is Running.");
     } else {
       LOG.info("ReplicationManager is Not Running.");
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
index ba359af..db2f02c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
@@ -51,7 +51,7 @@
     boolean execReturn = scmClient.inSafeMode();
 
     // Output data list
-    if(execReturn){
+    if (execReturn) {
       LOG.info("SCM is in safe mode.");
       if (verbose) {
         for (Map.Entry<String, Pair<Boolean, String>> entry :
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
index 12490c5..bcf64de 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
@@ -42,7 +42,7 @@
   @Override
   public void execute(ScmClient scmClient) throws IOException {
     boolean execReturn = scmClient.forceExitSafeMode();
-    if(execReturn){
+    if (execReturn) {
       LOG.info("SCM exit safe mode successfully.");
     }
   }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java
index 7d897ff..21ba035 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java
@@ -38,8 +38,8 @@
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class,
     subcommands = {
-      InfoSubcommand.class,
-      ListSubcommand.class,
+        InfoSubcommand.class,
+        ListSubcommand.class,
     })
 
 @MetaInfServices(SubcommandWithParent.class)
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
index 232cc8d..23ff917 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
@@ -52,7 +52,7 @@
           String.join("\n", hosts));
       if (errors.size() > 0) {
         for (DatanodeAdminError error : errors) {
-          System.err.println("Error: " + error.getHostname() +": "
+          System.err.println("Error: " + error.getHostname() + ": "
               + error.getError());
         }
         // Throwing the exception will cause a non-zero exit status for the
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index 6432071..6e6e3cf 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -71,28 +71,24 @@
   @Override
   public void execute(ScmClient scmClient) throws IOException {
     pipelines = scmClient.listPipelines();
-    if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) {
-      getAllNodes(scmClient).forEach(this::printDatanodeInfo);
-    } else {
-      Stream<DatanodeWithAttributes> allNodes = getAllNodes(scmClient).stream();
-      if (!Strings.isNullOrEmpty(ipaddress)) {
-        allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress()
-            .compareToIgnoreCase(ipaddress) == 0);
-      }
-      if (!Strings.isNullOrEmpty(uuid)) {
-        allNodes = allNodes.filter(p ->
-            p.getDatanodeDetails().getUuidString().equals(uuid));
-      }
-      if (!Strings.isNullOrEmpty(nodeOperationalState)) {
-        allNodes = allNodes.filter(p -> p.getOpState().toString()
-            .compareToIgnoreCase(nodeOperationalState) == 0);
-      }
-      if (!Strings.isNullOrEmpty(nodeState)) {
-        allNodes = allNodes.filter(p -> p.getHealthState().toString()
-            .compareToIgnoreCase(nodeState) == 0);
-      }
-      allNodes.forEach(this::printDatanodeInfo);
+    Stream<DatanodeWithAttributes> allNodes = getAllNodes(scmClient).stream();
+    if (!Strings.isNullOrEmpty(ipaddress)) {
+      allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress()
+          .compareToIgnoreCase(ipaddress) == 0);
     }
+    if (!Strings.isNullOrEmpty(uuid)) {
+      allNodes = allNodes.filter(p ->
+          p.getDatanodeDetails().getUuidString().equals(uuid));
+    }
+    if (!Strings.isNullOrEmpty(nodeOperationalState)) {
+      allNodes = allNodes.filter(p -> p.getOpState().toString()
+          .compareToIgnoreCase(nodeOperationalState) == 0);
+    }
+    if (!Strings.isNullOrEmpty(nodeState)) {
+      allNodes = allNodes.filter(p -> p.getHealthState().toString()
+          .compareToIgnoreCase(nodeState) == 0);
+    }
+    allNodes.forEach(this::printDatanodeInfo);
   }
 
   private List<DatanodeWithAttributes> getAllNodes(ScmClient scmClient)
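Note on the hunk above: the refactor drops the special-case "no filter" branch, so every request now builds one Stream<DatanodeWithAttributes> and applies each filter only when the corresponding option was supplied; an unfiltered listing simply falls through all four conditionals. A minimal sketch of the same pattern, using a hypothetical Node type rather than the Ozone classes:

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    final class ConditionalFilterSketch {
      // Hypothetical stand-in for DatanodeWithAttributes.
      static final class Node {
        final String ip;
        final String uuid;
        Node(String ip, String uuid) {
          this.ip = ip;
          this.uuid = uuid;
        }
      }

      static List<Node> list(List<Node> all, String ipFilter, String uuidFilter) {
        Stream<Node> s = all.stream();
        // Each filter is applied only when the caller supplied a value; with no
        // filters the stream falls through unchanged, mirroring the unified
        // branch in ListInfoSubcommand#execute above.
        if (ipFilter != null && !ipFilter.isEmpty()) {
          s = s.filter(n -> n.ip.equalsIgnoreCase(ipFilter));
        }
        if (uuidFilter != null && !uuidFilter.isEmpty()) {
          s = s.filter(n -> n.uuid.equals(uuidFilter));
        }
        return s.collect(Collectors.toList());
      }
    }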
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
index 6d59e3c..a64c400 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
@@ -45,7 +45,7 @@
   private List<String> hosts = new ArrayList<>();
 
   @CommandLine.Option(names = {"--end"},
-      description = "Automatically end maintenance after the given hours. "+
+      description = "Automatically end maintenance after the given hours. " +
           "By default, maintenance must be ended manually.")
   private int endInHours = 0;
 
@@ -58,7 +58,7 @@
           String.join("\n", hosts));
       if (errors.size() > 0) {
         for (DatanodeAdminError error : errors) {
-          System.err.println("Error: " + error.getHostname() +": "
+          System.err.println("Error: " + error.getHostname() + ": "
               + error.getError());
         }
         // Throwing the exception will cause a non-zero exit status for the
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
index 94b97db..61f7826 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
@@ -53,7 +53,7 @@
           String.join("\n", hosts));
       if (errors.size() > 0) {
         for (DatanodeAdminError error : errors) {
-          System.err.println("Error: " + error.getHostname() +": "
+          System.err.println("Error: " + error.getHostname() + ": "
               + error.getError());
         }
         // Throwing the exception will cause a non-zero exit status for the
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
index 5c71076..be0e2c8 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
@@ -73,7 +73,7 @@
 
     for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
       Pattern p = Pattern.compile(
-          "^"+state.toString() + ": 0$", Pattern.MULTILINE);
+          "^" + state.toString() + ": 0$", Pattern.MULTILINE);
       Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
     }
@@ -81,7 +81,7 @@
     for (ReplicationManagerReport.HealthState state :
         ReplicationManagerReport.HealthState.values()) {
       Pattern p = Pattern.compile(
-          "^"+state.toString() + ": 0$", Pattern.MULTILINE);
+          "^" + state.toString() + ": 0$", Pattern.MULTILINE);
       Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
     }
@@ -98,7 +98,7 @@
     int counter = SEED;
     for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
       Pattern p = Pattern.compile(
-          "^"+state.toString() + ": " + counter + "$", Pattern.MULTILINE);
+          "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE);
       Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
       counter++;
@@ -108,13 +108,13 @@
     for (ReplicationManagerReport.HealthState state :
         ReplicationManagerReport.HealthState.values()) {
       Pattern p = Pattern.compile(
-          "^"+state.toString() + ": " + counter + "$", Pattern.MULTILINE);
+          "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE);
       Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
 
       // Check the correct samples are returned
       p = Pattern.compile(
-          "^First 100 "+ state + " containers:\n"
+          "^First 100 " + state + " containers:\n"
               + containerList(0, counter) + "$", Pattern.MULTILINE);
       m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
@@ -151,7 +151,7 @@
       if (i != start) {
         sb.append(", ");
       }
-      sb.append("#"+i);
+      sb.append("#" + i);
     }
     return sb.toString();
   }
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
index 70c74a9..69b0efb 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
@@ -90,7 +90,7 @@
     assertTrue(m.find());
     for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
       p = Pattern.compile(
-          "^Health State:\\s+"+state+"$", Pattern.MULTILINE);
+          "^Health State:\\s+" + state + "$", Pattern.MULTILINE);
       m = p.matcher(outContent.toString(DEFAULT_ENCODING));
       assertTrue(m.find());
     }
@@ -106,11 +106,11 @@
   private List<HddsProtos.Node> getNodeDetails() {
     List<HddsProtos.Node> nodes = new ArrayList<>();
 
-    for (int i=0; i<4; i++) {
+    for (int i = 0; i < 4; i++) {
       HddsProtos.DatanodeDetailsProto.Builder dnd =
           HddsProtos.DatanodeDetailsProto.newBuilder();
       dnd.setHostName("host" + i);
-      dnd.setIpAddress("1.2.3." + i+1);
+      dnd.setIpAddress("1.2.3." + i + 1);
       dnd.setNetworkLocation("/default");
       dnd.setNetworkName("host" + i);
       dnd.addPorts(HddsProtos.Port.newBuilder()
diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml
index b5ba8e7..9ba3af6 100644
--- a/hadoop-ozone/client/pom.xml
+++ b/hadoop-ozone/client/pom.xml
@@ -51,6 +51,7 @@
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 1f819ac..2412b88 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -207,7 +207,7 @@
   public Iterator<? extends OzoneVolume> listVolumesByUser(String user,
       String volumePrefix, String prevVolume)
       throws IOException {
-    if(Strings.isNullOrEmpty(user)) {
+    if (Strings.isNullOrEmpty(user)) {
       user = UserGroupInformation.getCurrentUser().getUserName();
     }
     return new VolumeIterator(user, volumePrefix, prevVolume);
@@ -269,7 +269,7 @@
 
     @Override
     public OzoneVolume next() {
-      if(hasNext()) {
+      if (hasNext()) {
         currentValue = currentIterator.next();
         return currentValue;
       }
@@ -284,7 +284,7 @@
     private List<OzoneVolume> getNextListOfVolumes(String prevVolume) {
       try {
         //if user is null, we do list of all volumes.
-        if(user != null) {
+        if (user != null) {
           return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize);
         }
         return proxy.listVolumes(volPrefix, prevVolume, listCacheSize);
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 16d5a1b..a292ae2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -611,7 +611,7 @@
    * @return {@code Iterator<OzoneKey>}
    */
   public Iterator<? extends OzoneKey> listKeys(String keyPrefix)
-      throws IOException{
+      throws IOException {
     return listKeys(keyPrefix, null);
   }
 
@@ -910,7 +910,7 @@
    * @param userName new owner
    * @throws IOException
    */
-  public boolean setOwner(String userName) throws IOException{
+  public boolean setOwner(String userName) throws IOException {
     boolean result = proxy.setBucketOwner(volumeName, name, userName);
     this.owner = userName;
     return result;
@@ -939,7 +939,7 @@
      * The returned keys match key prefix.
      * @param keyPrefix
      */
-    KeyIterator(String keyPrefix, String prevKey) throws IOException{
+    KeyIterator(String keyPrefix, String prevKey) throws IOException {
       setKeyPrefix(keyPrefix);
       this.currentValue = null;
       this.currentIterator = getNextListOfKeys(prevKey).iterator();
@@ -947,7 +947,7 @@
 
     @Override
     public boolean hasNext() {
-      if(!currentIterator.hasNext() && currentValue != null) {
+      if (!currentIterator.hasNext() && currentValue != null) {
         try {
           currentIterator =
               getNextListOfKeys(currentValue.getName()).iterator();
@@ -960,7 +960,7 @@
 
     @Override
     public OzoneKey next() {
-      if(hasNext()) {
+      if (hasNext()) {
         currentValue = currentIterator.next();
         return currentValue;
       }
@@ -1008,7 +1008,7 @@
    *
    * Note: Does not guarantee to return the list of keys in a sorted order.
    */
-  private class KeyIteratorWithFSO extends KeyIterator{
+  private class KeyIteratorWithFSO extends KeyIterator {
 
     private Stack<String> stack;
     private List<OzoneKey> pendingItemsToBeBatched;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 9bf3973..2830bb1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -51,7 +51,7 @@
   /**
    * Private constructor, class is not meant to be initialized.
    */
-  private OzoneClientFactory(){}
+  private OzoneClientFactory() { }
 
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
index 9326bed..e37969d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
@@ -107,7 +107,7 @@
    *
    * @return bucketName
    */
-  public String getBucketName(){
+  public String getBucketName() {
     return bucketName;
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index 3847b12..389ccf8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -480,7 +480,7 @@
 
     @Override
     public OzoneBucket next() {
-      if(hasNext()) {
+      if (hasNext()) {
         currentValue = currentIterator.next();
         return currentValue;
       }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
index ece725f..d75e878 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.client.checksum;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
@@ -50,8 +49,8 @@
   private final long length;
   private ClientProtocol rpcClient;
 
-  private XceiverClientFactory xceiverClientFactory;
   private final DataOutputBuffer blockChecksumBuf = new DataOutputBuffer();
+  private XceiverClientFactory xceiverClientFactory;
   private FileChecksum fileChecksum;
   private List<OmKeyLocationInfo> keyLocationInfos;
   private long remaining = 0L;
@@ -161,12 +160,6 @@
     }
   }
 
-  @VisibleForTesting
-  List<OmKeyLocationInfo> getKeyLocationInfos() {
-    return keyLocationInfos;
-  }
-
-
   /**
    * Compute block checksums block by block and append the raw bytes of the
    * block checksums into getBlockChecksumBuf().
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
index 5b79eb8..0c61432 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.security.token.Token;
 
@@ -44,9 +44,9 @@
 public class ReplicatedFileChecksumHelper extends BaseFileChecksumHelper {
   private int blockIdx;
 
-  ReplicatedFileChecksumHelper(
+  public ReplicatedFileChecksumHelper(
       OzoneVolume volume, OzoneBucket bucket, String keyName, long length,
-      RpcClient rpcClient) throws IOException {
+      ClientProtocol rpcClient) throws IOException {
     super(volume, bucket, keyName, length, rpcClient);
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
index 45e8473..5aff685 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
@@ -248,7 +248,7 @@
   /**
    * Increases current position by one. Used in writes.
    */
-  void incCurrentPosition(){
+  void incCurrentPosition() {
     currentPosition++;
   }
 
@@ -280,7 +280,7 @@
     this.blockID = id;
   }
 
-  OzoneClientConfig getConf(){
+  OzoneClientConfig getConf() {
     return this.config;
   }
 
@@ -305,7 +305,7 @@
    * OMKeyLocationInfo.
    * @return
    */
-  Pipeline getPipelineForOMLocationReport(){
+  Pipeline getPipelineForOMLocationReport() {
     return getPipeline();
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 38f0aa8..7985bf7 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -208,7 +208,6 @@
     List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
     for (BlockOutputStreamEntry streamEntry : streams) {
       long length = streamEntry.getCurrentPosition();
-
       // Commit only those blocks to OzoneManager which are not empty
       if (length != 0) {
         OmKeyLocationInfo info =
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
index 3a88d5f..530084e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
@@ -172,6 +172,10 @@
             actualNumBytesRead, numBytesRead, readPositionAdjustedBy,
             actualNumBytesRead - readPositionAdjustedBy);
 
+        if (readLengthAdjustedBy > 0) {
+          current.seek(current.getPos() - readLengthAdjustedBy);
+        }
+
         // Reset readPositionAdjustedBy and readLengthAdjustedBy
         readPositionAdjustedBy = 0;
         readLengthAdjustedBy = 0;
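Note on the hunk above: the added seek compensates for reads whose length was padded up to a cipher-block boundary. When readLengthAdjustedBy is positive the underlying stream has advanced past the bytes the caller actually asked for, so the position is rewound by that amount before the adjustment counters are reset. Illustrative arithmetic only (not the crypto stream itself):

    final class SeekAdjustmentSketch {
      public static void main(String[] args) {
        long startPos = 512;             // position before the read
        long requested = 100;            // bytes the caller asked for
        long readLengthAdjustedBy = 12;  // extra bytes read to finish the cipher block
        long pos = startPos + requested + readLengthAdjustedBy;  // stream advanced too far
        if (readLengthAdjustedBy > 0) {
          pos -= readLengthAdjustedBy;   // mirrors current.seek(current.getPos() - readLengthAdjustedBy)
        }
        assert pos == startPos + requested;  // next read resumes where the caller expects
        System.out.println("resume at " + pos);
      }
    }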
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 8502fec..7da2108 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -762,7 +762,7 @@
    * of the S3 API implementation within Ozone.
    * @param s3Auth authentication information for each S3 API call.
    */
-  void setTheadLocalS3Auth(S3Auth s3Auth);
+  void setThreadLocalS3Auth(S3Auth s3Auth);
 
   /**
    * Gets the S3 Authentication information that is attached to the thread.
@@ -773,7 +773,7 @@
   /**
    * Clears the S3 Authentication information attached to the thread.
    */
-  void clearTheadLocalS3Auth();
+  void clearThreadLocalS3Auth();
 
   /**
    * Sets the owner of bucket.
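Note on the hunk above: the rename fixes the "Thead" typo in the ClientProtocol method names; the implementation in RpcClient already delegated to the correctly spelled OzoneManager client methods, so only the interface and its callers change. A hedged usage sketch of the thread-local pattern these methods implement (the try/finally shape is an assumption about caller behavior, not code from this patch; imports omitted, client is a ClientProtocol and auth an S3Auth built from the request):

    // Attach per-request S3 credentials to the current thread for the
    // duration of one S3 gateway call, then clear them.
    client.setThreadLocalS3Auth(auth);
    try {
      // perform the S3-initiated volume/bucket/key operation here
    } finally {
      // always clear, so credentials never leak to the next request on this thread
      client.clearThreadLocalS3Auth();
    }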
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
index eecc73b..470f695 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
@@ -80,7 +80,7 @@
    */
   public static Text getKeyProviderMapKey(URI namespaceUri) {
     return new Text(O3_KMS_PREFIX + namespaceUri.getScheme()
-        +"://" + namespaceUri.getAuthority());
+        + "://" + namespaceUri.getAuthority());
   }
 
   public static String bytes2String(byte[] bytes) {
@@ -131,7 +131,7 @@
   }
 
   public static KeyProvider getKeyProvider(final ConfigurationSource conf,
-      final URI serverProviderUri) throws IOException{
+      final URI serverProviderUri) throws IOException {
     if (serverProviderUri == null) {
       throw new IOException("KMS serverProviderUri is not configured.");
     }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index b9db113..28efa8c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -537,7 +537,7 @@
 
     List<OzoneAcl> listOfAcls = getAclList();
     //ACLs from BucketArgs
-    if(bucketArgs.getAcls() != null) {
+    if (bucketArgs.getAcls() != null) {
       listOfAcls.addAll(bucketArgs.getAcls());
     }
 
@@ -856,7 +856,7 @@
         .setAcls(getAclList())
         .setLatestVersionLocation(getLatestVersionLocation);
     if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) {
-      try{
+      try {
         GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
         builder.addAllMetadata(gKey.getKeyDetails());
       } catch (Exception e) {
@@ -950,13 +950,13 @@
     List<OmKeyLocationInfo> keyLocationInfos
         = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
 
-    for(OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
+    for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
       Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
 
       Pipeline pipelineBefore = keyLocationInfo.getPipeline();
       List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
 
-      for(DatanodeDetails dn : datanodes) {
+      for (DatanodeDetails dn : datanodes) {
         List<DatanodeDetails> nodes = new ArrayList<>();
         nodes.add(dn);
         Pipeline pipeline
@@ -1015,7 +1015,7 @@
       String fromKeyName, String toKeyName) throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
-    if(checkKeyNameEnabled){
+    if (checkKeyNameEnabled) {
       HddsClientUtils.verifyKeyName(toKeyName);
     }
     HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
@@ -1157,13 +1157,13 @@
       throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
-    if(checkKeyNameEnabled) {
+    if (checkKeyNameEnabled) {
       HddsClientUtils.verifyKeyName(keyName);
     }
     HddsClientUtils.checkNotNull(keyName, uploadID);
-    Preconditions.checkArgument(partNumber > 0 && partNumber <=10000, "Part " +
+    Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " +
         "number should be greater than zero and less than or equal to 10000");
-    Preconditions.checkArgument(size >=0, "size should be greater than or " +
+    Preconditions.checkArgument(size >= 0, "size should be greater than or " +
         "equal to zero");
     String requestId = UUID.randomUUID().toString();
     OmKeyArgs keyArgs = new OmKeyArgs.Builder()
@@ -1508,7 +1508,7 @@
       final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
 
       List<OzoneCryptoInputStream> cryptoInputStreams = new ArrayList<>();
-      for(LengthInputStream lengthInputStream : lengthInputStreams) {
+      for (LengthInputStream lengthInputStream : lengthInputStreams) {
         final OzoneCryptoInputStream ozoneCryptoInputStream =
             new OzoneCryptoInputStream(lengthInputStream,
                 OzoneKMSUtil.getCryptoCodec(conf, feInfo),
@@ -1546,11 +1546,11 @@
               decrypted.getMaterial(), feInfo.getIV());
       return new OzoneOutputStream(cryptoOut);
     } else {
-      try{
+      try {
         GDPRSymmetricKey gk;
         Map<String, String> openKeyMetadata =
             openKey.getKeyInfo().getMetadata();
-        if(Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))){
+        if (Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))) {
           gk = new GDPRSymmetricKey(
               openKeyMetadata.get(OzoneConsts.GDPR_SECRET),
               openKeyMetadata.get(OzoneConsts.GDPR_ALGORITHM)
@@ -1559,7 +1559,7 @@
           return new OzoneOutputStream(
               new CipherOutputStream(keyOutputStream, gk.getCipher()));
         }
-      }catch (Exception ex){
+      } catch (Exception ex) {
+      } catch (Exception ex) {
         throw new IOException(ex);
       }
 
@@ -1632,7 +1632,7 @@
   }
 
   @Override
-  public void setTheadLocalS3Auth(
+  public void setThreadLocalS3Auth(
       S3Auth ozoneSharedSecretAuth) {
     ozoneManagerClient.setThreadLocalS3Auth(ozoneSharedSecretAuth);
   }
@@ -1643,7 +1643,7 @@
   }
 
   @Override
-  public void clearTheadLocalS3Auth() {
+  public void clearThreadLocalS3Auth() {
     ozoneManagerClient.clearThreadLocalS3Auth();
   }
 
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index 712120d..6c047ab 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -65,7 +65,7 @@
   public Timeout timeout = Timeout.seconds(300);
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   /**
    * Verify client endpoint lookup failure if it is not configured.
@@ -104,7 +104,7 @@
     conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
 
     String[] nodes = new String[] {"scm1", "scm2", "scm3"};
-    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId,
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId,
         "scm1,scm2,scm3");
     conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1");
 
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
index 198602d..6232b98 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
@@ -179,13 +179,13 @@
 
     XceiverClientGrpc xceiverClientGrpc =
         new XceiverClientGrpc(pipeline, conf) {
-      @Override
-      public XceiverClientReply sendCommandAsync(
-          ContainerProtos.ContainerCommandRequestProto request,
-          DatanodeDetails dn) {
-        return buildValidResponse();
-      }
-    };
+          @Override
+          public XceiverClientReply sendCommandAsync(
+              ContainerProtos.ContainerCommandRequestProto request,
+              DatanodeDetails dn) {
+            return buildValidResponse();
+          }
+        };
     XceiverClientFactory factory = Mockito.mock(XceiverClientFactory.class);
     when(factory.acquireClientForReadData(ArgumentMatchers.any())).
         thenReturn(xceiverClientGrpc);
@@ -232,7 +232,7 @@
     helper.compute();
     FileChecksum fileChecksum = helper.getFileChecksum();
     assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum);
-    assertEquals(1, helper.getKeyLocationInfos().size());
+    assertEquals(1, helper.getKeyLocationInfoList().size());
   }
 
   private XceiverClientReply buildValidResponse() {
@@ -317,7 +317,7 @@
       helper.compute();
       FileChecksum fileChecksum = helper.getFileChecksum();
       assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum);
-      assertEquals(1, helper.getKeyLocationInfos().size());
+      assertEquals(1, helper.getKeyLocationInfoList().size());
     }
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
index 49fb5e3..3f597ab 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
@@ -24,7 +24,8 @@
 
 import java.io.IOException;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * Test class for {@link OzoneKMSUtil}.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 96c5658..7571f4e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -484,7 +484,7 @@
     // If this key is in a GDPR enforced bucket, then before moving
     // KeyInfo to deletedTable, remove the GDPR related metadata and
     // FileEncryptionInfo from KeyInfo.
-    if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
+    if (Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
       keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
       keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
       keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
@@ -494,7 +494,7 @@
     // Set the updateID
     keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled);
 
-    if(repeatedOmKeyInfo == null) {
+    if (repeatedOmKeyInfo == null) {
       //The key doesn't exist in deletedTable, so create a new instance.
       repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
     } else {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
index 6a74342..7ca0634 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -105,7 +105,7 @@
     Objects.requireNonNull(type);
     Objects.requireNonNull(acls);
 
-    if(acls.cardinality() > ACLType.getNoOfAcls()) {
+    if (acls.cardinality() > ACLType.getNoOfAcls()) {
       throw new IllegalArgumentException("Acl bitset passed has unexpected " +
           "size. bitset size:" + acls.cardinality() + ", bitset:"
           + acls.toString());
@@ -159,7 +159,7 @@
     AclScope aclScope = AclScope.ACCESS;
 
     // Check if acl string contains scope info.
-    if(parts[2].matches(ACL_SCOPE_REGEX)) {
+    if (parts[2].matches(ACL_SCOPE_REGEX)) {
       int indexOfOpenBracket = parts[2].indexOf("[");
       bits = parts[2].substring(0, indexOfOpenBracket);
       aclScope = AclScope.valueOf(parts[2].substring(indexOfOpenBracket + 1,
@@ -194,7 +194,7 @@
     }
     List<OzoneAcl> ozAcls = new ArrayList<>();
 
-    for(String acl:parts) {
+    for (String acl:parts) {
       ozAcls.add(parseAcl(acl));
     }
     return ozAcls;
@@ -289,7 +289,7 @@
   }
 
   public List<ACLType> getAclList() {
-    if(aclBitSet !=  null) {
+    if (aclBitSet != null) {
       return aclBitSet.stream().mapToObj(a ->
           ACLType.values()[a]).collect(Collectors.toList());
     }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index e3e7f41..82d26f9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -201,15 +201,15 @@
   public static final String  DELEGATION_REMOVER_SCAN_INTERVAL_KEY =
       "ozone.manager.delegation.remover.scan.interval";
   public static final long    DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT =
-      60*60*1000;
+      60 * 60 * 1000;
   public static final String  DELEGATION_TOKEN_RENEW_INTERVAL_KEY =
       "ozone.manager.delegation.token.renew-interval";
   public static final long    DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT =
-      24*60*60*1000;  // 1 day = 86400000 ms
+      24 * 60 * 60 * 1000;  // 1 day = 86400000 ms
   public static final String  DELEGATION_TOKEN_MAX_LIFETIME_KEY =
       "ozone.manager.delegation.token.max-lifetime";
   public static final long    DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT =
-      7*24*60*60*1000; // 7 days
+      7 * 24 * 60 * 60 * 1000; // 7 days
 
   public static final String OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY =
       "ozone.manager.db.checkpoint.transfer.bandwidthPerSec";
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DBUpdates.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DBUpdates.java
index 72f2b64..71916db 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DBUpdates.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/DBUpdates.java
@@ -29,6 +29,8 @@
 
   private long currentSequenceNumber = -1;
 
+  private long latestSequenceNumber = -1L;
+
   public DBUpdates() {
     this.dataList = new ArrayList<>();
   }
@@ -55,4 +57,12 @@
   public long getCurrentSequenceNumber() {
     return currentSequenceNumber;
   }
+
+  public void setLatestSequenceNumber(long sequenceNumber) {
+    this.latestSequenceNumber = sequenceNumber;
+  }
+
+  public long getLatestSequenceNumber() {
+    return latestSequenceNumber;
+  }
 }
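Note on the hunk above: DBUpdates now carries two sequence numbers. getCurrentSequenceNumber() is the last transaction included in this batch, while the new getLatestSequenceNumber() reports how far the OM database has actually advanced, which lets a downstream consumer (for example Recon's OM DB sync) estimate its lag. A hedged sketch of how a consumer might use the pair (method and variable names are illustrative):

    import org.apache.hadoop.ozone.om.helpers.DBUpdates;

    final class DbUpdatesLagSketch {
      // Hedged sketch: 'updates' would come from the OM "get DB updates" call.
      static long estimateLag(DBUpdates updates) {
        long applied = updates.getCurrentSequenceNumber();  // last txn in this batch
        long latest = updates.getLatestSequenceNumber();    // newest txn known to the OM
        return Math.max(0, latest - applied);               // remaining transactions to pull
      }
    }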
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 1806a03..9291d33 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -153,7 +153,7 @@
         this.metadata.get(OzoneConsts.GDPR_FLAG));
     auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
                 String.valueOf(this.isVersionEnabled));
-    if(this.storageType != null){
+    if (this.storageType != null) {
       auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name());
     }
     if (this.ownerName != null) {
@@ -241,16 +241,16 @@
     BucketArgs.Builder builder = BucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName);
-    if(isVersionEnabled != null) {
+    if (isVersionEnabled != null) {
       builder.setIsVersionEnabled(isVersionEnabled);
     }
-    if(storageType != null) {
+    if (storageType != null) {
       builder.setStorageType(storageType.toProto());
     }
-    if(quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) {
+    if (quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) {
       builder.setQuotaInBytes(quotaInBytes);
     }
-    if(quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) {
+    if (quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) {
       builder.setQuotaInNamespace(quotaInNamespace);
     }
     if (ownerName != null) {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index adbf398..ad81c8b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -319,6 +319,7 @@
     Map<String, String> auditMap = new LinkedHashMap<>();
     auditMap.put(OzoneConsts.VOLUME, this.volumeName);
     auditMap.put(OzoneConsts.BUCKET, this.bucketName);
+    auditMap.put(OzoneConsts.BUCKET_LAYOUT, String.valueOf(this.bucketLayout));
     auditMap.put(OzoneConsts.GDPR_FLAG,
         this.metadata.get(OzoneConsts.GDPR_FLAG));
     auditMap.put(OzoneConsts.ACLS,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
index 3d5d6a5..75f90f8 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -21,7 +21,12 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
-import java.util.*;
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
 
 /**
  * This class represents the directory information by keeping each component
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index d6f2243..485cf32 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -150,7 +150,7 @@
 
 
   public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
-    return keyLocationVersions.size() == 0? null :
+    return keyLocationVersions.size() == 0 ? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
   }
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
index 1504f4e..9df7518 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -225,7 +225,7 @@
         getPipeline(keyLocation),
         keyLocation.getLength(),
         keyLocation.getOffset(), keyLocation.getPartNumber());
-    if(keyLocation.hasToken()) {
+    if (keyLocation.hasToken()) {
       info.token = (Token<OzoneBlockTokenIdentifier>)
               OzonePBHelper.tokenFromProto(keyLocation.getToken());
     }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
index e312138..ec66068 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
@@ -165,7 +165,7 @@
     }
   }
 
-  void removeBlocks(long versionToRemove){
+  void removeBlocks(long versionToRemove) {
     locationVersionMap.remove(versionToRemove);
   }
 
@@ -181,7 +181,7 @@
     sb.append("version:").append(version).append(" ");
     sb.append("isMultipartKey:").append(isMultipartKey);
     for (List<OmKeyLocationInfo> kliList : locationVersionMap.values()) {
-      for(OmKeyLocationInfo kli: kliList) {
+      for (OmKeyLocationInfo kli: kliList) {
         sb.append(kli.getLocalID()).append(" || ");
       }
     }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 85165d6..e296855 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -64,8 +64,8 @@
    * @param updateID - A sequence number that denotes the last update on this
    * object. This is a monotonically increasing number.
    */
-  @SuppressWarnings({"checkstyle:ParameterNumber", "This is invoked from a " +
-      "builder."})
+  @SuppressWarnings({"checkstyle:ParameterNumber",
+      "This is invoked from a builder."})
   private OmVolumeArgs(String adminName, String ownerName, String volume,
       long quotaInBytes, long quotaInNamespace, long usedNamespace,
       Map<String, String> metadata, List<OzoneAcl> acls, long creationTime,
@@ -94,7 +94,7 @@
   }
 
   public void setQuotaInNamespace(long quotaInNamespace) {
-    this.quotaInNamespace= quotaInNamespace;
+    this.quotaInNamespace = quotaInNamespace;
   }
 
   public void setCreationTime(long time) {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
index 0ca1e36..94dff51 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
@@ -43,7 +43,7 @@
  */
 public final class OzoneAclUtil {
 
-  private OzoneAclUtil(){
+  private OzoneAclUtil() {
   }
 
   /**
@@ -60,7 +60,7 @@
 
     // User ACL.
     listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS));
-    if(userGroups != null) {
+    if (userGroups != null) {
       // Group ACLs of the User.
       Arrays.asList(userGroups).forEach((group) -> listOfAcls.add(
           new OzoneAcl(GROUP, group, groupRights, ACCESS)));
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 3f09d4a..1de5934 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -31,7 +31,7 @@
  */
 public final class OzoneFSUtils {
 
-  private OzoneFSUtils() {}
+  private OzoneFSUtils() { }
 
   /**
    * Returns string representation of path after removing the leading slash.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
index cde8e39..83a7184 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -54,7 +54,7 @@
   public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
       repeatedKeyInfo) {
     List<OmKeyInfo> list = new ArrayList<>();
-    for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
+    for (KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
       list.add(OmKeyInfo.getFromProtobuf(k));
     }
     return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build();
@@ -67,7 +67,7 @@
    */
   public RepeatedKeyInfo getProto(boolean compact, int clientVersion) {
     List<KeyInfo> list = new ArrayList<>();
-    for(OmKeyInfo k : omKeyInfoList) {
+    for (OmKeyInfo k : omKeyInfoList) {
       list.add(k.getProtobuf(compact, clientVersion));
     }
 
@@ -82,7 +82,7 @@
   public static class Builder {
     private List<OmKeyInfo> omKeyInfos;
 
-    public Builder(){}
+    public Builder() { }
 
     public Builder setOmKeyInfos(List<OmKeyInfo> infoList) {
       this.omKeyInfos = infoList;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
index c6eb5dd..0a8b1d6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -62,7 +62,7 @@
   /**
    * Default constructor for JSON deserialization.
    */
-  public ServiceInfo() {}
+  public ServiceInfo() { }
 
   /**
    * Constructs the ServiceInfo for the {@code nodeType}.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java
index 6b12f13..eebb4d87 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java
@@ -61,7 +61,7 @@
    * @param obId - long
    */
   public void setObjectID(long obId) {
-    if(this.objectID != 0) {
+    if (this.objectID != 0) {
       throw new UnsupportedOperationException("Attempt to modify object ID " +
           "which is not zero. Current Object ID is " + this.objectID);
     }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index e2bf9f0..5883ff9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -161,7 +161,16 @@
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequest;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequestArgs;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
@@ -603,7 +612,7 @@
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName());
 
-    if(args.getAcls() != null) {
+    if (args.getAcls() != null) {
       keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a ->
           OzoneAcl.toProtobuf(a)).collect(Collectors.toList()));
     }
@@ -1231,7 +1240,7 @@
           OMPBHelper.convertToDelegationToken(resp.getResponse().getToken())
           : null;
     } catch (IOException e) {
-      if(e instanceof OMException) {
+      if (e instanceof OMException) {
         throw (OMException)e;
       }
       throw new OMException("Get delegation token failed.", e,
@@ -1263,7 +1272,7 @@
           .getRenewDelegationTokenResponse();
       return resp.getResponse().getNewExpiryTime();
     } catch (IOException e) {
-      if(e instanceof OMException) {
+      if (e instanceof OMException) {
         throw (OMException)e;
       }
       throw new OMException("Renew delegation token failed.", e,
@@ -1292,7 +1301,7 @@
     try {
       handleError(submitRequest(omRequest));
     } catch (IOException e) {
-      if(e instanceof OMException) {
+      if (e instanceof OMException) {
         throw (OMException)e;
       }
       throw new OMException("Cancel delegation token failed.", e,
@@ -1506,6 +1515,8 @@
     }
     dbUpdatesWrapper.setCurrentSequenceNumber(
         dbUpdatesResponse.getSequenceNumber());
+    dbUpdatesWrapper.setLatestSequenceNumber(
+        dbUpdatesResponse.getLatestSequenceNumber());
     return dbUpdatesWrapper;
   }
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
index 2ff2dc8..810111d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
@@ -24,14 +24,12 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .BucketEncryptionInfoProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CipherSuiteProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CryptoProtocolVersionProto;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .FileEncryptionInfoProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CipherSuiteProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CryptoProtocolVersionProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FileEncryptionInfoProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.ozone.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
@@ -41,6 +39,9 @@
  */
 public final class OMPBHelper {
 
+  public static final ByteString REDACTED =
+      ByteString.copyFromUtf8("<redacted>");
+
   private OMPBHelper() {
     /** Hidden constructor */
   }
@@ -50,7 +51,7 @@
    * @return tokenProto
    */
   public static TokenProto convertToTokenProto(Token<?> tok) {
-    if(tok == null){
+    if (tok == null) {
       throw new IllegalArgumentException("Invalid argument: token is null");
     }
 
@@ -86,9 +87,9 @@
     }
 
     return new BucketEncryptionKeyInfo(
-        beInfo.hasCryptoProtocolVersion()?
+        beInfo.hasCryptoProtocolVersion() ?
             convert(beInfo.getCryptoProtocolVersion()) : null,
-        beInfo.hasSuite()? convert(beInfo.getSuite()) : null,
+        beInfo.hasSuite() ? convert(beInfo.getSuite()) : null,
         beInfo.getKeyName());
   }
 
@@ -106,7 +107,7 @@
     if (beInfo.getSuite() != null) {
       bb.setSuite(convert(beInfo.getSuite()));
     }
-    if (beInfo.getVersion()!= null) {
+    if (beInfo.getVersion() != null) {
       bb.setCryptoProtocolVersion(convert(beInfo.getVersion()));
     }
     return bb.build();
@@ -142,7 +143,7 @@
   }
 
   public static CipherSuite convert(CipherSuiteProto proto) {
-    switch(proto) {
+    switch (proto) {
     case AES_CTR_NOPADDING:
       return CipherSuite.AES_CTR_NOPADDING;
     default:
@@ -166,7 +167,7 @@
 
   public static CryptoProtocolVersionProto convert(
       CryptoProtocolVersion version) {
-    switch(version) {
+    switch (version) {
     case UNKNOWN:
       return OzoneManagerProtocolProtos.CryptoProtocolVersionProto
           .UNKNOWN_PROTOCOL_VERSION;
@@ -180,7 +181,7 @@
 
   public static CryptoProtocolVersion convert(
       CryptoProtocolVersionProto proto) {
-    switch(proto) {
+    switch (proto) {
     case ENCRYPTION_ZONES:
       return CryptoProtocolVersion.ENCRYPTION_ZONES;
     default:
@@ -192,4 +193,24 @@
   }
 
 
+  public static OMRequest processForDebug(OMRequest msg) {
+    return msg;
+  }
+
+  public static OMResponse processForDebug(OMResponse msg) {
+    if (msg == null) {
+      return null;
+    }
+
+    if (msg.hasDbUpdatesResponse()) {
+      OMResponse.Builder builder = msg.toBuilder();
+
+      builder.getDbUpdatesResponseBuilder()
+          .clearData().addData(REDACTED);
+
+      return builder.build();
+    }
+
+    return msg;
+  }
 }
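Note on the hunk above: processForDebug lets debug-level protobuf logging avoid dumping raw DB update payloads. Requests pass through unchanged, and responses that carry a DBUpdatesResponse get their data blobs replaced with the REDACTED placeholder before being rendered. A hedged sketch of the intended call site (logger name and message are assumptions, not part of this patch):

    // How a protocol translator might log a response safely; LOG is assumed
    // to be an org.slf4j.Logger owned by the calling class.
    if (LOG.isDebugEnabled()) {
      LOG.debug("OMResponse: {}", OMPBHelper.processForDebug(omResponse));
    }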
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
index 1916d25..44a3c9e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
@@ -24,7 +24,8 @@
 
 import java.util.LinkedHashMap;
 import java.util.Map;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.*;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.valueOf;
 
 /**
  * Class representing an unique ozone object.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index 76fb76a..09c8743 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -111,7 +111,7 @@
         .setStoreType(StoreType.valueOf(proto.getStoreType().name()));
     String[] tokens = StringUtils.split(proto.getPath(),
         OZONE_URI_DELIMITER, 3);
-    if(tokens == null) {
+    if (tokens == null) {
       throw new IllegalArgumentException("Unexpected path:" + proto.getPath());
     }
     // Set volume name.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
index 0b1b787..85e452e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
@@ -44,7 +44,7 @@
   public static final RatisVersionInfo RATIS_VERSION_INFO =
       new RatisVersionInfo();
 
-  private OzoneVersionInfo() {}
+  private OzoneVersionInfo() { }
 
   public static void main(String[] args) {
     System.out.println(
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
index d7794db..0f85973 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
@@ -129,7 +129,7 @@
       return false;
     }
 
-    if (removePrefixPathInternal(node, path, level+1)) {
+    if (removePrefixPathInternal(node, path, level + 1)) {
       current.getChildren().remove(name);
       return current.hasChildren();
     }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
index 9bd8398..1585153 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
@@ -54,15 +54,15 @@
    */
   private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
       new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat format = new SimpleDateFormat(
-          OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
-      format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
+        @Override
+        protected SimpleDateFormat initialValue() {
+          SimpleDateFormat format = new SimpleDateFormat(
+              OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
+          format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
 
-      return format;
-    }
-  };
+          return format;
+        }
+      };
 
   /**
    * Verifies that max key length is a valid value.
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
index d190fcb..55485b9 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
@@ -28,7 +28,15 @@
 import java.util.List;
 import java.util.Map;
 
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.CREATE;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ_ACL;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
+import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE_ACL;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
index 12b0d40..052ff8f 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
@@ -180,7 +180,7 @@
       String nodeId = NODE_ID_BASE_STR + i;
       ozoneConf.set(
           ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID,
-          nodeId), nodeAddrs.get(i-1));
+          nodeId), nodeAddrs.get(i - 1));
       allNodeIds.add(nodeId);
     }
     ozoneConf.set(ConfUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID),
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
index 16285c2..de12e79 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
@@ -70,7 +70,7 @@
     // Lock re-acquire not allowed by same thread.
     if (resource == OzoneManagerLock.Resource.USER_LOCK ||
         resource == OzoneManagerLock.Resource.S3_SECRET_LOCK ||
-        resource == OzoneManagerLock.Resource.PREFIX_LOCK){
+        resource == OzoneManagerLock.Resource.PREFIX_LOCK) {
       lock.acquireWriteLock(resource, resourceName);
       try {
         lock.acquireWriteLock(resource, resourceName);
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
index 39c6220..1ddf353 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
@@ -55,7 +55,7 @@
   @Test
   public void testKeyGenerationWithInvalidInput() throws Exception {
     GDPRSymmetricKey gkey = null;
-    try{
+    try {
       gkey = new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5),
           OzoneConsts.GDPR_ALGORITHM_NAME);
     } catch (IllegalArgumentException ex) {
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
index ab24b1b..fab4f73 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
@@ -20,8 +20,9 @@
 import org.junit.Test;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType.KEY;
+import static org.junit.Assert.assertEquals;
+
 import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType;
 
 /**
@@ -67,10 +68,10 @@
     objInfo = getBuilder(volume, bucket, key).build();
     assertEquals(objInfo.getBucketName(), bucket);
 
-    objInfo =getBuilder(volume, null, null).build();
+    objInfo = getBuilder(volume, null, null).build();
     assertEquals(objInfo.getBucketName(), null);
 
-    objInfo =getBuilder(null, bucket, null).build();
+    objInfo = getBuilder(null, bucket, null).build();
     assertEquals(objInfo.getBucketName(), bucket);
   }
 
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
index b97b844..817885e 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
@@ -80,13 +80,13 @@
   public void testGetLongestPrefixPath() {
     List<RadixNode<Integer>> lpp =
         ROOT.getLongestPrefixPath("/a/b/c/d/g/p");
-    RadixNode<Integer> lpn = lpp.get(lpp.size()-1);
+    RadixNode<Integer> lpn = lpp.get(lpp.size() - 1);
     assertEquals("g", lpn.getName());
     lpn.setValue(100);
 
     List<RadixNode<Integer>> lpq =
         ROOT.getLongestPrefixPath("/a/b/c/d/g/q");
-    RadixNode<Integer> lqn = lpp.get(lpq.size()-1);
+    RadixNode<Integer> lqn = lpp.get(lpq.size() - 1);
     System.out.print(RadixTree.radixPathToString(lpq));
     assertEquals(lpn, lqn);
     assertEquals("g", lqn.getName());
diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh
index 9342219..b81acf9 100644
--- a/hadoop-ozone/dev-support/checks/_lib.sh
+++ b/hadoop-ozone/dev-support/checks/_lib.sh
@@ -91,7 +91,7 @@
   local os=$(uname -s)
   local arch=$(uname -m)
 
-  curl -LSs https://github.com/elek/flekszible/releases/download/v1.8.1/flekszible_1.8.1_${os}_${arch}.tar.gz | tar -xz -f - -C bin
+  curl -LSs https://github.com/elek/flekszible/releases/download/v2.3.0/flekszible_2.3.0_${os}_${arch}.tar.gz | tar -xz -f - -C bin
 
   chmod +x bin/flekszible
 }
diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh
index b3a6696..c4d8a89 100755
--- a/hadoop-ozone/dev-support/checks/acceptance.sh
+++ b/hadoop-ozone/dev-support/checks/acceptance.sh
@@ -41,6 +41,7 @@
 cd "$DIST_DIR/compose" || exit 1
 ./test-all.sh 2>&1 | tee "${REPORT_DIR}/output.log"
 RES=$?
-cp result/* "$REPORT_DIR/"
+cp -rv result/* "$REPORT_DIR/"
 cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html"
+find "$REPORT_DIR" -type f -empty -print0 | xargs -0 rm -v
 exit $RES
diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh
index f5b2cc9..dc07132 100755
--- a/hadoop-ozone/dev-support/checks/bats.sh
+++ b/hadoop-ozone/dev-support/checks/bats.sh
@@ -21,13 +21,19 @@
 
 install_bats
 
+git clone https://github.com/bats-core/bats-assert dev-support/ci/bats-assert
+git clone https://github.com/bats-core/bats-support dev-support/ci/bats-support
+
 REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/bats"}
 mkdir -p "${REPORT_DIR}"
 REPORT_FILE="${REPORT_DIR}/summary.txt"
 
 rm -f "${REPORT_DIR}/output.log"
 
-find * -path '*/src/test/shell/*' -name '*.bats' -print0 \
+find * \( \
+    -path '*/src/test/shell/*' -name '*.bats' \
+    -or -path dev-support/ci/selective_ci_checks.bats \
+    \) -print0 \
   | xargs -0 -n1 bats --formatter tap \
   | tee -a "${REPORT_DIR}/output.log"
 
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh
index e76a67a..bd5e7f0 100755
--- a/hadoop-ozone/dev-support/checks/checkstyle.sh
+++ b/hadoop-ozone/dev-support/checks/checkstyle.sh
@@ -44,6 +44,9 @@
       -e 's/<file name="\([^"]*\)".*/\1/' \
       -e 's/<error.*line="\([[:digit:]]*\)".*message="\([^"]*\)".*/ \1: \2/' \
       -e "s!^${BASE_DIR}/!!" \
+      -e "s/&apos;/'/g" \
+      -e "s/&lt;/</g" \
+      -e "s/&gt;/>/g" \
   | tee "$REPORT_FILE"
 
 ## generate counter
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index 45007d2..2023328 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -28,7 +28,7 @@
   <properties>
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
-    <docker.ozone-runner.version>20211202-1</docker.ozone-runner.version>
+    <docker.ozone-runner.version>20220212-1</docker.ozone-runner.version>
     <docker.ozone-testkr5b.image>apache/ozone-testkrb5:20210419-1</docker.ozone-testkr5b.image>
   </properties>
 
diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/.env b/hadoop-ozone/dist/src/main/compose/compatibility/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/compatibility/.env
+++ b/hadoop-ozone/dist/src/main/compose/compatibility/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/compatibility/docker-compose.yaml
index dcbad22..f76b5e1 100644
--- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-compose.yaml
@@ -19,7 +19,7 @@
 # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields)
 x-common-config:
   &common-config
-  image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
   volumes:
     - ../..:/opt/hadoop
   env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/.env b/hadoop-ozone/dist/src/main/compose/ozone-csi/.env
index 9279c5b..2de359f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-csi/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/.env
@@ -16,4 +16,5 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-compose.yaml
index d5891a9..fd99d01 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-compose.yaml
@@ -18,7 +18,7 @@
 
 services:
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
@@ -30,7 +30,7 @@
       - 9882
     command: ["ozone","datanode"]
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
@@ -43,7 +43,7 @@
       - 9862:9862
     command: ["ozone","om"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
@@ -56,7 +56,7 @@
       OZONE_OPTS:
     command: ["ozone","scm"]
   csi:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
index 9279c5b..2de359f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/.env
@@ -16,4 +16,5 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
index 615dc9e..e9ec659 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
@@ -19,7 +19,7 @@
 # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields)
 x-common-config:
   &common-config
-  image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
   volumes:
     - ../..:/opt/hadoop
   env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env
index 8ca6807..c13fddf 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env
@@ -20,3 +20,4 @@
 HADOOP_IMAGE=flokkr/hadoop
 HADOOP_VERSION=2.7.3
 OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
index 95c8854..90a8084 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../../..:/opt/hadoop
     ports:
@@ -27,7 +27,7 @@
       - docker-config
       - ../common-config
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     volumes:
       - ../../..:/opt/hadoop
@@ -42,7 +42,7 @@
       - ../common-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../../..:/opt/hadoop
@@ -53,7 +53,7 @@
       - ../common-config
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     volumes:
       - ../../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env
index 32a6715..77842d0 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env
@@ -20,4 +20,5 @@
 HADOOP_IMAGE=flokkr/hadoop
 HADOOP_VERSION=3.1.2
 OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
index 8571cc5..e0d1d6c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../../..:/opt/hadoop
     ports:
@@ -29,7 +29,7 @@
     environment:
       OZONE_OPTS:
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     volumes:
       - ../../..:/opt/hadoop
@@ -45,7 +45,7 @@
       - ../common-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../../..:/opt/hadoop
@@ -58,7 +58,7 @@
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     volumes:
       - ../../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env
index 01941ef..e696737 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env
@@ -18,4 +18,5 @@
 HADOOP_IMAGE=flokkr/hadoop
 HADOOP_VERSION=3.2.2
 OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
index c07282a..9cd616f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../../..:/opt/hadoop
     ports:
@@ -27,7 +27,7 @@
       - docker-config
       - ../common-config
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     volumes:
       - ../../..:/opt/hadoop
@@ -43,7 +43,7 @@
       - ../common-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../../..:/opt/hadoop
@@ -56,7 +56,7 @@
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     volumes:
       - ../../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/.env
index 74130cb..65e6887 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/.env
@@ -18,4 +18,5 @@
 HADOOP_IMAGE=flokkr/hadoop
 HADOOP_VERSION=3.3.1
 OZONE_RUNNER_VERSION=@docker.ozone-runner.version@
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/docker-compose.yaml
index c07282a..9cd616f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop33/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../../..:/opt/hadoop
     ports:
@@ -27,7 +27,7 @@
       - docker-config
       - ../common-config
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     volumes:
       - ../../..:/opt/hadoop
@@ -43,7 +43,7 @@
       - ../common-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../../..:/opt/hadoop
@@ -56,7 +56,7 @@
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     volumes:
       - ../../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile
index 787fde0..79aeec4 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile
@@ -14,9 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+ARG OZONE_RUNNER_IMAGE
 ARG OZONE_RUNNER_VERSION
 
-FROM apache/ozone-runner:${OZONE_RUNNER_VERSION}
+FROM ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
 
 # Install ssh
 RUN sudo yum install -y openssh-clients openssh-server
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/.env
index 20bf1c8..dd37cf5 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/.env
@@ -14,9 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-HDDS_VERSION=1.1.0-SNAPSHOT
-OZONE_RUNNER_VERSION=20200625-1
-OZONE_IMAGE=apache/ozone-runner:20200625-1
+HDDS_VERSION=${hdds.version}
+OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_DIR=/opt/hadoop
 OZONE_VOLUME=.
 # Indicates no arguments to the OM.
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
index 88e2a4b..e0d6d47 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-compose.yaml
@@ -21,7 +21,7 @@
   &common-config
   env_file:
     - docker-config
-  image: ${OZONE_IMAGE}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
 
 x-replication:
   &replication
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env
index 9279c5b..2de359f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env
@@ -16,4 +16,5 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
index 96780df..77eeaf0 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    datanode_1:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
         - ../..:/opt/hadoop
@@ -33,7 +33,7 @@
          net:
             ipv4_address: 10.5.0.4
    datanode_2:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
         - ../..:/opt/hadoop
@@ -49,7 +49,7 @@
          net:
             ipv4_address: 10.5.0.5
    datanode_3:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
         - ../..:/opt/hadoop
@@ -65,7 +65,7 @@
          net:
             ipv4_address: 10.5.0.6
    datanode_4:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
         - ../..:/opt/hadoop
@@ -81,7 +81,7 @@
          net:
             ipv4_address: 10.5.0.7
    datanode_5:
-     image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+     image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
      privileged: true #required by the profiler
      volumes:
        - ../..:/opt/hadoop
@@ -97,7 +97,7 @@
        net:
          ipv4_address: 10.5.0.8
    datanode_6:
-     image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+     image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
      privileged: true #required by the profiler
      volumes:
        - ../..:/opt/hadoop
@@ -113,7 +113,7 @@
        net:
          ipv4_address: 10.5.0.9
    om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
          - ../..:/opt/hadoop
@@ -130,7 +130,7 @@
          net:
             ipv4_address: 10.5.0.70
    scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler
       volumes:
          - ../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env
index 9279c5b..2de359f 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozone/.env
@@ -16,4 +16,5 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
index 524d2e5..72303ab 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
@@ -19,7 +19,7 @@
 # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields)
 x-common-config:
   &common-config
-  image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
   volumes:
     - ../..:/opt/hadoop
   env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/freon-ockg.yaml b/hadoop-ozone/dist/src/main/compose/ozone/freon-ockg.yaml
index 765827d..62dabda 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/freon-ockg.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/freon-ockg.yaml
@@ -17,7 +17,7 @@
 version: "3.4"
 services:
   freon:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/freon-rk.yaml b/hadoop-ozone/dist/src/main/compose/ozone/freon-rk.yaml
index 7bb1dbf..10a21b0 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/freon-rk.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/freon-rk.yaml
@@ -17,7 +17,7 @@
 version: "3.4"
 services:
   freon:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
     env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
index 5aacbfc..59a90ed 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
@@ -53,12 +53,6 @@
 execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-fso-ofs-link ozonefs/ozonefs.robot
 execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-fso-o3fs-bucket ozonefs/ozonefs.robot
 
-execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectputget s3/objectputget.robot
-execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectdelete s3/objectdelete.robot
-execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectcopy s3/objectcopy.robot
-execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectmultidelete s3/objectmultidelete.robot
-execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-MultipartUpload s3/MultipartUpload.robot
-
 stop_docker_env
 
 generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml
index 175347f..b3d21dd 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
         - ../..:/opt/hadoop
       ports:
@@ -26,7 +26,7 @@
       env_file:
         - ./docker-config
    om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -38,7 +38,7 @@
           - ./docker-config
       command: ["/opt/hadoop/bin/ozone","om"]
    scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -50,7 +50,7 @@
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
       command: ["/opt/hadoop/bin/ozone","scm"]
    ozone_client:
-       image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+       image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
        volumes:
          - ../..:/opt/hadoop
        ports:
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml
index 22861e3..1809457 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml
@@ -24,7 +24,7 @@
       ports:
          - 9878:9878
    datanode:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
         - ../..:/opt/hadoop
       ports:
@@ -33,7 +33,7 @@
       env_file:
         - ./docker-config
    om:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -45,7 +45,7 @@
           - ./docker-config
       command: ["ozone","om"]
    scm:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -58,7 +58,7 @@
           OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}"
       command: ["ozone","scm"]
    s3g1:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -67,7 +67,7 @@
           - ./docker-config
       command: ["ozone","s3g"]
    s3g2:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
@@ -76,7 +76,7 @@
          - ./docker-config
       command: ["ozone","s3g"]
    s3g3:
-      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
       volumes:
          - ../..:/opt/hadoop
       ports:
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
index da2adab..fd9993b 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
@@ -14,9 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+ARG OZONE_RUNNER_IMAGE
 ARG OZONE_RUNNER_VERSION
 
-FROM apache/ozone-runner:${OZONE_RUNNER_VERSION}
+FROM ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
 
 RUN sudo yum install -y openssh-clients openssh-server
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
index 9b49a2a..786fce8 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
@@ -20,6 +20,7 @@
       build:
          context: .
          args:
+            - OZONE_RUNNER_IMAGE
             - OZONE_RUNNER_VERSION
       image: ozone-runner-scripts:${OZONE_RUNNER_VERSION}
       volumes:
@@ -32,6 +33,7 @@
       build:
          context: .
          args:
+            - OZONE_RUNNER_IMAGE
             - OZONE_RUNNER_VERSION
       image: ozone-runner-scripts:${OZONE_RUNNER_VERSION}
       volumes:
@@ -45,6 +47,7 @@
       build:
          context: .
          args:
+            - OZONE_RUNNER_IMAGE
             - OZONE_RUNNER_VERSION
       image: ozone-runner-scripts:${OZONE_RUNNER_VERSION}
       volumes:
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
index 85cf1d2..1ddd453 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/.env
@@ -17,5 +17,6 @@
 HDDS_VERSION=${hdds.version}
 HADOOP_VERSION=3
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image}
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
index 6a02661..914156d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-compose.yaml
@@ -43,7 +43,7 @@
       ozone_net:
         ipv4_address: 172.25.0.101
   datanode1:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
       - ../_keytabs:/etc/security/keytabs
@@ -65,7 +65,7 @@
       ozone_net:
         ipv4_address: 172.25.0.102
   datanode2:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
       - ../_keytabs:/etc/security/keytabs
@@ -87,7 +87,7 @@
       ozone_net:
         ipv4_address: 172.25.0.103
   datanode3:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
       - ../_keytabs:/etc/security/keytabs
@@ -109,7 +109,7 @@
       ozone_net:
         ipv4_address: 172.25.0.104
   om1:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om1
     volumes:
       - ../..:/opt/hadoop
@@ -134,7 +134,7 @@
       ozone_net:
         ipv4_address: 172.25.0.111
   om2:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om2
     volumes:
       - ../..:/opt/hadoop
@@ -159,7 +159,7 @@
       ozone_net:
         ipv4_address: 172.25.0.112
   om3:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om3
     volumes:
       - ../..:/opt/hadoop
@@ -184,7 +184,7 @@
       ozone_net:
         ipv4_address: 172.25.0.113
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../..:/opt/hadoop
@@ -201,7 +201,7 @@
       ozone_net:
         ipv4_address: 172.25.0.114
   scm1.org:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm1.org
     volumes:
       - ../..:/opt/hadoop
@@ -228,7 +228,7 @@
       ozone_net:
         ipv4_address: 172.25.0.116
   scm2.org:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm2.org
     volumes:
       - ../..:/opt/hadoop
@@ -256,7 +256,7 @@
       ozone_net:
         ipv4_address: 172.25.0.117
   scm3.org:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm3.org
     volumes:
       - ../..:/opt/hadoop
@@ -284,7 +284,7 @@
       ozone_net:
         ipv4_address: 172.25.0.118
   recon:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: recon
     volumes:
       - ../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
index 4b4a913..682aa1d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env
@@ -18,5 +18,6 @@
 HADOOP_IMAGE=flokkr/hadoop
 HADOOP_VERSION=3.3.1
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image}
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
index 124d2b8..9405931 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml
@@ -38,7 +38,7 @@
       - ../../libexec/transformation.py:/opt/transformation.py
     command: ["hadoop", "kms"]
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     networks:
       - ozone
     volumes:
@@ -53,7 +53,7 @@
     environment:
       OZONE_OPTS:
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     networks:
       - ozone
@@ -71,7 +71,7 @@
       - docker-config
     command: ["/opt/hadoop/bin/ozone","om"]
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     networks:
       - ozone
@@ -87,7 +87,7 @@
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","s3g"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     networks:
       - ozone
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
index 85992f1..6acfa04 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh
@@ -27,9 +27,6 @@
 
 start_docker_env
 
-execute_command_in_container rm sudo bash -c "sed -i -e 's/^mirrorlist/#&/' -e 's/^#baseurl/baseurl/' -e 's/mirror.centos.org/vault.centos.org/' /etc/yum.repos.d/*.repo"
-execute_command_in_container rm sudo yum install -y krb5-workstation
-
 execute_robot_test om kinit.robot
 
 execute_robot_test om createmrenv.robot
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
index 85cf1d2..1ddd453 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env
@@ -17,5 +17,6 @@
 HDDS_VERSION=${hdds.version}
 HADOOP_VERSION=3
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_TESTKRB5_IMAGE=${docker.ozone-testkr5b.image}
 OZONE_OPTS=
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index cfd0de8..f4465cf 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -35,7 +35,7 @@
       - ../../libexec/transformation.py:/opt/transformation.py
     command: ["hadoop", "kms"]
   datanode:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     volumes:
       - ../..:/opt/hadoop
       - ../_keytabs:/etc/security/keytabs
@@ -48,7 +48,7 @@
     environment:
       OZONE_OPTS:
   om:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: om
     volumes:
       - ../..:/opt/hadoop
@@ -65,7 +65,7 @@
     command: ["/opt/hadoop/bin/ozone","om"]
 
   s3g:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: s3g
     volumes:
       - ../..:/opt/hadoop
@@ -79,7 +79,7 @@
     environment:
       OZONE_OPTS:
   recon:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: recon
     volumes:
       - ../..:/opt/hadoop
@@ -93,7 +93,7 @@
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","recon"]
   scm:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     hostname: scm
     volumes:
       - ../..:/opt/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index fa31ee4..ad445a0 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -116,6 +116,10 @@
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 
+KMS-SITE.XML_hadoop.kms.proxyuser.root.users=*
+KMS-SITE.XML_hadoop.kms.proxyuser.root.groups=*
+KMS-SITE.XML_hadoop.kms.proxyuser.root.hosts=*
+
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
 #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 5477a76..cbcea39 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -43,7 +43,7 @@
   done
 done
 
-for bucket in link generated; do
+for bucket in encrypted link generated; do
   execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3
 done
 
diff --git a/hadoop-ozone/dist/src/main/compose/restart/.env b/hadoop-ozone/dist/src/main/compose/restart/.env
index 6f757c5..b8d2247 100644
--- a/hadoop-ozone/dist/src/main/compose/restart/.env
+++ b/hadoop-ozone/dist/src/main/compose/restart/.env
@@ -16,6 +16,6 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
-OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_DIR=/opt/hadoop
 OZONE_VOLUME=.
diff --git a/hadoop-ozone/dist/src/main/compose/restart/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/restart/docker-compose.yaml
index a15c2f9..7e79667 100644
--- a/hadoop-ozone/dist/src/main/compose/restart/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/restart/docker-compose.yaml
@@ -21,7 +21,7 @@
   &common-config
   env_file:
     - docker-config
-  image: ${OZONE_IMAGE}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
 
 x-replication:
   &replication
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 9c3d6c4..f652d40 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -214,7 +214,7 @@
 copy_daemon_logs() {
   local c f
   for c in $(docker-compose ps | grep "^${COMPOSE_ENV_NAME}_" | awk '{print $1}'); do
-    for f in $(docker exec "${c}" ls -1 /var/log/hadoop | grep -F '.out'); do
+    for f in $(docker exec "${c}" ls -1 /var/log/hadoop 2> /dev/null | grep -F -e '.out' -e audit); do
       docker cp "${c}:/var/log/hadoop/${f}" "$RESULT_DIR/"
     done
   done
@@ -320,13 +320,12 @@
   local result_dir="${test_dir}/result"
   local test_dir_name=$(basename ${test_dir})
   if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then
-    rebot --nostatusrc -N "${test_dir_name}" -l NONE -r NONE -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}/*.xml"
+    rebot --nostatusrc -N "${test_dir_name}" -l NONE -r NONE -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}"/*.xml
+    rm -fv "${result_dir}"/*.xml "${result_dir}"/log.html "${result_dir}"/report.html
   fi
 
-  cp "${result_dir}"/docker-*.log "${all_result_dir}"/
-  if [[ -n "$(find "${result_dir}" -name "*.out")" ]]; then
-    cp "${result_dir}"/*.out* "${all_result_dir}"/
-  fi
+  mkdir -p "${all_result_dir}"/"${test_dir_name}"
+  mv -v "${result_dir}"/* "${all_result_dir}"/"${test_dir_name}"/
 }
 
 run_test_script() {
@@ -401,9 +400,10 @@
 prepare_for_runner_image() {
   local default_version=${docker.ozone-runner.version} # set at build-time from Maven property
   local runner_version=${OZONE_RUNNER_VERSION:-${default_version}} # may be specified by user running the test
+  local runner_image=${OZONE_RUNNER_IMAGE:-apache/ozone-runner} # may be specified by user running the test
   local v=${1:-${runner_version}} # prefer explicit argument
 
   export OZONE_DIR=/opt/hadoop
-  export OZONE_IMAGE="apache/ozone-runner:${v}"
+  export OZONE_IMAGE="${runner_image}:${v}"
 }
 
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
index 68f4302..4d1c35c 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/.env
@@ -16,6 +16,7 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version}
 OZONE_DIR=/opt/hadoop
 OZONE_VOLUME=./data
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
index 9a2b700..8e01004 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config
@@ -24,8 +24,8 @@
 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2
 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3
 OZONE-SITE.XML_ozone.om.ratis.enable=true
-// setting ozone.scm.ratis.enable to false for now, as scm ha upgrade is
-// not supported yet. This is supposed to work without SCM HA configuration
+# setting ozone.scm.ratis.enable to false for now, as scm ha upgrade is
+# not supported yet. This is supposed to work without SCM HA configuration
 OZONE-SITE.XML_ozone.scm.ratis.enable=false
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env
index 0699724..616f960 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env
@@ -16,6 +16,7 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
 OZONE_IMAGE=apache/ozone-runner:${docker.ozone-runner.version}
 OZONE_DIR=/opt/hadoop
 OZONE_VOLUME=./data
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
index ab64e95..b001496 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
@@ -94,7 +94,6 @@
     RESULT=1
   fi
 
-  generate_report 'upgrade' "$RESULT_DIR"
   copy_results "$test_subdir" "$ALL_RESULT_DIR"
 }
 
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/.env b/hadoop-ozone/dist/src/main/compose/xcompat/.env
index 96ab163..140975d 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/.env
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/.env
@@ -16,3 +16,4 @@
 
 HDDS_VERSION=${hdds.version}
 OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
+OZONE_RUNNER_IMAGE=apache/ozone-runner
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
index 95951f4..59158a4 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
@@ -26,8 +26,26 @@
     environment:
       HADOOP_OPTS:
     command: ["sleep","1000000"]
+  old_client_1_1_0:
+    image: apache/ozone:1.1.0
+    env_file:
+      - docker-config
+    volumes:
+      - ../..:/opt/ozone
+    environment:
+      HADOOP_OPTS:
+    command: ["sleep","1000000"]
+  old_client_1_2_1:
+    image: apache/ozone:1.2.1
+    env_file:
+      - docker-config
+    volumes:
+      - ../..:/opt/ozone
+    environment:
+      HADOOP_OPTS:
+    command: ["sleep","1000000"]
   new_client:
-    image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+    image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
     env_file:
       - docker-config
     volumes:
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
index f0a0a2b..95921f1 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/new-cluster.yaml
@@ -19,7 +19,7 @@
 # reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields)
 x-new-config:
   &new-config
-  image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+  image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
   env_file:
     - docker-config
   volumes:
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index b15c6e5..a46590f 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -19,7 +19,8 @@
 export COMPOSE_DIR
 basename=$(basename ${COMPOSE_DIR})
 
-current_version=1.1.0
+current_version=1.3.0
+old_versions="1.0.0 1.1.0 1.2.1" # container is needed for each version in clients.yaml
 
 # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh
 source "${COMPOSE_DIR}/../testlib.sh"
@@ -59,11 +60,8 @@
   new_client _write
   new_client _read ${current_version}
 
-  for client in $(docker ps | grep _old_client_ | awk '{ print $NF }'); do
-    client=${client#${basename}_}
-    client=${client%_1}
-    client_version=${client#old_client_}
-    client_version=${client_version//_/.}
+  for client_version in "$@"; do
+    client="old_client_${client_version//./_}"
 
     old_client _write
     old_client _read ${client_version}
@@ -78,11 +76,12 @@
 create_results_dir
 
 # current cluster with various clients
-COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility
+COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility ${old_versions}
 
-for cluster_version in 1.0.0; do
+# old cluster with clients: same version and current version
+for cluster_version in ${old_versions}; do
   export OZONE_VERSION=${cluster_version}
-  COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility
+  COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility ${cluster_version}
 done
 
 generate_report
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml
index 53d0e5c..511f48f 100644
--- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml
@@ -50,4 +50,4 @@
            - csi
       volumes:
         - name: socket-dir
-          emptyDir:
+          emptyDir: {}
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible
index 39fc53a..2ea9721 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible
+++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible
@@ -28,6 +28,8 @@
     transformations:
     - type: Image
       image: "@docker.image@"
+transformations:
+ - type: kustomize
 header: |-
   # Licensed to the Apache Software Foundation (ASF) under one
   # or more contributor license agreements.  See the NOTICE file
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
index f02fb56..5bd722d 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml
@@ -27,6 +27,7 @@
   OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
   OZONE-SITE.XML_ozone.scm.names: scm-0.scm
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
+  OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
   LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
   LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
   LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml
new file mode 100644
index 0000000..3059b9c
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/kustomization.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+resources:
+- config-configmap.yaml
+- datanode-service.yaml
+- datanode-statefulset.yaml
+- om-service.yaml
+- om-statefulset.yaml
+- s3g-service.yaml
+- s3g-statefulset.yaml
+- scm-service.yaml
+- scm-statefulset.yaml
+- datanode-public-service.yaml
+- om-public-service.yaml
+- s3g-public-service.yaml
+- scm-public-service.yaml
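
With a kustomization.yaml present, the example can also be rendered or applied by kustomize-aware tooling; a hedged sketch assuming kubectl 1.14+ and the source-tree path (the path differs in a built distribution):

    # Illustrative only: render, then apply, the getting-started example.
    kubectl kustomize hadoop-ozone/dist/src/main/k8s/examples/getting-started
    kubectl apply -k hadoop-ozone/dist/src/main/k8s/examples/getting-started
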
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible
index 3390db0..60b6bd2 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible
@@ -36,6 +36,8 @@
     transformations:
     - type: Image
       image: "@docker.image@"
+transformations:
+ - type: kustomize
 header: |-
   # Licensed to the Apache Software Foundation (ASF) under one
   # or more contributor license agreements.  See the NOTICE file
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
index f02fb56..5bd722d 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml
@@ -27,6 +27,7 @@
   OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
   OZONE-SITE.XML_ozone.scm.names: scm-0.scm
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
+  OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
   LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
   LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
   LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml
new file mode 100644
index 0000000..3059b9c
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/minikube/kustomization.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+resources:
+- config-configmap.yaml
+- datanode-service.yaml
+- datanode-statefulset.yaml
+- om-service.yaml
+- om-statefulset.yaml
+- s3g-service.yaml
+- s3g-statefulset.yaml
+- scm-service.yaml
+- scm-statefulset.yaml
+- datanode-public-service.yaml
+- om-public-service.yaml
+- s3g-public-service.yaml
+- scm-public-service.yaml
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible
index 3d9bfcd..aad4836 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible
@@ -45,3 +45,4 @@
     destination: pv-test
 transformations:
   - type: Namespace
+  - type: kustomize
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
index 5807630..4be594b 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml
@@ -27,6 +27,7 @@
   OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
   OZONE-SITE.XML_ozone.scm.names: scm-0.scm
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
+  OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
   LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
   LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
   LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml
new file mode 100644
index 0000000..cf0cbe1
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/kustomization.yaml
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+resources:
+- config-configmap.yaml
+- datanode-service.yaml
+- datanode-statefulset.yaml
+- om-service.yaml
+- om-statefulset.yaml
+- s3g-service.yaml
+- s3g-statefulset.yaml
+- scm-service.yaml
+- scm-statefulset.yaml
+- prometheusconf-configmap.yaml
+- prometheus-deployment.yaml
+- prometheus-clusterrole.yaml
+- prometheus-operator-clusterrolebinding.yaml
+- prometheus-operator-serviceaccount.yaml
+- prometheus-service.yaml
+- jaeger-service.yaml
+- jaeger-statefulset.yaml
+- datanode-public-service.yaml
+- om-public-service.yaml
+- s3g-public-service.yaml
+- scm-public-service.yaml
+- jaeger-public-service.yaml
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible
index b800f89..445f255 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible
@@ -26,5 +26,8 @@
     transformations:
     - type: Image
       image: "@docker.image@"
+    - type: kustomize
 transformations:
   - type: Namespace
+  - type: kustomize
+
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml
new file mode 100644
index 0000000..6b3d553
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/kustomization.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+resources:
+- config-configmap.yaml
+- datanode-service.yaml
+- datanode-statefulset.yaml
+- om-service.yaml
+- om-statefulset.yaml
+- s3g-service.yaml
+- s3g-statefulset.yaml
+- scm-service.yaml
+- scm-statefulset.yaml
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible
index ec6d745..5562aac 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible
@@ -27,11 +27,10 @@
     transformations:
     - type: Image
       image: "@docker.image@"
-  - path: pv-test
-    destination: pv-test
   - path: ozone-csi
     destination: csi
   - path: test-webserver
     destination: pv-test
 transformations:
   - type: Namespace
+  - type: kustomize
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
index 820c197..c7ac534 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml
@@ -27,6 +27,7 @@
   OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
   OZONE-SITE.XML_ozone.scm.names: scm-0.scm
   OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
+  OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
   LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
   LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
   LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml
new file mode 100644
index 0000000..6b3d553
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone/kustomization.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+resources:
+- config-configmap.yaml
+- datanode-service.yaml
+- datanode-statefulset.yaml
+- om-service.yaml
+- om-statefulset.yaml
+- s3g-service.yaml
+- s3g-statefulset.yaml
+- scm-service.yaml
+- scm-statefulset.yaml
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh
index 0ae44e4..0efdfff 100644
--- a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh
+++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh
@@ -118,8 +118,9 @@
 
   local default_version=${docker.ozone-runner.version} # set at build-time from Maven property
   local runner_version=${OZONE_RUNNER_VERSION:-${default_version}} # may be specified by user running the test
+  local runner_image="${OZONE_RUNNER_IMAGE:-apache/ozone-runner}" # may be specified by user running the test
 
-  flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image=apache/ozone-runner:${runner_version} -t ozone/onenode
+  flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image="${runner_image}:${runner_version}" -t ozone/onenode
 }
 
 revert_resources() {
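
Here the runner image resolves from OZONE_RUNNER_IMAGE with apache/ozone-runner as the fallback, so a k8s example test can be pointed at another image without editing the script; a sketch with a hypothetical image name:

    # Illustrative only: ${VAR:-default} expands to a fallback without assigning VAR.
    runner_image="${OZONE_RUNNER_IMAGE:-apache/ozone-runner}"
    echo "using ${runner_image}"

    # Hypothetical override for a single test run:
    OZONE_RUNNER_IMAGE=ghcr.io/example/ozone-runner ./test.sh
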
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
index fc133c1..511679c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
@@ -16,18 +16,21 @@
 *** Settings ***
 Documentation       Read Compatibility
 Resource            ../ozone-lib/shell.robot
+Resource            setup.robot
 Test Timeout        5 minutes
+Suite Setup         Create Local Test File
 
 *** Variables ***
 ${SUFFIX}    ${EMPTY}
 
 *** Test Cases ***
 Key Can Be Read
-    Key Should Match Local File    /vol1/bucket1/key-${SUFFIX}    /etc/passwd
+    Key Should Match Local File    /vol1/bucket1/key-${SUFFIX}    ${TESTFILE}
 
 Dir Can Be Listed
     Execute    ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX}
 
 File Can Be Get
-    Execute    ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/passwd /tmp/passwd-${SUFFIX}
-    [teardown]    Execute    rm /tmp/passwd-${SUFFIX}
+    Execute    ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/
+    Execute    diff -q ${TESTFILE} /tmp/file-${SUFFIX}
+    [teardown]    Execute    rm /tmp/file-${SUFFIX}
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot
new file mode 100644
index 0000000..ae765f2
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Setup for Compatibility Tests
+Library             OperatingSystem
+Resource            ../ozone-lib/shell.robot
+
+*** Variables ***
+${SUFFIX}    ${EMPTY}
+
+
+*** Keywords ***
+Create Local Test File
+    Set Suite Variable    ${TESTFILE}    /tmp/test-data-${SUFFIX}.txt
+    Create File    ${TESTFILE}    Compatibility Test
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
index f5c9201..4c611d4 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
@@ -16,17 +16,20 @@
 *** Settings ***
 Documentation       Write Compatibility
 Resource            ../ozone-lib/shell.robot
+Resource            setup.robot
 Test Timeout        5 minutes
+Suite Setup         Create Local Test File
 
 *** Variables ***
 ${SUFFIX}    ${EMPTY}
 
+
 *** Test Cases ***
 Key Can Be Written
-    Create Key    /vol1/bucket1/key-${SUFFIX}    /etc/passwd
+    Create Key    /vol1/bucket1/key-${SUFFIX}    ${TESTFILE}
 
 Dir Can Be Created
     Execute    ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX}
 
 File Can Be Put
-    Execute    ozone fs -put /etc/passwd o3fs://bucket1.vol1/dir-${SUFFIX}/
+    Execute    ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX}
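
Together with setup.robot and read.robot, the suite now round-trips a generated file instead of /etc/passwd; a rough shell equivalent of that round trip (the suffix value is arbitrary):

    # Illustrative only: manual equivalent of the write and read compatibility checks.
    SUFFIX=demo
    TESTFILE=/tmp/test-data-${SUFFIX}.txt
    echo 'Compatibility Test' > "${TESTFILE}"
    ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX}
    ozone fs -put "${TESTFILE}" o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX}
    ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/
    diff -q "${TESTFILE}" /tmp/file-${SUFFIX}
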
diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
index c11695a..82387c9 100644
--- a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot
@@ -38,6 +38,7 @@
     ${root} =       Format FS URL    ${SCHEME}    ${volume}    ${bucket}
                     ${output} =      Execute                 yarn jar ${exampleJar} pi -D fs.defaultFS=${root} 3 3
                     Should Contain   ${output}               completed successfully
+                    Should Not Contain   ${output}           multiple SLF4J bindings
 
 Execute WordCount
                     ${exampleJar}    Find example jar
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot
index a2779d9..51fab51 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot
@@ -33,6 +33,7 @@
     ${dir} =          Format FS URL         ${SCHEME}     ${volume}    ${bucket}
     ${random} =        Generate Random String  5  [NUMBERS]
     ${result} =        Execute                    hdfs dfs -put /opt/hadoop/NOTICE.txt ${dir}/${PREFIX}-${random}
+                       Should Not Contain         ${result}           multiple SLF4J bindings
     ${result} =        Execute                    hdfs dfs -ls ${dir}
                        Should contain             ${result}   ${PREFIX}-${random}
     ${result} =        Execute                    hdfs dfs -cat ${dir}/${PREFIX}-${random}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 6b5d7c1..4ccbc7e 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -21,13 +21,26 @@
 Resource            ../commonlib.robot
 Resource            commonawslib.robot
 Test Timeout        5 minutes
-Suite Setup         Setup s3 tests
+Suite Setup         Setup Multipart Tests
 Test Setup          Generate random prefix
 
 *** Keywords ***
+Setup Multipart Tests
+    Setup s3 tests
+
+    # 5MB + a bit
+    Create Random File KB    /tmp/part1    5121
+
+    # 1MB - a bit
+    Create Random File KB    /tmp/part2    1023
+
 Create Random file
     [arguments]             ${size_in_megabytes}
-    Execute                 dd if=/dev/urandom of=/tmp/part1 bs=1048576 count=${size_in_megabytes}
+    Execute                 dd if=/dev/urandom of=/tmp/part1 bs=1048576 count=${size_in_megabytes} status=none
+
+Create Random File KB
+    [arguments]             ${file}    ${size_in_kilobytes}
+    Execute                 dd if=/dev/urandom of=${file} bs=1024 count=${size_in_kilobytes} status=none
 
 Wait Til Date Past
     [arguments]         ${date}
@@ -41,6 +54,10 @@
 
 *** Test Cases ***
 
+Test Multipart Upload With Adjusted Length
+    Perform Multipart Upload    ${BUCKET}    multipart/adjusted_length_${PREFIX}    /tmp/part1    /tmp/part2
+    Verify Multipart Upload     ${BUCKET}    multipart/adjusted_length_${PREFIX}    /tmp/part1    /tmp/part2
+
 Test Multipart Upload
     ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key ${PREFIX}/multipartKey
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index 5fceca1..96939c9 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 *** Settings ***
+Library             Collections
 Resource            ../commonlib.robot
 Resource            ../ozone-lib/shell.robot
 
@@ -22,6 +23,7 @@
 ${OZONE_S3_HEADER_VERSION}     v4
 ${OZONE_S3_SET_CREDENTIALS}    true
 ${BUCKET}                      generated
+${KEY_NAME}                    key1
 
 *** Keywords ***
 Execute AWSS3APICli
@@ -102,6 +104,7 @@
     ...                ELSE                                      Set Variable    ${BUCKET}
                        Set Suite Variable                        ${BUCKET}
                        Run Keyword if                            '${BUCKET}' == 'link'                 Setup links for S3 tests
+                       Run Keyword if                            '${BUCKET}' == 'encrypted'            Create encrypted bucket
 
 Setup links for S3 tests
     ${exists} =        Bucket Exists    o3://${OM_SERVICE_ID}/s3v/link
@@ -110,6 +113,12 @@
     Execute            ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket
     Create link        link
 
+Create encrypted bucket
+    Return From Keyword if    '${SECURITY_ENABLED}' == 'false'
+    ${exists} =        Bucket Exists    o3://${OM_SERVICE_ID}/s3v/encrypted
+    Return From Keyword If    ${exists}
+    Execute            ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/s3v/encrypted
+
 Create link
     [arguments]       ${bucket}
     Execute           ozone sh bucket link o3://${OM_SERVICE_ID}/legacy/source-bucket o3://${OM_SERVICE_ID}/s3v/${bucket}
@@ -118,3 +127,30 @@
 Generate random prefix
     ${random} =          Generate Ozone String
                          Set Suite Variable  ${PREFIX}  ${random}
+
+Perform Multipart Upload
+    [arguments]    ${bucket}    ${key}    @{files}
+
+    ${result} =         Execute AWSS3APICli     create-multipart-upload --bucket ${bucket} --key ${key}
+    ${upload_id} =      Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
+
+    @{etags} =    Create List
+    FOR    ${i}    ${file}    IN ENUMERATE    @{files}
+        ${part} =    Evaluate    ${i} + 1
+        ${result} =   Execute AWSS3APICli     upload-part --bucket ${bucket} --key ${key} --part-number ${part} --body ${file} --upload-id ${upload_id}
+        ${etag} =     Execute                 echo '${result}' | jq -r '.ETag'
+        Append To List    ${etags}    {ETag=${etag},PartNumber=${part}}
+    END
+
+    ${parts} =    Catenate    SEPARATOR=,    @{etags}
+    Execute AWSS3APICli     complete-multipart-upload --bucket ${bucket} --key ${key} --upload-id ${upload_id} --multipart-upload 'Parts=[${parts}]'
+
+Verify Multipart Upload
+    [arguments]    ${bucket}    ${key}    @{files}
+
+    ${random} =    Generate Ozone String
+
+    Execute AWSS3APICli     get-object --bucket ${bucket} --key ${key} /tmp/verify${random}
+    ${tmp} =    Catenate    @{files}
+    Execute    cat ${tmp} > /tmp/original${random}
+    Compare files    /tmp/original${random}    /tmp/verify${random}
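
A hedged shell sketch of the multipart flow these keywords wrap, using placeholder bucket and key names and omitting the endpoint options that Execute AWSS3APICli adds:

    # Illustrative only: two-part upload matching Perform Multipart Upload.
    bucket=demo-bucket
    key=multipart/demo
    upload_id=$(aws s3api create-multipart-upload --bucket "$bucket" --key "$key" | jq -r '.UploadId')
    etag1=$(aws s3api upload-part --bucket "$bucket" --key "$key" --part-number 1 --body /tmp/part1 --upload-id "$upload_id" | jq -r '.ETag')
    etag2=$(aws s3api upload-part --bucket "$bucket" --key "$key" --part-number 2 --body /tmp/part2 --upload-id "$upload_id" | jq -r '.ETag')
    aws s3api complete-multipart-upload --bucket "$bucket" --key "$key" --upload-id "$upload_id" \
      --multipart-upload "Parts=[{ETag=${etag1},PartNumber=1},{ETag=${etag2},PartNumber=2}]"
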
diff --git a/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh
index 6573978..49a8f57 100755
--- a/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh
+++ b/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh
@@ -19,5 +19,6 @@
 
 : "${SCM_DIR:="${OZONE_VOLUME}/scm"}"
 : "${OZONE_RUNNER_VERSION:="20200625-1"}"
+: "${OZONE_RUNNER_IMAGE:="apache/ozone-runner"}"
 
-docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/1.0.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh
+docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/1.0.0":/upgrade -w /scm/metadata "${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh
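
The : "${VAR:=default}" form used above assigns the default to an unset or empty variable, so later expansions see it; a quick sketch of the idiom:

    # Illustrative only.
    unset OZONE_RUNNER_IMAGE
    : "${OZONE_RUNNER_IMAGE:="apache/ozone-runner"}"  # assigns the default
    echo "${OZONE_RUNNER_IMAGE}"                      # prints apache/ozone-runner
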
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 05094b0..b6ef865 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -229,7 +229,7 @@
     protected void initializeConfiguration() throws IOException {
       super.initializeConfiguration();
 
-      OzoneClientConfig clientConfig =new OzoneClientConfig();
+      OzoneClientConfig clientConfig = new OzoneClientConfig();
       clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024);
       clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024);
       clientConfig.setStreamBufferSize(4 * 1024);
@@ -331,7 +331,7 @@
 
   public Set<OzoneManager> omToFail() {
     int numNodesToFail = getNumberOfOmToFail();
-    if (failedOmSet.size() >= numOzoneManagers/2) {
+    if (failedOmSet.size() >= numOzoneManagers / 2) {
       return Collections.emptySet();
     }
 
@@ -359,7 +359,7 @@
 
   // Should the selected node be stopped or started.
   public boolean shouldStopOm() {
-    if (failedOmSet.size() >= numOzoneManagers/2) {
+    if (failedOmSet.size() >= numOzoneManagers / 2) {
       return false;
     }
     return RandomUtils.nextBoolean();
@@ -407,7 +407,7 @@
 
   public Set<StorageContainerManager> scmToFail() {
     int numNodesToFail = getNumberOfScmToFail();
-    if (failedScmSet.size() >= numStorageContainerManagers/2) {
+    if (failedScmSet.size() >= numStorageContainerManagers / 2) {
       return Collections.emptySet();
     }
 
@@ -434,7 +434,7 @@
 
   // Should the selected node be stopped or started.
   public boolean shouldStopScm() {
-    if (failedScmSet.size() >= numStorageContainerManagers/2) {
+    if (failedScmSet.size() >= numStorageContainerManagers / 2) {
       return false;
     }
     return RandomUtils.nextBoolean();
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
index 7e78e0f..f9c7fd0 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java
@@ -58,7 +58,7 @@
     this.conf = conf;
     this.omServiceID = omServiceId;
 
-    for(Class<? extends LoadGenerator> clazz : loadGeneratorClazzes) {
+    for (Class<? extends LoadGenerator> clazz : loadGeneratorClazzes) {
       addLoads(clazz, buffer);
     }
 
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
index 8232c40..d124a9f 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.ozone.failure.Failures;
-import org.apache.hadoop.ozone.loadgenerators.*;
+import org.apache.hadoop.ozone.loadgenerators.LoadGenerator;
 import picocli.CommandLine;
 
 import java.util.concurrent.Callable;
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java
index c6ccb3a..4a380fe 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java
@@ -195,7 +195,7 @@
     @Override
     public String toString() {
       return super.toString() + " "
-          + (readDir ? "readDirectory": "writeDirectory");
+          + (readDir ? "readDirectory" : "writeDirectory");
     }
   }
 
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
index 42fdb39..b6f897d 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.ozone.insight;
 
+import org.junit.Assert;
+import org.junit.Test;
+
 import java.util.HashMap;
 import java.util.Map;
 
-import org.junit.Assert;
-import static org.junit.Assert.*;
-import org.junit.Test;
-
 /**
  * Test common insight point utility methods.
  */
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index dc86044..752962f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -177,7 +177,7 @@
     keys.add("/dir1/dir2");
     keys.add("/dir1/dir2/dir3");
     keys.add("/dir1/dir2/dir3/dir4/");
-    for (int i=1; i <= 3; i++) {
+    for (int i = 1; i <= 3; i++) {
       int length = 10;
       String fileName = parentDir.concat("/file" + i + "/");
       keys.add(fileName);
@@ -190,7 +190,7 @@
     }
 
     // check
-    for (int i=1; i <= 3; i++) {
+    for (int i = 1; i <= 3; i++) {
       String fileName = parentDir.concat("/file" + i + "/");
       Path p = new Path(fileName);
       Assert.assertTrue(o3fs.getFileStatus(p).isFile());
@@ -209,12 +209,12 @@
     Assert.assertTrue(result);
 
     // No Key should exist.
-    for(String key : keys) {
+    for (String key : keys) {
       checkPath(new Path(key));
     }
 
 
-    for (int i=1; i <= 3; i++) {
+    for (int i = 1; i <= 3; i++) {
       int length = 10;
       String fileName = parentDir.concat("/file" + i + "/");
       OzoneOutputStream ozoneOutputStream =
@@ -229,12 +229,12 @@
     o3fs.rename(new Path("/dir1"), new Path("/dest"));
 
     // No source Key should exist.
-    for(String key : keys) {
+    for (String key : keys) {
       checkPath(new Path(key));
     }
 
     // check dest path.
-    for (int i=1; i <= 3; i++) {
+    for (int i = 1; i <= 3; i++) {
       String fileName = "/dest/".concat(parentDir.concat("/file" + i + "/"));
       Path p = new Path(fileName);
       Assert.assertTrue(o3fs.getFileStatus(p).isFile());
@@ -467,7 +467,7 @@
 
   private void checkAncestors(Path p) throws Exception {
     p = p.getParent();
-    while(p.getParent() != null) {
+    while (p.getParent() != null) {
       FileStatus fileStatus = o3fs.getFileStatus(p);
       Assert.assertTrue(fileStatus.isDirectory());
       p = p.getParent();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index 0e9c360..5c2e0cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -523,7 +523,7 @@
 
     assertEquals(0, blockLocations[0].getOffset());
     assertEquals(blockSize, blockLocations[1].getOffset());
-    assertEquals(2*blockSize, blockLocations[2].getOffset());
+    assertEquals(2 * blockSize, blockLocations[2].getOffset());
     assertEquals(blockSize, blockLocations[0].getLength());
     assertEquals(blockSize, blockLocations[1].getLength());
     assertEquals(837, blockLocations[2].getLength());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 8bd4ea6..5393ffd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -207,7 +207,7 @@
       for (FileStatus fileStatus : fileStatuses) {
         fs.delete(fileStatus.getPath(), true);
       }
-    } catch (IOException ex){
+    } catch (IOException ex) {
       fail("Failed to cleanup files.");
     }
   }
@@ -260,7 +260,7 @@
     fs.mkdirs(dir1);
     try (FSDataOutputStream outputStream1 = fs.create(dir1, false)) {
       fail("Should throw FileAlreadyExistsException");
-    } catch (FileAlreadyExistsException fae){
+    } catch (FileAlreadyExistsException fae) {
       // ignore as its expected
     }
 
@@ -291,14 +291,14 @@
             fileStatus.isDirectory());
 
     // invalid sub directory
-    try{
+    try {
       fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
       // ignore as its expected
     }
     // invalid file name
-    try{
+    try {
       fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey"));
       fail("Should throw FileNotFoundException");
     } catch (FileNotFoundException fnfe) {
@@ -345,10 +345,10 @@
   }
 
   private void checkInvalidPath(Path path) throws Exception {
-    try{
+    try {
       LambdaTestUtils.intercept(InvalidPathException.class, "Invalid path Name",
           () -> fs.create(path, false));
-    } catch (AssertionError e){
+    } catch (AssertionError e) {
       fail("testCreateWithInvalidPaths failed for path" + path);
     }
   }
@@ -417,7 +417,7 @@
     Path grandparent = new Path("/gdir1");
 
     for (int i = 1; i <= 10; i++) {
-      Path parent = new Path(grandparent, "pdir" +i);
+      Path parent = new Path(grandparent, "pdir" + i);
       Path child = new Path(parent, "child");
       ContractTestUtils.touch(fs, child);
     }
@@ -446,7 +446,7 @@
     checkPath(grandparent);
 
     for (int i = 1; i <= 10; i++) {
-      Path parent = new Path(grandparent, "dir" +i);
+      Path parent = new Path(grandparent, "dir" + i);
       Path child = new Path(parent, "child");
       checkPath(parent);
       checkPath(child);
@@ -456,8 +456,8 @@
     Path level0 = new Path("/level0");
 
     for (int i = 1; i <= 3; i++) {
-      Path level1 = new Path(level0, "level" +i);
-      Path level2 = new Path(level1, "level" +i);
+      Path level1 = new Path(level0, "level" + i);
+      Path level2 = new Path(level1, "level" + i);
       Path level1File = new Path(level1, "file1");
       Path level2File = new Path(level2, "file1");
       ContractTestUtils.touch(fs, level1File);
@@ -466,8 +466,8 @@
 
     // Delete at sub directory level.
     for (int i = 1; i <= 3; i++) {
-      Path level1 = new Path(level0, "level" +i);
-      Path level2 = new Path(level1, "level" +i);
+      Path level1 = new Path(level0, "level" + i);
+      Path level2 = new Path(level1, "level" + i);
       fs.delete(level2, true);
       fs.delete(level1, true);
     }
@@ -480,8 +480,8 @@
     checkPath(level0);
 
     for (int i = 1; i <= 3; i++) {
-      Path level1 = new Path(level0, "level" +i);
-      Path level2 = new Path(level1, "level" +i);
+      Path level1 = new Path(level0, "level" + i);
+      Path level2 = new Path(level1, "level" + i);
       Path level1File = new Path(level1, "file1");
       Path level2File = new Path(level2, "file1");
       checkPath(level1);
@@ -591,9 +591,9 @@
 
     // Wait until the filestatus is updated
     if (!enabledFileSystemPaths) {
-      GenericTestUtils.waitFor(()-> {
+      GenericTestUtils.waitFor(() -> {
         try {
-          return fs.listStatus(parent).length!=0;
+          return fs.listStatus(parent).length != 0;
         } catch (IOException e) {
           LOG.error("listStatus() Failed", e);
           Assert.fail("listStatus() Failed");
@@ -644,7 +644,7 @@
     deleteRootDir(); // cleanup
     Set<String> paths = new TreeSet<>();
     int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
-    for(int i = 0; i < numDirs; i++) {
+    for (int i = 0; i < numDirs; i++) {
       Path p = new Path(root, String.valueOf(i));
       fs.mkdirs(p);
       paths.add(p.getName());
@@ -677,7 +677,7 @@
         "Total directories listed do not match the existing directories",
         numDirs, fileStatuses.length);
 
-    for (int i=0; i < numDirs; i++) {
+    for (int i = 0; i < numDirs; i++) {
       assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
     }
   }
@@ -1278,7 +1278,7 @@
     Path trashPath = new Path(userTrashCurrent, testKeyName);
 
     // Wait until the TrashEmptier purges the key
-    GenericTestUtils.waitFor(()-> {
+    GenericTestUtils.waitFor(() -> {
       try {
         return !o3fs.exists(trashPath);
       } catch (IOException e) {
@@ -1292,9 +1292,9 @@
     Assert.assertEquals(1, fs.listStatus(userTrash).length);
 
     // wait for deletion of checkpoint dir
-    GenericTestUtils.waitFor(()-> {
+    GenericTestUtils.waitFor(() -> {
       try {
-        return o3fs.listStatus(userTrash).length==0;
+        return o3fs.listStatus(userTrash).length == 0;
       } catch (IOException e) {
         LOG.error("Delete from Trash Failed", e);
         Assert.fail("Delete from Trash Failed");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java
index c5e7bb5..c1481e3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java
@@ -17,15 +17,19 @@
  */
 package org.apache.hadoop.fs.ozone;
 
-import org.apache.commons.io.IOUtils;;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
@@ -35,9 +39,9 @@
 import java.io.IOException;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index 974912a..4153962 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -188,7 +188,7 @@
    */
   private String getHostFromAddress(String addr) {
     Optional<String> hostOptional = getHostName(addr);
-    assert(hostOptional.isPresent());
+    assert (hostOptional.isPresent());
     return hostOptional.get();
   }
 
@@ -199,7 +199,7 @@
    */
   private int getPortFromAddress(String addr) {
     OptionalInt portOptional = getHostPort(addr);
-    assert(portOptional.isPresent());
+    assert (portOptional.isPresent());
     return portOptional.getAsInt();
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 96b461f..9ddd2d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -157,7 +157,7 @@
   }
 
   @Rule
-  public Timeout globalTimeout = Timeout.seconds(300);;
+  public Timeout globalTimeout = Timeout.seconds(300);
 
   private static boolean enabledFileSystemPaths;
   private static boolean omRatisEnabled;
@@ -191,7 +191,7 @@
     conf = new OzoneConfiguration();
     conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
     conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
-    conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL/2);
+    conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2);
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
     if (isBucketFSOptimized) {
       bucketLayout = BucketLayout.FILE_SYSTEM_OPTIMIZED;
@@ -546,7 +546,7 @@
     Path root = new Path("/" + volumeName + "/" + bucketName);
     Set<String> paths = new TreeSet<>();
     int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
-    for(int i = 0; i < numDirs; i++) {
+    for (int i = 0; i < numDirs; i++) {
       Path p = new Path(root, String.valueOf(i));
       fs.mkdirs(p);
       paths.add(p.getName());
@@ -557,12 +557,12 @@
         "Total directories listed do not match the existing directories",
         numDirs, fileStatuses.length);
 
-    for (int i=0; i < numDirs; i++) {
+    for (int i = 0; i < numDirs; i++) {
       Assert.assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
     }
 
     // Cleanup
-    for(int i = 0; i < numDirs; i++) {
+    for (int i = 0; i < numDirs; i++) {
       Path p = new Path(root, String.valueOf(i));
       fs.delete(p, true);
     }
@@ -1362,7 +1362,7 @@
 
 
     // Wait until the TrashEmptier purges the keys
-    GenericTestUtils.waitFor(()-> {
+    GenericTestUtils.waitFor(() -> {
       try {
         return !ofs.exists(trashPath) && !ofs.exists(trashPath2);
       } catch (IOException e) {
@@ -1372,7 +1372,7 @@
       }
     }, 1000, 180000);
 
-    if (isBucketFSOptimized){
+    if (isBucketFSOptimized) {
       Assert.assertTrue(getOMMetrics()
           .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames);
     } else {
@@ -1385,10 +1385,10 @@
     }
 
     // wait for deletion of checkpoint dir
-    GenericTestUtils.waitFor(()-> {
+    GenericTestUtils.waitFor(() -> {
       try {
-        return ofs.listStatus(userTrash).length==0 &&
-            ofs.listStatus(userTrash2).length==0;
+        return ofs.listStatus(userTrash).length == 0 &&
+            ofs.listStatus(userTrash2).length == 0;
       } catch (IOException e) {
         LOG.error("Delete from Trash Failed", e);
         Assert.fail("Delete from Trash Failed");
@@ -1397,7 +1397,7 @@
     }, 1000, 120000);
 
     // This condition should succeed once the checkpoint directory is deleted
-    if(isBucketFSOptimized){
+    if (isBucketFSOptimized) {
       GenericTestUtils.waitFor(
           () -> getOMMetrics().getNumTrashAtomicDirDeletes() >
               prevNumTrashAtomicDirDeletes, 100, 180000);
@@ -1444,7 +1444,7 @@
   @Test
   public void testRenameFile() throws Exception {
     final String dir = "/dir" + new Random().nextInt(1000);
-    Path dirPath = new Path(getBucketPath() +dir);
+    Path dirPath = new Path(getBucketPath() + dir);
     getFs().mkdirs(dirPath);
 
     Path file1Source = new Path(getBucketPath() + dir
@@ -1466,7 +1466,7 @@
   @Test
   public void testRenameFileToDir() throws Exception {
     final String dir = "/dir" + new Random().nextInt(1000);
-    Path dirPath = new Path(getBucketPath() +dir);
+    Path dirPath = new Path(getBucketPath() + dir);
     getFs().mkdirs(dirPath);
 
     Path file1Destin = new Path(getBucketPath() + dir  + "/file1");
@@ -1531,11 +1531,11 @@
     final Path sourceRoot = new Path(getBucketPath() + root);
     LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
     //  rename should fail and return false
-    try{
+    try {
       getFs().rename(sourceRoot, subDir1);
       fail("Should throw exception : Cannot rename a directory to" +
           " its own subdirectory");
-    } catch (IllegalArgumentException e){
+    } catch (IllegalArgumentException e) {
       //expected
     }
   }
@@ -1560,7 +1560,7 @@
     try {
       getFs().rename(dir2SourcePath, destinPath);
       fail("Should fail as parent of dst does not exist!");
-    } catch (FileNotFoundException fnfe){
+    } catch (FileNotFoundException fnfe) {
       //expected
     }
     // (b) parent of dst is a file. /root_dir/file1/c
@@ -1568,10 +1568,10 @@
     ContractTestUtils.touch(getFs(), filePath);
     Path newDestinPath = new Path(filePath, "c");
     // rename shouldthrow exception
-    try{
+    try {
       getFs().rename(dir2SourcePath, newDestinPath);
       fail("Should fail as parent of dst is a file!");
-    } catch (IOException e){
+    } catch (IOException e) {
       //expected
     }
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
index 0fc23c3..f7858d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
@@ -27,7 +27,7 @@
  */
 public final class ITestOzoneContractUtils {
 
-  private ITestOzoneContractUtils(){}
+  private ITestOzoneContractUtils() { }
 
   private static List<Object> fsoCombinations = Arrays.asList(new Object[] {
       // FSO configuration is a cluster level server side configuration.
@@ -47,7 +47,7 @@
       // and old buckets will be operated on
   });
 
-  static List<Object> getFsoCombinations(){
+  static List<Object> getFsoCombinations() {
     return fsoCombinations;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index f6c9a25..784897a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -66,7 +66,7 @@
     return path;
   }
 
-  public static void initOzoneConfiguration(boolean fsoServer){
+  public static void initOzoneConfiguration(boolean fsoServer) {
     fsOptimizedServer = fsoServer;
   }
 
@@ -92,7 +92,7 @@
 
     conf.addResource(CONTRACT_XML);
 
-    if (fsOptimizedServer){
+    if (fsOptimizedServer) {
       // Default bucket layout is set to FSO in case of FSO server.
       conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
           OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 6d4a1f4..ce90783 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -398,7 +398,7 @@
     // Test 1: no replica's exist
     ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong());
     Set<ContainerReplica> replicaSet;
-    containerStateManager.getContainerReplicas(containerID.getProtobuf());
+    containerStateManager.getContainerReplicas(containerID);
     Assert.fail();
 
     ContainerWithPipeline container = scm.getClientProtocolServer()
@@ -419,44 +419,44 @@
         .setContainerState(ContainerReplicaProto.State.OPEN)
         .setDatanodeDetails(dn2)
         .build();
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    containerStateManager.updateContainerReplica(id, replicaTwo);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(2, replicaSet.size());
     Assert.assertTrue(replicaSet.contains(replicaOne));
     Assert.assertTrue(replicaSet.contains(replicaTwo));
 
     // Test 3: Remove one replica node and then test
-    containerStateManager.removeContainerReplica(id.getProtobuf(), replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.removeContainerReplica(id, replicaOne);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(1, replicaSet.size());
     Assert.assertFalse(replicaSet.contains(replicaOne));
     Assert.assertTrue(replicaSet.contains(replicaTwo));
 
     // Test 3: Remove second replica node and then test
-    containerStateManager.removeContainerReplica(id.getProtobuf(), replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.removeContainerReplica(id, replicaTwo);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(0, replicaSet.size());
     Assert.assertFalse(replicaSet.contains(replicaOne));
     Assert.assertFalse(replicaSet.contains(replicaTwo));
 
     // Test 4: Re-insert dn1
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(1, replicaSet.size());
     Assert.assertTrue(replicaSet.contains(replicaOne));
     Assert.assertFalse(replicaSet.contains(replicaTwo));
 
     // Re-insert dn2
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaTwo);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.updateContainerReplica(id, replicaTwo);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(2, replicaSet.size());
     Assert.assertTrue(replicaSet.contains(replicaOne));
     Assert.assertTrue(replicaSet.contains(replicaTwo));
 
     // Re-insert dn1
-    containerStateManager.updateContainerReplica(id.getProtobuf(), replicaOne);
-    replicaSet = containerStateManager.getContainerReplicas(id.getProtobuf());
+    containerStateManager.updateContainerReplica(id, replicaOne);
+    replicaSet = containerStateManager.getContainerReplicas(id);
     Assert.assertEquals(2, replicaSet.size());
     Assert.assertTrue(replicaSet.contains(replicaOne));
     Assert.assertTrue(replicaSet.contains(replicaTwo));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index d741117..c1bbcf4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -59,7 +59,7 @@
 
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(numDatanodes)
-            .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3)
+            .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3)
             .setHbInterval(2000)
             .setHbProcessorInterval(1000)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index 4ebcab1..b4d7270 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -154,7 +154,7 @@
     shutdown();
   }
   private void assertNotSamePeers() {
-    nodeManager.getAllNodes().forEach((dn) ->{
+    nodeManager.getAllNodes().forEach((dn) -> {
       Collection<DatanodeDetails> peers = nodeManager.getPeerList(dn);
       Assert.assertFalse(peers.contains(dn));
       List<DatanodeDetails> trimList = nodeManager.getAllNodes();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
index 58c0c62..457e12a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
@@ -61,7 +61,7 @@
 
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(numDatanodes)
-            .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3)
+            .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3)
             .setHbInterval(2000)
             .setHbProcessorInterval(1000)
             .build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 724d34c..4c97b51 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -86,10 +86,10 @@
     int datanodeCount = 6;
     setup(datanodeCount);
 
-    waitForRatis3NodePipelines(datanodeCount/3);
+    waitForRatis3NodePipelines(datanodeCount / 3);
     waitForRatis1NodePipelines(datanodeCount);
 
-    int totalPipelineCount = datanodeCount + (datanodeCount/3);
+    int totalPipelineCount = datanodeCount + (datanodeCount / 3);
 
     //Cluster is started successfully
     cluster.stop();
@@ -178,7 +178,7 @@
     });
 
     waitForRatis1NodePipelines(datanodeCount);
-    waitForRatis3NodePipelines(datanodeCount/3);
+    waitForRatis3NodePipelines(datanodeCount / 3);
 
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index e7f6f34..800992d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -189,7 +189,7 @@
    * Some tests repeatedly modify the cluster. Helper function to reload the
    * latest SCM state.
    */
-  private void loadSCMState(){
+  private void loadSCMState() {
     scm = cluster.getStorageContainerManager();
     scmContainerManager = scm.getContainerManager();
     scmPipelineManager = scm.getPipelineManager();
@@ -503,7 +503,7 @@
       IOException {
     // For some tests this could get called in a different thread context.
     // We need to guard concurrent updates to the cluster.
-    synchronized(cluster) {
+    synchronized (cluster) {
       cluster.restartStorageContainerManager(true);
       loadSCMState();
     }
@@ -1090,7 +1090,7 @@
     // Verify that new pipeline can be created with upgraded datanodes.
     try {
       testPostUpgradePipelineCreation();
-    } catch(SCMException e) {
+    } catch (SCMException e) {
       // If pipeline creation fails, make sure that there is a valid reason
       // for this i.e. all datanodes are already part of some pipeline.
       for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index b6d4b31..4b9bd5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -160,6 +160,8 @@
    */
   List<HddsDatanodeService> getHddsDatanodes();
 
+  HddsDatanodeService getHddsDatanode(DatanodeDetails dn) throws IOException;
+
   /**
    * Returns a {@link ReconServer} instance.
    *
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 6e56a4d..7d6b080 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -290,6 +290,18 @@
   }
 
   @Override
+  public HddsDatanodeService getHddsDatanode(DatanodeDetails dn)
+      throws IOException {
+    for (HddsDatanodeService service : hddsDatanodes) {
+      if (service.getDatanodeDetails().equals(dn)) {
+        return service;
+      }
+    }
+    throw new IOException(
+        "Not able to find datanode with datanode Id " + dn.getUuid());
+  }
+
+  @Override
   public ReconServer getReconServer() {
     return this.reconServer;
   }
@@ -677,7 +689,7 @@
       // In this way safemode exit will happen only when atleast we have one
       // pipeline.
       conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE,
-          numOfDatanodes >=3 ? 3 : 1);
+          numOfDatanodes >= 3 ? 3 : 1);
       configureTrace();
     }
 
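The new MiniOzoneCluster#getHddsDatanode(DatanodeDetails) accessor added above is a simple linear scan over the running HddsDatanodeService instances and throws an IOException when no service matches the given details. A minimal usage sketch, not part of the patch: `pipeline` and `containerId` are assumed to come from a key's location info, and the imports are the same ones the tests in this patch already use.

    // Resolve the datanode service hosting the first node of the pipeline,
    // then look up the container data it stores for the given container id.
    DatanodeDetails firstNode = pipeline.getFirstNode();
    HddsDatanodeService dnService = cluster.getHddsDatanode(firstNode);
    KeyValueContainerData containerData =
        ((KeyValueContainer) dnService.getDatanodeStateMachine()
            .getContainer().getContainerSet()
            .getContainer(containerId)).getContainerData();
    // Passing a DatanodeDetails that is not part of the cluster surfaces as
    // an IOException rather than a null return.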
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java
index 8904620..ab2405a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java
@@ -189,7 +189,7 @@
 
   private Thread reapClusters() {
     Thread t = new Thread(() -> {
-      while(!shutdown || !expiredClusters.isEmpty()) {
+      while (!shutdown || !expiredClusters.isEmpty()) {
         try {
           // Why not just call take and wait forever until interrupt is
           // thrown? Inside MiniCluster.shutdown, there are places where it
@@ -251,7 +251,7 @@
   }
 
   private void destroyRemainingClusters() {
-    while(!clusters.isEmpty()) {
+    while (!clusters.isEmpty()) {
       try {
         MiniOzoneCluster cluster = clusters.poll();
         if (cluster != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index 440f5ca..556af06 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -490,7 +490,7 @@
         try {
           initOMHAConfig();
 
-          for (int i = 1; i<= numOfOMs; i++) {
+          for (int i = 1; i <= numOfOMs; i++) {
             // Set nodeId
             String nodeId = OM_NODE_ID_PREFIX + i;
             OzoneConfiguration config = new OzoneConfiguration(conf);
@@ -564,7 +564,7 @@
         try {
           initSCMHAConfig();
 
-          for (int i = 1; i<= numOfSCMs; i++) {
+          for (int i = 1; i <= numOfSCMs; i++) {
             // Set nodeId
             String nodeId = SCM_NODE_ID_PREFIX + i;
             String metaDirPath = path + "/" + nodeId;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index e4cb1a1..dba9739 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -129,7 +129,7 @@
    */
   public static void performOperationOnKeyContainers(
       CheckedConsumer<BlockID, Exception> consumer,
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception{
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws Exception {
 
     for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
         omKeyLocationInfoGroups) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStandardOutputUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java
similarity index 98%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStandardOutputUtil.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java
index ec80a49..40a5447 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStandardOutputUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/StandardOutputTestBase.java
@@ -28,7 +28,7 @@
 /**
  * Utility class to check standard output.
  */
-public class TestStandardOutputUtil {
+public class StandardOutputTestBase {
   private final ByteArrayOutputStream outContent =
       new ByteArrayOutputStream();
   private final ByteArrayOutputStream errContent =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
index 2b0dfc2..8663c72 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
@@ -64,7 +64,7 @@
 
   @AfterClass
   public static void cleanup() throws Exception {
-    if(cluster != null) {
+    if (cluster != null) {
       cluster.shutdown();
     }
   }
@@ -100,7 +100,7 @@
     // modify this after balancer is fully completed
     try {
       Thread.sleep(100);
-    } catch (InterruptedException e) {}
+    } catch (InterruptedException e) { }
 
     running = containerBalancerClient.getContainerBalancerStatus();
     assertFalse(running);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index 5516468..e15d7b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -64,7 +64,7 @@
 
   @AfterClass
   public static void cleanup() throws Exception {
-    if(cluster != null) {
+    if (cluster != null) {
       cluster.shutdown();
     }
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index e7436e0..2b09514 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -119,7 +119,7 @@
     cluster.waitForClusterToBeReady();
     List<HddsDatanodeService> datanodes = cluster.getHddsDatanodes();
     assertEquals(numberOfNodes, datanodes.size());
-    for(HddsDatanodeService dn : datanodes) {
+    for (HddsDatanodeService dn : datanodes) {
       // Create a single member pipe line
       List<DatanodeDetails> dns = new ArrayList<>();
       dns.add(dn.getDatanodeDetails());
@@ -132,7 +132,7 @@
           .build();
 
       // Verify client is able to connect to the container
-      try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)){
+      try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)) {
         client.connect();
         assertTrue(client.isConnected(pipeline.getFirstNode()));
       }
@@ -285,7 +285,7 @@
   }
 
   private void createMalformedIDFile(File malformedFile)
-      throws IOException{
+      throws IOException {
     malformedFile.delete();
     DatanodeDetails id = randomDatanodeDetails();
     ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
index 44b635f..fe87984 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
@@ -54,7 +54,7 @@
   public ExpectedException exception = ExpectedException.none();
 
   @Rule
-  public Timeout timeout = Timeout.seconds(300);;
+  public Timeout timeout = Timeout.seconds(300);
 
   /**
    * Create a MiniOzoneHAClusterImpl for testing.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 494f74c..cd6d816 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -86,7 +86,18 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
 import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.net.ServerSocketUtil.getPort;
@@ -138,7 +149,7 @@
   public Timeout timeout = Timeout.seconds(80);
 
   @Rule
-  public TemporaryFolder folder= new TemporaryFolder();
+  public TemporaryFolder folder = new TemporaryFolder();
 
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 14aa85d..4e14ea8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -82,7 +82,10 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.ha.*;
+import org.apache.hadoop.hdds.scm.ha.RatisUtil;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
 import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
index b37f785..58021f3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
@@ -177,12 +177,12 @@
   }
 
   @Override
-  public String getSignatureAlgorithm(){
+  public String getSignatureAlgorithm() {
     return securityConfig.getSignatureAlgo();
   }
 
   @Override
-  public String getSecurityProvider(){
+  public String getSecurityProvider() {
     return securityConfig.getProvider();
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index 612d5ac..e255875 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -49,7 +49,9 @@
     HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys
     .HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 import org.junit.Rule;
 import org.junit.rules.Timeout;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
index e03c5a9..82f23a6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
@@ -314,7 +314,7 @@
       // is updated to the latest index in putBlock response.
       watcher.watchForCommit(replies.get(1).getLogIndex() + 100);
       Assert.fail("Expected exception not thrown");
-    } catch(IOException ioe) {
+    } catch (IOException ioe) {
       // with retry count set to noRetry and a lower watch request
       // timeout, watch request will eventually
       // fail with TimeoutIOException from ratis client or the client
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index 43b488d..ad87bc4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -50,7 +50,9 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
 import org.junit.After;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index caf3ae0..247a6ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -35,7 +35,11 @@
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.File;
@@ -46,8 +50,12 @@
 import java.util.concurrent.TimeUnit;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.HddsConfigKeys.*;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests the containerStateMachine failure handling by set flush delay.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index ac62bc0..b16b824 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -59,7 +59,7 @@
 /**
  * Tests Close Container Exception handling by Ozone Client.
  */
-public class TestDiscardPreallocatedBlocks{
+public class TestDiscardPreallocatedBlocks {
 
   /**
    * Set a timeout for each test.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index e1358eb..41a0892 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -53,6 +53,11 @@
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.TestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -60,6 +65,7 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Ignore;
@@ -167,10 +173,8 @@
     startCluster();
     String keyName = UUID.randomUUID().toString();
     OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
-    byte[] data =
-        ContainerTestHelper
-        .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes(
-            UTF_8);
+    byte[] data = ContainerTestHelper.getFixedLengthString(
+        keyString, 2 * chunkSize + chunkSize / 2).getBytes(UTF_8);
     key.write(data);
 
     // get the name of a valid container
@@ -201,8 +205,110 @@
         .setRefreshPipeline(true)
         .build();
     OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
+
     Assert.assertEquals(data.length, keyInfo.getDataSize());
     validateData(keyName, data);
+
+    // Verify that the block information is updated correctly in the DB on
+    // failures
+    testBlockCountOnFailures(keyInfo);
+  }
+
+  /**
+   * Test whether blockData and Container metadata (block count and used
+   * bytes) are updated correctly when there is a write failure.
+   * We combine this check with {@link #testBlockWritesWithDnFailures()}
+   * as that test already simulates a write failure after which the client
+   * writes the failed chunks to a new block.
+   */
+  private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
+    // testBlockWritesWithDnFailures writes 2.5 * chunkSize of data into the
+    // KeyOutputStream. But before the output stream is closed, 2 of the DNs
+    // in the pipeline being written to are shut down. So the key will be
+    // written to 2 blocks, as at least the last 0.5 chunk would not be
+    // committed to the first block before the stream is closed.
+    /**
+     * There are 3 possible scenarios:
+     * 1. Block1 has 2 chunks and OMKeyInfo also has 2 chunks against this block
+     *    => Block2 should have 1 chunk
+     *    (2 chunks were written to Block1, committed and acknowledged by
+     *    CommitWatcher)
+     * 2. Block1 has 1 chunk and OMKeyInfo has 1 chunk against this block
+     *    => Block2 should have 2 chunks
+     *    (Possibly 2 chunks were written but only 1 was committed to the
+     *    block)
+     * 3. Block1 has 2 chunks but OMKeyInfo has only 1 chunk against this block
+     *    => Block2 should have 2 chunks
+     *    (This happens when the 2nd chunk has been committed to Block1 but
+     *    not acknowledged by CommitWatcher before pipeline shutdown)
+     */
+
+    // Get information about the first and second block (in different pipelines)
+    List<OmKeyLocationInfo> locationList = omKeyInfo.getLatestVersionLocations()
+        .getLocationList();
+    long containerId1 = locationList.get(0).getContainerID();
+    List<DatanodeDetails> block1DNs = locationList.get(0).getPipeline()
+        .getNodes();
+    long containerId2 = locationList.get(1).getContainerID();
+    List<DatanodeDetails> block2DNs = locationList.get(1).getPipeline()
+        .getNodes();
+
+
+    int block2ExpectedChunkCount;
+    if (locationList.get(0).getLength() == 2 * chunkSize) {
+      // Scenario 1
+      block2ExpectedChunkCount = 1;
+    } else {
+      // Scenario 2 or 3
+      block2ExpectedChunkCount = 2;
+    }
+
+    // For the first block, the first 2 DNs in the pipeline are shut down (to
+    // simulate a failure). It should have 1 or 2 chunks (depending on
+    // whether the DN CommitWatcher successfully acknowledged the 2nd chunk
+    // write or not). The 3rd chunk would not exist on the first pipeline as
+    // the pipeline would be closed before the last 0.5 chunk was committed
+    // to the block.
+    KeyValueContainerData containerData1 =
+        ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2))
+            .getDatanodeStateMachine().getContainer().getContainerSet()
+            .getContainer(containerId1)).getContainerData();
+    try (ReferenceCountedDB containerDb1 = BlockUtils.getDB(containerData1,
+        conf)) {
+      BlockData blockData1 = containerDb1.getStore().getBlockDataTable().get(
+          Long.toString(locationList.get(0).getBlockID().getLocalID()));
+      // The first Block could have 1 or 2 chunks (chunkSize bytes each)
+      int block1NumChunks = blockData1.getChunks().size();
+      Assert.assertTrue(block1NumChunks >= 1);
+
+      Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
+      Assert.assertEquals(1, containerData1.getBlockCount());
+      Assert.assertEquals(chunkSize * block1NumChunks,
+          containerData1.getBytesUsed());
+    }
+
+    // Verify that the second block has the remaining 0.5 or 1.5 chunkSize
+    KeyValueContainerData containerData2 =
+        ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0))
+            .getDatanodeStateMachine().getContainer().getContainerSet()
+            .getContainer(containerId2)).getContainerData();
+    try (ReferenceCountedDB containerDb2 = BlockUtils.getDB(containerData2,
+        conf)) {
+      BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get(
+          Long.toString(locationList.get(1).getBlockID().getLocalID()));
+      // The second Block should have the expected chunk count (1 or 2)
+      Assert.assertEquals(block2ExpectedChunkCount,
+          blockData2.getChunks().size());
+      Assert.assertEquals(1, containerData2.getBlockCount());
+      int expectedBlockSize;
+      if (block2ExpectedChunkCount == 1) {
+        expectedBlockSize = chunkSize / 2;
+      } else {
+        expectedBlockSize = chunkSize + chunkSize / 2;
+      }
+      Assert.assertEquals(expectedBlockSize, blockData2.getSize());
+      Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed());
+    }
   }
 
   @Test
@@ -212,7 +318,7 @@
     OzoneOutputStream key =
         createKey(keyName, ReplicationType.RATIS, 0);
     String data = ContainerTestHelper
-        .getFixedLengthString(keyString,  chunkSize/2);
+        .getFixedLengthString(keyString,  chunkSize / 2);
     key.write(data.getBytes(UTF_8));
     // get the name of a valid container
     Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
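The scenario comment in testBlockCountOnFailures above boils down to simple size bookkeeping over the 2.5 chunks written by testBlockWritesWithDnFailures. A compact sketch of that arithmetic from the OM's point of view (illustrative only; `chunkSize` and `locationList` are the test's own variables, and scenario 3 differs only in what the first container holds on disk, not in the OM-visible lengths):

    long written = 2 * chunkSize + chunkSize / 2;       // 2.5 chunks in total
    long block1Len = locationList.get(0).getLength();   // chunkSize or 2 * chunkSize
    long block2Len = written - block1Len;                // 1.5 or 0.5 chunks
    int block2Chunks = (block2Len == chunkSize / 2) ? 1 : 2;
    // block1Len == 2 * chunkSize -> block2 holds 0.5 chunk  (1 chunk entry)
    // block1Len == chunkSize     -> block2 holds 1.5 chunks (2 chunk entries)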
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 7563003..044d519 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -46,7 +46,10 @@
 import org.apache.hadoop.ozone.container.TestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index 5bc1de2..a81c6a4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -55,7 +55,9 @@
 import org.junit.rules.Timeout;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests MultiBlock Writes with Dn failures by Ozone Client.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index 9a4d691..3480738 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -178,7 +178,7 @@
    */
   @AfterClass
   public static void shutdown() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
@@ -251,7 +251,7 @@
     byte[] fileContent;
     int len = 0;
 
-    try(OzoneInputStream is = bucket.readKey(keyName)) {
+    try (OzoneInputStream is = bucket.readKey(keyName)) {
       fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length];
       len = is.read(fileContent);
     }
@@ -267,7 +267,7 @@
   }
 
   private OzoneBucket createVolumeAndBucket(String volumeName,
-      String bucketName) throws Exception{
+      String bucketName) throws Exception {
     store.createVolume(volumeName);
     OzoneVolume volume = store.getVolume(volumeName);
     BucketArgs bucketArgs = BucketArgs.newBuilder()
@@ -333,7 +333,7 @@
     byte[] fileContent;
     int len = 0;
 
-    try(OzoneInputStream is = bucket.readKey(keyName)) {
+    try (OzoneInputStream is = bucket.readKey(keyName)) {
       fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length];
       len = is.read(fileContent);
     }
@@ -475,8 +475,9 @@
       // Adding a random int with a cap at 8K (the default crypto buffer
       // size) to get parts whose last byte does not coincide with crypto
       // buffer boundary.
-      byte[] data = generateRandomData((MPU_PART_MIN_SIZE * i) +
-          RANDOM.nextInt(DEFAULT_CRYPTO_BUFFER_SIZE));
+      int partSize = (MPU_PART_MIN_SIZE * i) +
+          RANDOM.nextInt(DEFAULT_CRYPTO_BUFFER_SIZE - 1) + 1;
+      byte[] data = generateRandomData(partSize);
       String partName = uploadPart(bucket, keyName, uploadID, i, data);
       partsMap.put(i, partName);
       partsData.add(data);
@@ -495,10 +496,20 @@
     // Complete MPU
     completeMultipartUpload(bucket, keyName, uploadID, partsMap);
 
+    // Create an input stream to read the data
+    OzoneInputStream inputStream = bucket.readKey(keyName);
+    Assert.assertTrue(inputStream instanceof MultipartCryptoKeyInputStream);
+
+    // Test complete read
+    byte[] completeRead = new byte[keySize];
+    int bytesRead = inputStream.read(completeRead, 0, keySize);
+    Assert.assertEquals(keySize, bytesRead);
+    Assert.assertArrayEquals(inputData, completeRead);
+
     // Read different data lengths and starting from different offsets and
     // verify the data matches.
     Random random = new Random();
-    int randomSize = random.nextInt(keySize/2);
+    int randomSize = random.nextInt(keySize / 2);
     int randomOffset = random.nextInt(keySize - randomSize);
 
     int[] readDataSizes = {keySize, keySize / 3 + 1, BLOCK_SIZE,
@@ -510,12 +521,6 @@
         BLOCK_SIZE - DEFAULT_CRYPTO_BUFFER_SIZE + 1, BLOCK_SIZE, keySize / 3,
         keySize - 1, randomOffset};
 
-    // Create an input stream to read the data
-    OzoneInputStream inputStream = bucket.readKey(keyName);
-    Assert.assertTrue(inputStream instanceof MultipartCryptoKeyInputStream);
-    MultipartCryptoKeyInputStream cryptoInputStream =
-        (MultipartCryptoKeyInputStream) inputStream;
-
     for (int readDataLen : readDataSizes) {
       for (int readFromPosition : readFromPositions) {
         // Check that offset + buffer size does not exceed the key size
@@ -524,10 +529,13 @@
         }
 
         byte[] readData = new byte[readDataLen];
-        cryptoInputStream.seek(readFromPosition);
-        inputStream.read(readData, 0, readDataLen);
+        inputStream.seek(readFromPosition);
+        int actualReadLen = inputStream.read(readData, 0, readDataLen);
 
         assertReadContent(inputData, readData, readFromPosition);
+        Assert.assertEquals(readFromPosition + readDataLen,
+            inputStream.getPos());
+        Assert.assertEquals(readDataLen, actualReadLen);
       }
     }
   }
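The reworked MPU read loop above seeks the OzoneInputStream directly and then checks both the returned length and the stream position. A minimal sketch of that seek-then-read verification pattern (illustrative only; `inputStream`, `inputData` and `keySize` are the test's own variables, the offsets are arbitrary values within the key, and Arrays.copyOfRange stands in for the test's assertReadContent helper):

    int offset = 4096;
    int readLen = 1024;                        // offset + readLen <= keySize
    byte[] buf = new byte[readLen];
    inputStream.seek(offset);                  // position the stream first
    int n = inputStream.read(buf, 0, readLen);
    Assert.assertEquals(readLen, n);           // whole requested range read
    Assert.assertEquals(offset + readLen, inputStream.getPos());
    Assert.assertArrayEquals(
        Arrays.copyOfRange(inputData, offset, offset + readLen), buf);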
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 636029c..9ea04d4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -140,7 +140,7 @@
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
   static void shutdownCluster() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
@@ -609,11 +609,11 @@
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
 
-    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(2, partName2);
 
-    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(3, partName3);
 
@@ -711,11 +711,11 @@
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
 
-    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(2, partName2);
 
-    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(3, partName3);
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 3cda449..ea992f5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -191,8 +191,8 @@
   @Test
   public void testMaxRetriesByOzoneClient() throws Exception {
     String keyName = getKeyName();
-    OzoneOutputStream key =
-        createKey(keyName, ReplicationType.RATIS, (MAX_RETRIES+1) * blockSize);
+    OzoneOutputStream key = createKey(
+        keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
     Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
     KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
     List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index e70087a..128c407 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -203,7 +203,7 @@
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
   static void shutdownCluster() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
@@ -224,7 +224,7 @@
     TestOzoneRpcClientAbstract.ozClient = ozClient;
   }
 
-  public static void setOzoneManager(OzoneManager ozoneManager){
+  public static void setOzoneManager(OzoneManager ozoneManager) {
     TestOzoneRpcClientAbstract.ozoneManager = ozoneManager;
   }
 
@@ -1091,7 +1091,7 @@
 
   private void writeKey(OzoneBucket bucket, String keyName,
       ReplicationFactor replication, String value, int valueLength)
-      throws IOException{
+      throws IOException {
     OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS,
         replication, new HashMap<>());
     out.write(value.getBytes(UTF_8));
@@ -1100,7 +1100,7 @@
 
   private void writeFile(OzoneBucket bucket, String keyName,
       ReplicationFactor replication, String value, int valueLength)
-      throws IOException{
+      throws IOException {
     OzoneOutputStream out = bucket.createFile(keyName, valueLength, RATIS,
         replication, true, true);
     out.write(value.getBytes(UTF_8));
@@ -1901,33 +1901,33 @@
     String volBase = "vol-list-";
     //Create 10 volume vol-list-a-0-<random> to vol-list-a-9-<random>
     String volBaseNameA = volBase + "a-";
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       store.createVolume(
           volBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5));
     }
     //Create 10 volume vol-list-b-0-<random> to vol-list-b-9-<random>
     String volBaseNameB = volBase + "b-";
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       store.createVolume(
           volBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5));
     }
     Iterator<? extends OzoneVolume> volIterator = store.listVolumes(volBase);
     int totalVolumeCount = 0;
-    while(volIterator.hasNext()) {
+    while (volIterator.hasNext()) {
       volIterator.next();
       totalVolumeCount++;
     }
     Assert.assertEquals(20, totalVolumeCount);
     Iterator<? extends OzoneVolume> volAIterator = store.listVolumes(
         volBaseNameA);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertTrue(volAIterator.next().getName()
           .startsWith(volBaseNameA + i + "-"));
     }
     Assert.assertFalse(volAIterator.hasNext());
     Iterator<? extends OzoneVolume> volBIterator = store.listVolumes(
         volBaseNameB);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertTrue(volBIterator.next().getName()
           .startsWith(volBaseNameB + i + "-"));
     }
@@ -1950,7 +1950,7 @@
 
     //Create 10 buckets in  vol-a-<random> and 10 in vol-b-<random>
     String bucketBaseNameA = "bucket-a-";
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       volA.createBucket(
           bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5));
       volB.createBucket(
@@ -1958,7 +1958,7 @@
     }
     //Create 10 buckets in vol-a-<random> and 10 in vol-b-<random>
     String bucketBaseNameB = "bucket-b-";
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       volA.createBucket(
           bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5));
       volB.createBucket(
@@ -1967,7 +1967,7 @@
     Iterator<? extends OzoneBucket> volABucketIter =
         volA.listBuckets("bucket-");
     int volABucketCount = 0;
-    while(volABucketIter.hasNext()) {
+    while (volABucketIter.hasNext()) {
       volABucketIter.next();
       volABucketCount++;
     }
@@ -1975,7 +1975,7 @@
     Iterator<? extends OzoneBucket> volBBucketIter =
         volA.listBuckets("bucket-");
     int volBBucketCount = 0;
-    while(volBBucketIter.hasNext()) {
+    while (volBBucketIter.hasNext()) {
       volBBucketIter.next();
       volBBucketCount++;
     }
@@ -1984,7 +1984,7 @@
     Iterator<? extends OzoneBucket> volABucketAIter =
         volA.listBuckets("bucket-a-");
     int volABucketACount = 0;
-    while(volABucketAIter.hasNext()) {
+    while (volABucketAIter.hasNext()) {
       volABucketAIter.next();
       volABucketACount++;
     }
@@ -1992,21 +1992,21 @@
     Iterator<? extends OzoneBucket> volBBucketBIter =
         volA.listBuckets("bucket-b-");
     int volBBucketBCount = 0;
-    while(volBBucketBIter.hasNext()) {
+    while (volBBucketBIter.hasNext()) {
       volBBucketBIter.next();
       volBBucketBCount++;
     }
     Assert.assertEquals(10, volBBucketBCount);
     Iterator<? extends OzoneBucket> volABucketBIter = volA.listBuckets(
         "bucket-b-");
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertTrue(volABucketBIter.next().getName()
           .startsWith(bucketBaseNameB + i + "-"));
     }
     Assert.assertFalse(volABucketBIter.hasNext());
     Iterator<? extends OzoneBucket> volBBucketAIter = volB.listBuckets(
         "bucket-a-");
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertTrue(volBBucketAIter.next().getName()
           .startsWith(bucketBaseNameA + i + "-"));
     }
@@ -2021,7 +2021,7 @@
     store.createVolume(volume);
     OzoneVolume vol = store.getVolume(volume);
     Iterator<? extends OzoneBucket> buckets = vol.listBuckets("");
-    while(buckets.hasNext()) {
+    while (buckets.hasNext()) {
       fail();
     }
   }
@@ -2115,7 +2115,7 @@
     Iterator<? extends OzoneKey> volABucketAIter =
         volAbucketA.listKeys("key-");
     int volABucketAKeyCount = 0;
-    while(volABucketAIter.hasNext()) {
+    while (volABucketAIter.hasNext()) {
       volABucketAIter.next();
       volABucketAKeyCount++;
     }
@@ -2123,7 +2123,7 @@
     Iterator<? extends OzoneKey> volABucketBIter =
         volAbucketB.listKeys("key-");
     int volABucketBKeyCount = 0;
-    while(volABucketBIter.hasNext()) {
+    while (volABucketBIter.hasNext()) {
       volABucketBIter.next();
       volABucketBKeyCount++;
     }
@@ -2131,7 +2131,7 @@
     Iterator<? extends OzoneKey> volBBucketAIter =
         volBbucketA.listKeys("key-");
     int volBBucketAKeyCount = 0;
-    while(volBBucketAIter.hasNext()) {
+    while (volBBucketAIter.hasNext()) {
       volBBucketAIter.next();
       volBBucketAKeyCount++;
     }
@@ -2139,7 +2139,7 @@
     Iterator<? extends OzoneKey> volBBucketBIter =
         volBbucketB.listKeys("key-");
     int volBBucketBKeyCount = 0;
-    while(volBBucketBIter.hasNext()) {
+    while (volBBucketBIter.hasNext()) {
       volBBucketBIter.next();
       volBBucketBKeyCount++;
     }
@@ -2147,14 +2147,14 @@
     Iterator<? extends OzoneKey> volABucketAKeyAIter =
         volAbucketA.listKeys("key-a-");
     int volABucketAKeyACount = 0;
-    while(volABucketAKeyAIter.hasNext()) {
+    while (volABucketAKeyAIter.hasNext()) {
       volABucketAKeyAIter.next();
       volABucketAKeyACount++;
     }
     Assert.assertEquals(10, volABucketAKeyACount);
     Iterator<? extends OzoneKey> volABucketAKeyBIter =
         volAbucketA.listKeys("key-b-");
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertTrue(volABucketAKeyBIter.next().getName()
           .startsWith("key-b-" + i + "-"));
     }
@@ -2171,7 +2171,7 @@
     vol.createBucket(bucket);
     OzoneBucket buc = vol.getBucket(bucket);
     Iterator<? extends OzoneKey> keys = buc.listKeys("");
-    while(keys.hasNext()) {
+    while (keys.hasNext()) {
       fail();
     }
   }
@@ -2821,11 +2821,11 @@
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
 
-    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(2, partName2);
 
-    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(3, partName3);
 
@@ -2872,11 +2872,11 @@
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(1, partName1);
 
-    String partName2 =uploadPart(bucket, keyName, uploadID, 2,
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(2, partName2);
 
-    String partName3 =uploadPart(bucket, keyName, uploadID, 3,
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
     partsMap.put(3, partName3);
 
@@ -3261,7 +3261,7 @@
     List<OzoneAcl> expectedAcls = getAclList(new OzoneConfiguration());
 
     // Case:1 Add new acl permission to existing acl.
-    if(expectedAcls.size()>0) {
+    if (expectedAcls.size() > 0) {
       OzoneAcl oldAcl = expectedAcls.get(0);
       OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
           ACLType.READ_ACL, oldAcl.getAclScope());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index 8ad62f2..b0d04da 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -162,7 +162,7 @@
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
   private static void shutdownCluster() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
@@ -246,28 +246,28 @@
 
     // xxxAcl will fail as current ugi user doesn't have the required access
     // for volume
-    try{
+    try {
       List<OzoneAcl> acls = store.getAcl(volObj);
     } catch (Exception ex) {
       verifyLog(OMAction.GET_ACL.name(), volumeName,
           AuditEventStatus.FAILURE.name());
     }
 
-    try{
+    try {
       store.addAcl(volObj, USER_ACL);
     } catch (Exception ex) {
       verifyLog(OMAction.ADD_ACL.name(), volumeName,
           AuditEventStatus.FAILURE.name());
     }
 
-    try{
+    try {
       store.removeAcl(volObj, USER_ACL);
     } catch (Exception ex) {
       verifyLog(OMAction.REMOVE_ACL.name(), volumeName,
           AuditEventStatus.FAILURE.name());
     }
 
-    try{
+    try {
       store.setAcl(volObj, aclListToAdd);
     } catch (Exception ex) {
       verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane",
@@ -279,19 +279,18 @@
   private void verifyLog(String... expected) throws Exception {
     File file = new File("audit.log");
     final List<String> lines = FileUtils.readLines(file, (String)null);
-    GenericTestUtils.waitFor(() ->
-        (lines != null) ? true : false, 100, 60000);
+    GenericTestUtils.waitFor(() -> lines != null, 100, 60000);
 
-    try{
+    try {
       // When log entry is expected, the log file will contain one line and
       // that must be equal to the expected string
       assertTrue(lines.size() != 0);
-      for(String exp: expected){
+      for (String exp : expected) {
         assertTrue(lines.get(0).contains(exp));
       }
-    } catch (AssertionError ex){
+    } catch (AssertionError ex) {
       LOG.error("Error occurred in log verification", ex);
-      if(lines.size() != 0){
+      if (lines.size() != 0) {
         LOG.error("Actual line ::: " + lines.get(0));
         LOG.error("Expected tokens ::: " + Arrays.toString(expected));
       }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java
index 6cd85a1..5ac78b8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java
@@ -191,7 +191,7 @@
   }
 
   private void testListStatus(String volumeName, String bucketName,
-      String keyName, boolean versioning) throws Exception{
+      String keyName, boolean versioning) throws Exception {
     OzoneVolume volume = objectStore.getVolume(volumeName);
     OzoneBucket ozoneBucket = volume.getBucket(bucketName);
     List<OzoneFileStatus> ozoneFileStatusList = ozoneBucket.listStatus(keyName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 751fd26..791a226 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -139,7 +139,7 @@
    */
   @After
   public void shutdown() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index a525499..5abc09e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -119,7 +119,7 @@
         .setCertificateClient(certificateClientTest)
         .build();
     secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf),
-        60 *60, certificateClientTest.getCertificate().
+        60 * 60, certificateClientTest.getCertificate().
         getSerialNumber().toString());
     secretManager.start(certificateClientTest);
     cluster.getOzoneManager().startSecretManager();
@@ -168,7 +168,7 @@
       OzoneKey key = bucket.getKey(keyName);
       Assert.assertEquals(keyName, key.getName());
       byte[] fileContent;
-      try(OzoneInputStream is = bucket.readKey(keyName)) {
+      try (OzoneInputStream is = bucket.readKey(keyName)) {
         fileContent = new byte[value.getBytes(UTF_8).length];
         is.read(fileContent);
       }
@@ -342,7 +342,7 @@
    */
   @AfterClass
   public static void shutdown() throws IOException {
-    if(ozClient != null) {
+    if (ozClient != null) {
       ozClient.close();
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index d6821c4..aa6ff93 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -364,7 +364,7 @@
           .watchForCommit(reply.getLogIndex() +
                   new Random().nextInt(100) + 10);
       Assert.fail("Expected exception not thrown");
-    } catch(Exception e) {
+    } catch (Exception e) {
       Assert.assertTrue(HddsClientUtils
           .checkForException(e) instanceof GroupMismatchException);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
index e9b82e4..7f0ab38 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
@@ -198,7 +198,7 @@
     byte[] expectedData = new byte[readDataLen];
     System.arraycopy(inputData, (int) offset, expectedData, 0, readDataLen);
 
-    for (int i=0; i < readDataLen; i++) {
+    for (int i = 0; i < readDataLen; i++) {
       Assert.assertEquals("Read data at does not match the input data at " +
               "position " + (offset + i), expectedData[i], readData[i]);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
index f16c3ed..4b83429 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
@@ -77,18 +77,18 @@
   private void randomSeek(int dataLength, KeyInputStream keyInputStream,
       byte[] inputData) throws Exception {
     // Do random seek.
-    for (int i=0; i<dataLength - 300; i+=20) {
+    for (int i = 0; i < dataLength - 300; i += 20) {
       validate(keyInputStream, inputData, i, 200);
     }
 
     // Seek to end and read in reverse order. And also this is partial chunks
     // as readLength is 20, chunk length is 100.
-    for (int i=dataLength - 100; i>=100; i-=20) {
+    for (int i = dataLength - 100; i >= 100; i -= 20) {
       validate(keyInputStream, inputData, i, 20);
     }
 
     // Start from begin and seek such that we read partially chunks.
-    for (int i=0; i<dataLength - 300; i+=20) {
+    for (int i = 0; i < dataLength - 300; i += 20) {
       validate(keyInputStream, inputData, i, 90);
     }
 
@@ -173,7 +173,7 @@
     KeyInputStream keyInputStream = getKeyInputStream(keyName);
 
     // Seek to some where end.
-    validate(keyInputStream, inputData, dataLength-200, 100);
+    validate(keyInputStream, inputData, dataLength - 200, 100);
 
     // Now seek to start.
     validate(keyInputStream, inputData, 0, 140);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index cf81bd9..7a4e1c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -20,7 +20,13 @@
 
 import java.io.IOException;
 import java.security.MessageDigest;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 3382180..d629f2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -263,7 +263,7 @@
       Container dnContainer = cluster.getHddsDatanodes().get(index)
           .getDatanodeStateMachine().getContainer().getContainerSet()
           .getContainer(containerID);
-      try(ReferenceCountedDB store = BlockUtils.getDB(
+      try (ReferenceCountedDB store = BlockUtils.getDB(
           (KeyValueContainerData) dnContainer.getContainerData(), conf)) {
         metadataStores.add(store);
       }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 0a0e0fa..d298ac5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -282,7 +282,7 @@
   private Boolean isContainerClosed(HddsDatanodeService hddsDatanodeService,
       long containerID) {
     ContainerData containerData;
-    containerData =hddsDatanodeService
+    containerData = hddsDatanodeService
         .getDatanodeStateMachine().getContainer().getContainerSet()
         .getContainer(containerID).getContainerData();
     return !containerData.isOpen();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
new file mode 100644
index 0000000..f8a90f4
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.util.HashMap;
+import java.util.concurrent.TimeoutException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+
+/**
+ * Test the behaviour of the datanode and SCM when communicating
+ * via the refresh volume usage command.
+ */
+public class TestRefreshVolumeUsageHandler {
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+
+  private MiniOzoneCluster cluster;
+  private OzoneConfiguration conf;
+
+  @Before
+  public void setup() throws Exception {
+    //set up a cluster (1G free space is enough for a unit test)
+    conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    conf.set(HDDS_NODE_REPORT_INTERVAL, "1s");
+    conf.set("hdds.datanode.du.factory.classname",
+        "org.apache.hadoop.ozone.container.common.volume.HddsVolumeFactory");
+    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
+    conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(ONE, 30000);
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void test() throws Exception {
+    cluster.waitForClusterToBeReady();
+    DatanodeDetails datanodeDetails =
+        cluster.getHddsDatanodes().get(0).getDatanodeDetails();
+    Long currentScmUsed = cluster.getStorageContainerManager()
+        .getScmNodeManager().getUsageInfo(datanodeDetails)
+        .getScmNodeStat().getScmUsed().get();
+
+    //creating a key to take some storage space
+    OzoneClient client = OzoneClientFactory.getRpcClient(conf);
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume("test");
+    objectStore.getVolume("test").createBucket("test");
+    OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
+        .createKey("test", 4096, ReplicationType.RATIS,
+            ReplicationFactor.ONE, new HashMap<>());
+    key.write("test".getBytes(UTF_8));
+    key.close();
+
+    //a new key is created, but the datanode's default REFRESH_PERIOD is 1 hour,
+    //so SCM does not get the latest usage info of this datanode for now.
+    Assert.assertTrue(cluster.getStorageContainerManager()
+            .getScmNodeManager().getUsageInfo(datanodeDetails)
+            .getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
+
+    try {
+      GenericTestUtils.waitFor(() -> isUsageInfoRefreshed(cluster,
+          datanodeDetails, currentScmUsed), 500, 5 * 1000);
+    } catch (TimeoutException te) {
+      //no op, this shows that if we do not trigger the refresh volume
+      //usage command, we cannot get the latest usage info within
+      //a refresh period
+    } catch (InterruptedException ie) {
+      //no op
+    }
+
+    //after waiting for several node reports, the usage info in SCM
+    //is still not updated
+    Assert.assertTrue(cluster.getStorageContainerManager()
+        .getScmNodeManager().getUsageInfo(datanodeDetails)
+        .getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
+
+    //send the refresh volume usage command to the datanode
+    cluster.getStorageContainerManager()
+        .getScmNodeManager().refreshAllHealthyDnUsageInfo();
+
+    //wait until the new usage info is refreshed
+    GenericTestUtils.waitFor(() -> isUsageInfoRefreshed(cluster,
+        datanodeDetails, currentScmUsed), 500, 5 * 1000);
+  }
+
+  private static Boolean isUsageInfoRefreshed(MiniOzoneCluster cluster,
+                                              DatanodeDetails datanodeDetails,
+                                              long currentScmUsed) {
+    return cluster.getStorageContainerManager().getScmNodeManager()
+      .getUsageInfo(datanodeDetails).getScmNodeStat()
+      .getScmUsed().isGreater(currentScmUsed);
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index de3b37b..2822731 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -35,7 +35,8 @@
       .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
       .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.*;
+import org.apache.hadoop.hdds.scm.XceiverClientRatis;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index b6a5a03..fb015bb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -112,7 +112,7 @@
             Handler.getHandlerForContainerType(containerType, conf,
                 context.getParent().getDatanodeDetails().getUuidString(),
                 containerSet, volumeSet, metrics,
-                c -> {}));
+                c -> { }));
       }
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
           volumeSet, handlers, context, metrics, null);
@@ -182,7 +182,7 @@
       }
       // clean up volume dir
       File file = new File(path);
-      if(file.exists()) {
+      if (file.exists()) {
         FileUtil.fullyDelete(file);
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 4f0c437..4240fca 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -40,7 +40,11 @@
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
@@ -231,18 +235,9 @@
           getChunksCount();
       ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
 
-
-      // Delete Block
-      request =
-          ContainerTestHelper.getDeleteBlockRequest(
-              pipeline, putBlockRequest.getPutBlock());
-      response = client.sendCommand(request);
-      Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-
-      //Delete Chunk
-      request = ContainerTestHelper.getDeleteChunkRequest(
-          pipeline, writeChunkRequest.getWriteChunk());
+      // Delete Block and Delete Chunk are handled by BlockDeletingService;
+      // the ContainerCommandRequestProto DeleteBlock and DeleteChunk requests
+      // are deprecated.
 
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
@@ -397,10 +392,6 @@
           .getChunksCount();
       ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
 
-      // Delete block must fail on a closed container.
-      request =
-          ContainerTestHelper.getDeleteBlockRequest(client.getPipeline(),
-              putBlockRequest.getPutBlock());
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
@@ -487,7 +478,7 @@
       final List<CompletableFuture> computeResults = new LinkedList<>();
       int requestCount = 1000;
       // Create a bunch of Async calls from this test.
-      for(int x = 0; x <requestCount; x++) {
+      for (int x = 0; x < requestCount; x++) {
         BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
         final ContainerProtos.ContainerCommandRequestProto smallFileRequest
             = ContainerTestHelper.getWriteSmallFileRequest(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 7373b20..5b65d92 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -38,7 +38,11 @@
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.*;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index ddfbc45..5de250a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -212,7 +212,7 @@
             Handler.getHandlerForContainerType(containerType, conf,
                 context.getParent().getDatanodeDetails().getUuidString(),
                 containerSet, volumeSet, metrics,
-                c -> {}));
+                c -> { }));
       }
       HddsDispatcher dispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index a5fcbb9..cd7c995 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -80,8 +80,6 @@
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getCreateContainerRequest;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestBlockID;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestContainerID;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.newDeleteBlockRequestBuilder;
-import static org.apache.hadoop.ozone.container.ContainerTestHelper.newDeleteChunkRequestBuilder;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.newGetBlockRequestBuilder;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.newGetCommittedBlockLengthBuilder;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.newPutBlockRequestBuilder;
@@ -169,7 +167,7 @@
                     .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
         XceiverClientGrpc::new,
         (dn, conf) -> new XceiverServerGrpc(dd, conf,
-            hddsDispatcher, caClient), (dn, p) -> {}, (p) -> {});
+            hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { });
   }
 
   private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
@@ -194,7 +192,7 @@
           Handler.getHandlerForContainerType(containerType, conf,
               dd.getUuid().toString(),
               containerSet, volumeSet, metrics,
-              c -> {}));
+              c -> { }));
     }
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
         conf, containerSet, volumeSet, handlers, context, metrics,
@@ -234,7 +232,7 @@
         XceiverClientRatis::newXceiverClientRatis,
         TestSecureContainerServer::newXceiverServerRatis,
         (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p),
-        (p) -> {});
+        (p) -> { });
   }
 
   private static void runTestClientServer(
@@ -297,14 +295,6 @@
       ContainerCommandRequestProto.Builder getCommittedBlockLength =
           newGetCommittedBlockLengthBuilder(pipeline, putBlock.getPutBlock());
       assertRequiresToken(client, encodedToken, getCommittedBlockLength);
-
-      ContainerCommandRequestProto.Builder deleteChunk =
-          newDeleteChunkRequestBuilder(pipeline, writeChunk.getWriteChunk());
-      assertRequiresToken(client, encodedToken, deleteChunk);
-
-      ContainerCommandRequestProto.Builder deleteBlock =
-          newDeleteBlockRequestBuilder(pipeline, putBlock.getPutBlock());
-      assertRequiresToken(client, encodedToken, deleteBlock);
     } finally {
       stopServer.accept(pipeline);
       servers.forEach(XceiverServerSpi::stop);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
index af39055..2a58730 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
@@ -224,7 +224,7 @@
     try {
       c1.close();
       Assert.fail();
-    } catch(Exception e) {
+    } catch (Exception e) {
       Assert.assertTrue(e instanceof IOException);
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
index a01d492..f67783b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
@@ -152,28 +152,28 @@
                         int span, int actualDepth) throws IOException {
     int depth = 0;
     Path p = null;
-    if(span > 0){
+    if (span > 0) {
       depth = 0;
-    } else if(span == 0){
+    } else if (span == 0) {
       depth = 1;
-    } else{
+    } else {
       LOG.info("Span value can never be negative");
     }
     LinkedList<FileStatus> queue = new LinkedList<FileStatus>();
     FileStatus f1 = fileStatuses[0];
     queue.add(f1);
-    while(queue.size() != 0){
+    while (queue.size() != 0) {
       FileStatus f = queue.poll();
       FileStatus[] temp = fs.listStatus(f.getPath());
-      if(temp.length > 0){
+      if (temp.length > 0) {
         ++depth;
-        for(int i = 0; i < temp.length; i++){
+        for (int i = 0; i < temp.length; i++) {
           queue.add(temp[i]);
         }
       }
-      if(span == 0){
+      if (span == 0) {
         p = f.getPath();
-      } else{
+      } else {
         p = f.getPath().getParent();
       }
     }
@@ -188,17 +188,17 @@
      * and count the span directories.
      */
 
-  private int spanCheck(FileSystem fs, int span, Path p) throws IOException{
+  private int spanCheck(FileSystem fs, int span, Path p) throws IOException {
     int sp = 0;
     int depth = 0;
-    if(span >= 0){
+    if (span >= 0) {
       depth = 0;
-    } else{
+    } else {
       LOG.info("Span value can never be negative");
     }
     FileStatus[] fileStatuses = fs.listStatus(p);
-    for (FileStatus fileStatus : fileStatuses){
-      if(fileStatus.isDirectory()){
+    for (FileStatus fileStatus : fileStatuses) {
+      if (fileStatus.isDirectory()) {
         ++sp;
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
index b182047..bb780e4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
@@ -25,7 +25,11 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.ozone.test.GenericTestUtils;
@@ -35,13 +39,19 @@
 import org.junit.Test;
 import java.io.File;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 /**
  * Test cases for ContainerMapper.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
index 0cbc0da..db93a22 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
@@ -20,14 +20,22 @@
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,7 +44,9 @@
 import java.util.UUID;
 
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
 import static org.junit.Assert.fail;
@@ -49,16 +59,15 @@
 
   private static MiniOzoneCluster cluster;
   private static final Logger LOG =
-          LoggerFactory.getLogger(TestBucketOwner.class);
+      LoggerFactory.getLogger(TestBucketOwner.class);
   private static  UserGroupInformation adminUser =
-          UserGroupInformation.createUserForTesting("om",
-          new String[] {"ozone"});
+      UserGroupInformation.createUserForTesting("om", new String[]{"ozone"});
   private static  UserGroupInformation user1 = UserGroupInformation
-          .createUserForTesting("user1", new String[] {"test1"});
+      .createUserForTesting("user1", new String[] {"test1"});
   private static UserGroupInformation user2 = UserGroupInformation
-          .createUserForTesting("user2", new String[] {"test2"});
+      .createUserForTesting("user2", new String[] {"test2"});
   private static UserGroupInformation user3 = UserGroupInformation
-          .createUserForTesting("user3", new String[] {"test3"});
+      .createUserForTesting("user3", new String[] {"test3"});
   private static OzoneClient client;
   private static ObjectStore objectStore;
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 9104d98..334dfe0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -27,7 +27,9 @@
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index d691f2c..68cc824 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -348,7 +348,7 @@
     OmKeyArgs keyArgs = createBuilder()
         .setKeyName(keyNameBuf.toString())
         .build();
-    for (int i =0; i< 5; i++) {
+    for (int i = 0; i < 5; i++) {
       keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5));
     }
     String keyName = keyNameBuf.toString();
@@ -418,7 +418,7 @@
     // recursive flag is set to false
     StringBuffer keyNameBuf = new StringBuffer();
     keyNameBuf.append(RandomStringUtils.randomAlphabetic(5));
-    for (int i =0; i< 5; i++) {
+    for (int i = 0; i < 5; i++) {
       keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5));
     }
     keyName = keyNameBuf.toString();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
index 69aee26..1f433d3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
@@ -18,10 +18,18 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.junit.*;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
index a59983f..beaf10c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
@@ -70,7 +70,7 @@
 
   @After
   public void shutdown() throws Exception {
-    if (dbStore!=null){
+    if (dbStore != null) {
       dbStore.close();
     }
   }
@@ -78,7 +78,7 @@
   @Test
   public void testOMDB() throws Exception {
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     // Dummy om.db with only keyTable
@@ -88,11 +88,11 @@
       .addTable("keyTable")
       .build();
     // insert 5 keys
-    for (int i = 0; i<5; i++) {
+    for (int i = 0; i < 5; i++) {
       OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("sampleVol",
-          "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE,
+          "sampleBuck", "key" + (i + 1), HddsProtos.ReplicationType.STAND_ALONE,
           HddsProtos.ReplicationFactor.ONE);
-      String key = "key"+ (i);
+      String key = "key" + (i);
       Table<byte[], byte[]> keyTable = dbStore.getTable("keyTable");
       byte[] arr = value.getProtobuf(CURRENT_VERSION).toByteArray();
       keyTable.put(key.getBytes(UTF_8), arr);
@@ -111,7 +111,7 @@
     try {
       getKeyNames(dbScanner);
       Assert.fail("IllegalArgumentException is expected");
-    }catch (IllegalArgumentException e){
+    } catch (IllegalArgumentException e) {
       //ignore
     }
 
@@ -177,7 +177,7 @@
     scanner.setTableName("keyTable");
     scanner.call();
     Assert.assertFalse(scanner.getScannedObjects().isEmpty());
-    for (Object o : scanner.getScannedObjects()){
+    for (Object o : scanner.getScannedObjects()) {
       OmKeyInfo keyInfo = (OmKeyInfo)o;
       keyNames.add(keyInfo.getKeyName());
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index 9410973..9b9d6d8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -521,7 +521,7 @@
     }
 
     try {
-      long id = (keySession != null)?keySession.getId():0;
+      long id = (keySession != null) ? keySession.getId() : 0;
       writeClient.commitKey(keyArgs, id);
     } catch (IOException ignored) {
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java
index adccc7a..f773861 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java
@@ -70,7 +70,7 @@
     try {
       clusterBuilder.build();
       Assert.fail("Expected OMException due to incorrect MLV on OM creation.");
-    } catch(OMException e) {
+    } catch (OMException e) {
       String expectedMessage = String.format("Cannot initialize " +
               "VersionManager. Metadata layout version (%s) > software layout" +
               " version (%s)", mlv, largestSlv);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index 36a17a8..389217a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -240,7 +240,7 @@
             OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
         break;
       case omNode2Id :
-        expectedPeerAddress = "0.0.0.0:"+
+        expectedPeerAddress = "0.0.0.0:" +
             OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
         break;
       case omNode3Id :
@@ -323,7 +323,7 @@
             OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
         break;
       case omNode2Id :
-        expectedPeerAddress = "0.0.0.0:"+
+        expectedPeerAddress = "0.0.0.0:" +
             OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
         break;
       case omNode3Id :
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java
index 06a3a97..3af7f01 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java
@@ -184,7 +184,7 @@
   public void testAddPrefixAcl() throws Exception {
     OzoneBucket ozoneBucket = setupBucket();
     String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) +"/";
+    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
     OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
         READ, DEFAULT);
 
@@ -201,7 +201,7 @@
   public void testRemovePrefixAcl() throws Exception {
     OzoneBucket ozoneBucket = setupBucket();
     String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) +"/";
+    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
     OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName,
         READ, ACCESS);
     OzoneAcl userAcl1 = new OzoneAcl(USER, "remote",
@@ -237,7 +237,7 @@
   public void testSetPrefixAcl() throws Exception {
     OzoneBucket ozoneBucket = setupBucket();
     String remoteUserName = "remoteUser";
-    String prefixName = RandomStringUtils.randomAlphabetic(5) +"/";
+    String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
     OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName,
         READ, DEFAULT);
 
@@ -304,7 +304,7 @@
   }
 
   private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj,
-      OzoneAcl userAcl) throws Exception{
+      OzoneAcl userAcl) throws Exception {
     ObjectStore objectStore = getObjectStore();
 
     // As by default create will add some default acls in RpcClient.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
index 7dbacc4..0c2f526 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
@@ -537,7 +537,7 @@
 
     Assert.assertTrue(partInfoList.size() == partsMap.size());
 
-    for (int i=0; i< partsMap.size(); i++) {
+    for (int i = 0; i < partsMap.size(); i++) {
       Assert.assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()),
           partInfoList.get(i).getPartName());
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
index 9b0d8bc..9de622c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
@@ -88,7 +88,7 @@
 
     ObjectMapper objectMapper = new ObjectMapper();
     TypeReference<List<ServiceInfo>> serviceInfoReference =
-        new TypeReference<List<ServiceInfo>>() {};
+        new TypeReference<List<ServiceInfo>>() { };
     List<ServiceInfo> serviceInfos = objectMapper.readValue(
         serviceListJson, serviceInfoReference);
     Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index 2face8f..c23595f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -20,6 +20,7 @@
 
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -32,6 +33,7 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.ozone.test.GenericTestUtils;
 
 import org.apache.commons.lang3.RandomStringUtils;
@@ -44,6 +46,9 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import org.junit.AfterClass;
 import org.junit.Assert;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME;
 import static org.junit.Assert.fail;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -171,7 +176,11 @@
   public void testRestartOMWithKeyOperation() throws Exception {
     String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String key = "key" + RandomStringUtils.randomNumeric(5);
+    String key1 = "key1" + RandomStringUtils.randomNumeric(5);
+    String key2 = "key2" + RandomStringUtils.randomNumeric(5);
+
+    String newKey1 = "key1new" + RandomStringUtils.randomNumeric(5);
+    String newKey2 = "key2new" + RandomStringUtils.randomNumeric(5);
 
     OzoneClient client = cluster.getClient();
 
@@ -188,12 +197,29 @@
     Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
 
     String data = "random data";
-    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+    OzoneOutputStream ozoneOutputStream1 = ozoneBucket.createKey(key1,
         data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
         new HashMap<>());
 
-    ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
-    ozoneOutputStream.close();
+    ozoneOutputStream1.write(data.getBytes(UTF_8), 0, data.length());
+    ozoneOutputStream1.close();
+
+    Map<String, String> keyMap = new HashMap<>();
+    keyMap.put(key1, newKey1);
+    keyMap.put(key2, newKey2);
+
+    try {
+      ozoneBucket.renameKeys(keyMap);
+    } catch (OMException ex) {
+      Assert.assertEquals(PARTIAL_RENAME, ex.getResult());
+    }
+
+    // Get original Key1, it should not exist
+    try {
+      ozoneBucket.getKey(key1);
+    } catch (OMException ex) {
+      Assert.assertEquals(KEY_NOT_FOUND, ex.getResult());
+    }
 
     cluster.restartOzoneManager();
     cluster.restartStorageContainerManager(true);
@@ -202,10 +228,17 @@
     // As we allow override of keys, not testing re-create key. We shall see
     // after restart key exists or not.
 
-    // Get key.
-    OzoneKey ozoneKey = ozoneBucket.getKey(key);
-    Assert.assertTrue(ozoneKey.getName().equals(key));
+    // Get newKey1.
+    OzoneKey ozoneKey = ozoneBucket.getKey(newKey1);
+    Assert.assertTrue(ozoneKey.getName().equals(newKey1));
     Assert.assertTrue(ozoneKey.getReplicationType().equals(
         ReplicationType.RATIS));
+
+    // Get newKey2, it should not exist
+    try {
+      ozoneBucket.getKey(newKey2);
+    } catch (OMException ex) {
+      Assert.assertEquals(KEY_NOT_FOUND, ex.getResult());
+    }
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
index c932504..24d2739 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java
@@ -52,7 +52,8 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.apache.ozone.test.GenericTestUtils.*;
+import static org.apache.ozone.test.GenericTestUtils.LogCapturer;
+import static org.apache.ozone.test.GenericTestUtils.getTempPath;
 
 /**
  * Test secure Ozone Manager operation in distributed handler scenario.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
index 08f465f..db5c327 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
@@ -69,7 +69,7 @@
     testSnapshot(ozoneCluster);
   }
 
-  public static void testSnapshot(MiniOzoneCluster cluster) throws Exception{
+  public static void testSnapshot(MiniOzoneCluster cluster) throws Exception {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(LoggerFactory.getLogger(
         ReconStorageContainerManagerFacade.class));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
index 2818ed0..701a799 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
@@ -24,6 +24,8 @@
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_CONNECTION_TIMEOUT_DEFAULT;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SOCKET_TIMEOUT;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SOCKET_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT;
 import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmDeltaRequest;
 import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmSnapshotRequest;
 
@@ -52,6 +54,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpResponse;
@@ -109,6 +112,9 @@
             OZONE_RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT),
         TimeUnit.MILLISECONDS
     );
+    conf.setLong(RECON_OM_DELTA_UPDATE_LIMIT, 2);
+    conf.setLong(RECON_OM_DELTA_UPDATE_LOOP_LIMIT, 10);
+
     RequestConfig config = RequestConfig.custom()
         .setConnectTimeout(socketTimeout)
         .setConnectionRequestTimeout(connectionTimeout)
@@ -196,6 +202,7 @@
     OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl)
         cluster.getReconServer().getOzoneManagerServiceProvider();
     impl.syncDataFromOM();
+    OzoneManagerSyncMetrics metrics = impl.getMetrics();
 
     // HTTP call to /api/containers
     String containerResponse = makeHttpCall(containerKeyServiceURL);
@@ -224,6 +231,7 @@
 
     // verify sequence number after full snapshot
     Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
+    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
 
     //add 4 keys to check for delta updates
     addKeys(1, 5);
@@ -259,6 +267,7 @@
 
     //verify sequence number after Delta Updates
     Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
+    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
 
     long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson(
         taskStatusResponse,
@@ -316,6 +325,7 @@
 
     //verify sequence number after Delta Updates
     Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
+    Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
   }
 
   private long getReconTaskAttributeFromJson(String taskStatusResponse,
@@ -350,7 +360,7 @@
    * For test purpose each container will have only one key.
    */
   private void addKeys(int start, int end) throws Exception {
-    for(int i = start; i < end; i++) {
+    for (int i = start; i < end; i++) {
       Pipeline pipeline = HddsTestUtils.getRandomPipeline();
       List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
       BlockID blockID = new BlockID(i, 1);
@@ -359,7 +369,7 @@
       omKeyLocationInfoList.add(omKeyLocationInfo1);
       OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
           OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
-      writeDataToOm("key"+i, "bucket"+i, "vol"+i,
+      writeDataToOm("key" + i, "bucket" + i, "vol" + i,
           Collections.singletonList(omKeyLocationInfoGroup));
     }
   }
@@ -367,7 +377,7 @@
   private long getTableKeyCount(TableIterator<String, ? extends
       Table.KeyValue<String, OmKeyInfo>> iterator) {
     long keyCount = 0;
-    while(iterator.hasNext()) {
+    while (iterator.hasNext()) {
       keyCount++;
       iterator.next();
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
index c16583c..e2d59df 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
@@ -133,7 +133,7 @@
     Assert.assertEquals(0, entity.getNumTotalDir());
     for (int i = 0; i < 10; i++) {
       Assert.assertNotNull(impl.getOMMetadataManagerInstance()
-              .getVolumeTable().get("/vol"+ i));
+              .getVolumeTable().get("/vol" + i));
     }
     addKeys(10, 12, "dir");
     impl.syncDataFromOM();
@@ -141,7 +141,7 @@
     // test Recon is sync'ed with OM.
     for (int i = 10; i < 12; i++) {
       Assert.assertNotNull(impl.getOMMetadataManagerInstance()
-              .getVolumeTable().getSkipCache("/vol"+ i));
+              .getVolumeTable().getSkipCache("/vol" + i));
     }
 
     // test root response
@@ -161,8 +161,8 @@
    * For test purpose each container will have only one key.
    */
   private void addKeys(int start, int end, String dirPrefix) throws Exception {
-    for(int i = start; i < end; i++) {
-      writeKeys("vol"+i, "bucket"+i, dirPrefix + i + "/key"+i);
+    for (int i = start; i < end; i++) {
+      writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
     }
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
index 81caef9..9623a6d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
@@ -54,7 +54,7 @@
  */
 public class TestReconWithOzoneManagerHA {
   @Rule
-  public Timeout timeout = Timeout.seconds(300);;
+  public Timeout timeout = Timeout.seconds(300);
 
   private MiniOzoneHAClusterImpl cluster;
   private ObjectStore objectStore;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index e480d51..6586055 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -64,7 +64,7 @@
 
   @AfterClass
   public static void shutdown() throws InterruptedException {
-    if(cluster != null) {
+    if (cluster != null) {
       cluster.shutdown();
     }
     IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
index 94b9984..2e67a88 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
@@ -102,8 +102,8 @@
   public void testReplicasAreReportedForClosedContainerAfterRestart()
       throws Exception {
     // Create some keys to write data into the open containers
-    for (int i=0; i<10; i++) {
-      TestDataUtil.createKey(bucket, "key"+i, ReplicationFactor.THREE,
+    for (int i = 0; i < 10; i++) {
+      TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE,
           ReplicationType.RATIS, "this is the content");
     }
     StorageContainerManager scm = cluster.getStorageContainerManager();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 8ba2d52..9e00867 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -151,14 +151,14 @@
     ContainerManager scmContainerManager = scm.getContainerManager();
 
     List<ContainerInfo> containerInfoList = new ArrayList<>();
-    for (int i=0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       containerInfoList.add(
           scmContainerManager.allocateContainer(new StandaloneReplicationConfig(
                   ReplicationFactor.ONE),
               UUID.randomUUID().toString()));
     }
     long containerID;
-    for (int i=0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       if (i % 2 == 0) {
         containerID = containerInfoList.get(i).getContainerID();
         scmContainerManager.updateContainerState(
@@ -184,7 +184,7 @@
     containerStateCount = scm.getContainerStateCount();
 
     containerStateCount.forEach((k, v) -> {
-      if(k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) {
+      if (k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) {
         assertEquals((int)v, 5);
       } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) {
         assertEquals((int)v, 5);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
index a3bd295..8b7e0b4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
@@ -61,7 +61,7 @@
     dns.add(MockDatanodeDetails.randomDatanodeDetails());
 
     dnsInOrder = new ArrayList<>();
-    for (int i=2; i>=0; i--) {
+    for (int i = 2; i >= 0; i--) {
       dnsInOrder.add(dns.get(i));
     }
 
@@ -82,14 +82,14 @@
     Assert.assertNotEquals(dns.get(0), dnsInOrder.get(0));
   }
 
-  @Test(timeout=5000)
+  @Test(timeout = 5000)
   public void testRandomFirstNodeIsCommandTarget() throws IOException {
     final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
     // Using a new Xceiver Client, call it repeatedly until all DNs in the
     // pipeline have been the target of the command, indicating it is shuffling
     // the DNs on each call with a new client. This test will timeout if this
     // is not happening.
-    while(allDNs.size() > 0) {
+    while (allDNs.size() > 0) {
       XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
         @Override
         public XceiverClientReply sendCommandAsync(
@@ -112,7 +112,7 @@
     // With a new Client, make 100 calls and ensure the first sortedDN is used
     // each time. The logic should always use the sorted node, so we can check
     // only a single DN is ever seen after 100 calls.
-    for (int i=0; i<100; i++) {
+    for (int i = 0; i < 100; i++) {
       XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
         @Override
         public XceiverClientReply sendCommandAsync(
@@ -131,7 +131,7 @@
   public void testConnectionReusedAfterGetBlock() throws IOException {
     // With a new Client, make 100 calls. On each call, ensure that only one
     // DN is seen, indicating the same DN connection is reused.
-    for (int i=0; i<100; i++) {
+    for (int i = 0; i < 100; i++) {
       final Set<DatanodeDetails> seenDNs = new HashSet<>();
       XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
         @Override
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
index 91bb899..cb91593 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
@@ -75,7 +75,11 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import static org.junit.Assert.fail;
 
 /**
@@ -358,14 +362,14 @@
     Set<ContainerReplica> replicas = getContainerReplicas(container);
 
     List<DatanodeDetails> forMaintenance = new ArrayList<>();
-    replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails()));
+    replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
 
     scmClient.startMaintenanceNodes(forMaintenance.stream()
         .map(d -> getDNHostAndPort(d))
         .collect(Collectors.toList()), 0);
 
     // Ensure all 3 DNs go to maintenance
-    for(DatanodeDetails dn : forMaintenance) {
+    for (DatanodeDetails dn : forMaintenance) {
       waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
     }
 
@@ -379,7 +383,7 @@
         .collect(Collectors.toList()));
 
     // Ensure all 3 DNs go to maintenance
-    for(DatanodeDetails dn : forMaintenance) {
+    for (DatanodeDetails dn : forMaintenance) {
       waitForDnToReachOpState(dn, IN_SERVICE);
     }
 
@@ -400,26 +404,26 @@
     Set<ContainerReplica> replicas = getContainerReplicas(container);
 
     List<DatanodeDetails> forMaintenance = new ArrayList<>();
-    replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails()));
+    replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
 
     scmClient.startMaintenanceNodes(forMaintenance.stream()
         .map(d -> getDNHostAndPort(d))
         .collect(Collectors.toList()), 0);
 
     // Ensure all 3 DNs go to entering_maintenance
-    for(DatanodeDetails dn : forMaintenance) {
+    for (DatanodeDetails dn : forMaintenance) {
       waitForDnToReachPersistedOpState(dn, ENTERING_MAINTENANCE);
     }
     cluster.restartStorageContainerManager(true);
     setManagers();
 
     List<DatanodeDetails> newDns = new ArrayList<>();
-    for(DatanodeDetails dn : forMaintenance) {
+    for (DatanodeDetails dn : forMaintenance) {
       newDns.add(nm.getNodeByUuid(dn.getUuid().toString()));
     }
 
     // Ensure all 3 DNs go to maintenance
-    for(DatanodeDetails dn : newDns) {
+    for (DatanodeDetails dn : newDns) {
       waitForDnToReachOpState(dn, IN_MAINTENANCE);
     }
 
@@ -550,7 +554,7 @@
    */
   private void generateData(int keyCount, String keyPrefix,
       ReplicationFactor repFactor, ReplicationType repType) throws IOException {
-    for (int i=0; i<keyCount; i++) {
+    for (int i = 0; i < keyCount; i++) {
       TestDataUtil.createKey(bucket, keyPrefix + i, repFactor, repType,
           "this is the content");
     }
@@ -610,7 +614,7 @@
    * @return host:port for the given DN.
    */
   private String getDNHostAndPort(DatanodeDetails dn) {
-    return dn.getHostName()+":"+dn.getPorts().get(0).getValue();
+    return dn.getHostName() + ":" + dn.getPorts().get(0).getValue();
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index 3ead3f4..56a61d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -92,7 +92,7 @@
 
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(numOfDatanodes)
-        .setTotalPipelineNumLimit(numOfDatanodes + numOfDatanodes/2)
+        .setTotalPipelineNumLimit(numOfDatanodes + numOfDatanodes / 2)
         .build();
     cluster.waitForClusterToBeReady();
     scmClient = new ContainerOperationClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java
index 60fffdd..69c4a76 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hdds.cli.OzoneAdmin;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.TestStandardOutputUtil;
+import org.apache.hadoop.ozone.StandardOutputTestBase;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -40,7 +40,7 @@
 /**
  * Test for Namespace CLI.
  */
-public class TestNSSummaryAdmin extends TestStandardOutputUtil {
+public class TestNSSummaryAdmin extends StandardOutputTestBase {
   private static ObjectStore store;
 
   private static OzoneAdmin ozoneAdmin;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index f525942..c0f6342 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -264,7 +265,7 @@
    * @return String array.
    */
   private String[] getHASetConfStrings(int numOfArgs) {
-    assert(numOfArgs >= 0);
+    assert (numOfArgs >= 0);
     String[] res = new String[1 + 1 + numOfOMs + numOfArgs];
     final int indexOmServiceIds = 0;
     final int indexOmNodes = 1;
@@ -280,7 +281,7 @@
 
     String[] omNodesArr = omNodesVal.split(",");
     // Sanity check
-    assert(omNodesArr.length == numOfOMs);
+    assert (omNodesArr.length == numOfOMs);
     for (int i = 0; i < numOfOMs; i++) {
       res[indexOmAddressStart + i] =
           getSetConfStringFromConf(ConfUtils.addKeySuffixes(
@@ -575,17 +576,22 @@
       Assert.assertEquals(0, res);
       // Verify key1 creation
       FileStatus statusPathKey1 = fs.getFileStatus(pathKey1);
+
+      FileChecksum previousFileChecksum = fs.getFileChecksum(pathKey1);
+
       Assert.assertEquals(strKey1, statusPathKey1.getPath().toString());
       // rm without -skipTrash. since trash interval > 0, should moved to trash
       res = ToolRunner.run(shell, new String[]{"-rm", strKey1});
       Assert.assertEquals(0, res);
+
+      FileChecksum afterFileChecksum = fs.getFileChecksum(trashPathKey1);
+
       // Verify that the file is moved to the correct trash location
       FileStatus statusTrashPathKey1 = fs.getFileStatus(trashPathKey1);
       // It'd be more meaningful if we actually write some content to the file
       Assert.assertEquals(
           statusPathKey1.getLen(), statusTrashPathKey1.getLen());
-      Assert.assertEquals(
-          fs.getFileChecksum(pathKey1), fs.getFileChecksum(trashPathKey1));
+      Assert.assertEquals(previousFileChecksum, afterFileChecksum);
 
       // Check delete skip trash behavior
       res = ToolRunner.run(shell, new String[]{"-touch", strKey2});
@@ -624,7 +630,7 @@
 
     // create volume: vol1 with bucket: bucket1
     final String testVolBucket = "/vol1/bucket1";
-    final String testKey = testVolBucket+"/key1";
+    final String testKey = testVolBucket + "/key1";
 
     final String[] volBucketArgs = new String[] {"-mkdir", "-p", testVolBucket};
     final String[] keyArgs = new String[] {"-touch", testKey};
@@ -652,7 +658,7 @@
 
     final String[] rmKeyArgs = new String[] {"-rm", "-R", testKey};
     final String[] rmTrashArgs = new String[] {"-rm", "-R",
-                                               testVolBucket+"/.Trash"};
+                                               testVolBucket + "/.Trash"};
     final Path trashPathKey1 = Path.mergePaths(new Path(
             new OFSPath(testKey).getTrashRoot(), new Path("Current")),
             new Path(testKey));
@@ -666,11 +672,11 @@
       Assert.assertEquals(0, res);
 
       LOG.info("Executing testDeleteTrashNoSkipTrash: key1 deleted moved to"
-              +" Trash: "+trashPathKey1.toString());
+              + " Trash: " + trashPathKey1.toString());
       fs.getFileStatus(trashPathKey1);
 
       LOG.info("Executing testDeleteTrashNoSkipTrash: deleting trash FsShell "
-              +"with args{}: ", Arrays.asList(rmTrashArgs));
+              + "with args{}: ", Arrays.asList(rmTrashArgs));
       res = ToolRunner.run(shell, rmTrashArgs);
       Assert.assertEquals(0, res);
 
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index df3f2cc..2b539bc 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1115,6 +1115,7 @@
 message DBUpdatesResponse {
     required uint64 sequenceNumber = 1;
     repeated bytes data = 2;
+    optional uint64 latestSequenceNumber = 3;
 }
 
 message FinalizeUpgradeRequest {
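
The new optional latestSequenceNumber field lets a DB-updates consumer see how far the batch it just received lags behind the newest transaction on the OM. A minimal, self-contained sketch of that bookkeeping is below; the class, variable names, and values are assumptions for illustration, not part of the Ozone client API.

// Illustrative only: estimating replication lag from a DBUpdates-style response.
// The field names mirror DBUpdatesResponse, but this class is a stand-in, not Ozone code.
public final class DbUpdatesLagSketch {
  public static void main(String[] args) {
    long sequenceNumber = 1_000L;        // last transaction contained in this batch (assumed)
    long latestSequenceNumber = 1_250L;  // newest transaction known to the OM (assumed)

    long lag = latestSequenceNumber - sequenceNumber;
    boolean caughtUp = (lag == 0);

    System.out.println("transactions still to fetch: " + lag
        + (caughtUp ? " (caught up)" : ""));
  }
}
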
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java
index fc16b3c..a28c6d1 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java
@@ -38,7 +38,7 @@
 public class TestOmPrefixInfo {
 
   private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo(
-      String aclString){
+      String aclString) {
     OzoneAcl oacl = OzoneAcl.parseAcl(aclString);
     ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray());
     return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 058adf3..b818a22 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -621,7 +621,7 @@
       }
       return hasAccess;
     } catch (IOException ex) {
-      if(ex instanceof OMException) {
+      if (ex instanceof OMException) {
         throw (OMException) ex;
       }
       LOG.error("CheckAccess operation failed for bucket:{}/{}.",
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index b8857fd..8c3f295 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -749,7 +749,7 @@
       }
     } catch (OMException ex) {
       throw ex;
-    } catch (IOException ex){
+    } catch (IOException ex) {
       LOG.error(
           "List Multipart Upload Parts Failed: volume: {}, bucket: {}, ,key: "
               + "{} ",
@@ -1071,7 +1071,7 @@
       }
       return hasAccess;
     } catch (IOException ex) {
-      if(ex instanceof OMException) {
+      if (ex instanceof OMException) {
         throw (OMException) ex;
       }
       LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume,
@@ -1397,13 +1397,20 @@
     refreshPipeline(Arrays.asList(key));
   }
 
+  private boolean isKeyDeleted(String key, Table keyTable) {
+    CacheValue<OmKeyInfo> omKeyInfoCacheValue
+        = keyTable.getCacheValue(new CacheKey(key));
+    return omKeyInfoCacheValue != null
+        && omKeyInfoCacheValue.getCacheValue() == null;
+  }
+
   /**
    * Helper function for listStatus to find key in TableCache.
    */
   private void listStatusFindKeyInTableCache(
       Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cacheIter,
       String keyArgs, String startCacheKey, boolean recursive,
-      TreeMap<String, OzoneFileStatus> cacheKeyMap, Set<String> deletedKeySet) {
+      TreeMap<String, OzoneFileStatus> cacheKeyMap) {
 
     while (cacheIter.hasNext()) {
       Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
@@ -1414,23 +1421,20 @@
       }
       OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
       // cacheOmKeyInfo is null if an entry is deleted in cache
-      if (cacheOmKeyInfo != null) {
-        if (cacheKey.startsWith(startCacheKey) &&
-            cacheKey.compareTo(startCacheKey) >= 0) {
-          if (!recursive) {
-            String remainingKey = StringUtils.stripEnd(cacheKey.substring(
-                startCacheKey.length()), OZONE_URI_DELIMITER);
-            // For non-recursive, the remaining part of key can't have '/'
-            if (remainingKey.contains(OZONE_URI_DELIMITER)) {
-              continue;
-            }
+      if (cacheOmKeyInfo != null
+          && cacheKey.startsWith(startCacheKey)
+          && cacheKey.compareTo(startCacheKey) >= 0) {
+        if (!recursive) {
+          String remainingKey = StringUtils.stripEnd(cacheKey.substring(
+              startCacheKey.length()), OZONE_URI_DELIMITER);
+          // For non-recursive, the remaining part of key can't have '/'
+          if (remainingKey.contains(OZONE_URI_DELIMITER)) {
+            continue;
           }
-          OzoneFileStatus fileStatus = new OzoneFileStatus(
-              cacheOmKeyInfo, scmBlockSize, !OzoneFSUtils.isFile(cacheKey));
-          cacheKeyMap.put(cacheKey, fileStatus);
         }
-      } else {
-        deletedKeySet.add(cacheKey);
+        OzoneFileStatus fileStatus = new OzoneFileStatus(
+            cacheOmKeyInfo, scmBlockSize, !OzoneFSUtils.isFile(cacheKey));
+        cacheKeyMap.put(cacheKey, fileStatus);
       }
     }
   }
@@ -1488,8 +1492,6 @@
     String keyName = args.getKeyName();
     // A map sorted by OmKey to combine results from TableCache and DB.
     TreeMap<String, OzoneFileStatus> cacheKeyMap = new TreeMap<>();
-    // A set to keep track of keys deleted in cache but not flushed to DB.
-    Set<String> deletedKeySet = new TreeSet<>();
 
     if (Strings.isNullOrEmpty(startKey)) {
       OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
@@ -1519,7 +1521,7 @@
 
       // First, find key in TableCache
       listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey,
-          recursive, cacheKeyMap, deletedKeySet);
+          recursive, cacheKeyMap);
       iterator = keyTable.iterator();
     } finally {
       metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
@@ -1545,7 +1547,8 @@
           String entryKeyName = omKeyInfo.getKeyName();
           if (recursive) {
             // for recursive list all the entries
-            if (!deletedKeySet.contains(entryInDb)) {
+
+            if (!isKeyDeleted(entryInDb, keyTable)) {
               cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo,
                   scmBlockSize, !OzoneFSUtils.isFile(entryKeyName)));
               countEntries++;
@@ -1559,14 +1562,14 @@
                 .getImmediateChild(entryKeyName, keyName);
             boolean isFile = OzoneFSUtils.isFile(immediateChild);
             if (isFile) {
-              if (!deletedKeySet.contains(entryInDb)) {
+              if (!isKeyDeleted(entryInDb, keyTable)) {
                 cacheKeyMap.put(entryInDb,
                     new OzoneFileStatus(omKeyInfo, scmBlockSize, !isFile));
                 countEntries++;
               }
             } else {
               // if entry is a directory
-              if (!deletedKeySet.contains(entryInDb)) {
+              if (!isKeyDeleted(entryInDb, keyTable)) {
                 if (!entryKeyName.equals(immediateChild)) {
                   OmKeyInfo fakeDirEntry = createDirectoryKey(
                       omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
@@ -1605,7 +1608,6 @@
     }
     // Clean up temp map and set
     cacheKeyMap.clear();
-    deletedKeySet.clear();
 
     List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
     fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
@@ -1763,7 +1765,7 @@
       if (fileStatusInfo != null) {
         prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
 
-        if(fileStatusInfo.isDirectory()){
+        if (fileStatusInfo.isDirectory()) {
           seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
               fileStatusInfo.getKeyInfo().getFileName());
 
@@ -2005,7 +2007,7 @@
       String cacheKey = entry.getKey().getCacheKey();
       OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
       // cacheOmKeyInfo is null if an entry is deleted in cache
-      if(cacheOmKeyInfo == null){
+      if (cacheOmKeyInfo == null) {
         deletedKeySet.add(cacheKey);
         continue;
       }
@@ -2050,7 +2052,7 @@
       String cacheKey = entry.getKey().getCacheKey();
       OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
       // cacheOmKeyInfo is null if an entry is deleted in cache
-      if(cacheOmDirInfo == null){
+      if (cacheOmDirInfo == null) {
         deletedKeySet.add(cacheKey);
         continue;
       }
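
The deletedKeySet bookkeeping above is replaced by querying the table cache directly: an entry that exists in the cache but carries a null value acts as a deletion tombstone for a key that has not been flushed to RocksDB yet, while a missing entry means the cache knows nothing and the DB iterator decides. A small self-contained sketch of that convention is below; it uses a plain HashMap rather than Ozone's TableCache, so the class and method names are assumptions.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Illustrative tombstone cache: Optional.empty() marks "deleted but not yet flushed",
// while a missing entry means "unknown here, consult the backing store".
public final class TombstoneCacheSketch<V> {
  private final Map<String, Optional<V>> cache = new HashMap<>();

  public void putPending(String key, V value) {
    cache.put(key, Optional.of(value));
  }

  public void markDeleted(String key) {
    cache.put(key, Optional.empty());      // tombstone, analogous to a null CacheValue
  }

  /** True only when the cache positively knows the key was deleted. */
  public boolean isDeleted(String key) {
    Optional<V> cached = cache.get(key);
    return cached != null && !cached.isPresent();
  }

  public static void main(String[] args) {
    TombstoneCacheSketch<String> c = new TombstoneCacheSketch<>();
    c.putPending("/vol/bucket/a", "info");
    c.markDeleted("/vol/bucket/b");
    System.out.println(c.isDeleted("/vol/bucket/a"));  // false
    System.out.println(c.isDeleted("/vol/bucket/b"));  // true
    System.out.println(c.isDeleted("/vol/bucket/c"));  // false -> fall back to the DB iterator
  }
}
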
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index 69871fd..e002966 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -31,7 +31,7 @@
  * This class is for maintaining Ozone Manager statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about="Ozone Manager Metrics", context="dfs")
+@Metrics(about = "Ozone Manager Metrics", context = "dfs")
 public class OMMetrics {
   private static final String SOURCE_NAME =
       OMMetrics.class.getSimpleName();
@@ -48,10 +48,12 @@
   private @Metric MutableCounterLong numVolumeInfos;
   private @Metric MutableCounterLong numVolumeCheckAccesses;
   private @Metric MutableCounterLong numBucketCreates;
+  private @Metric MutableCounterLong numFSOBucketCreates;
   private @Metric MutableCounterLong numVolumeDeletes;
   private @Metric MutableCounterLong numBucketInfos;
   private @Metric MutableCounterLong numBucketUpdates;
   private @Metric MutableCounterLong numBucketDeletes;
+  private @Metric MutableCounterLong numFSOBucketDeletes;
   private @Metric MutableCounterLong numKeyAllocate;
   private @Metric MutableCounterLong numKeyLookup;
   private @Metric MutableCounterLong numKeyRenames;
@@ -246,17 +248,17 @@
 
   public void setNumKeys(long val) {
     long oldVal = this.numKeys.value();
-    this.numKeys.incr(val- oldVal);
+    this.numKeys.incr(val - oldVal);
   }
 
   public void setNumDirs(long val) {
     long oldVal = this.numDirs.value();
-    this.numDirs.incr(val- oldVal);
+    this.numDirs.incr(val - oldVal);
   }
 
   public void setNumFiles(long val) {
     long oldVal = this.numDirs.value();
-    this.numDirs.incr(val- oldVal);
+    this.numDirs.incr(val - oldVal);
   }
 
   public void decNumKeys(long val) {
@@ -306,6 +308,10 @@
     numBucketCreates.incr();
   }
 
+  public void incNumFSOBucketCreates() {
+    numFSOBucketCreates.incr();
+  }
+
   public void incNumBucketInfos() {
     numBucketOps.incr();
     numBucketInfos.incr();
@@ -321,6 +327,10 @@
     numBucketDeletes.incr();
   }
 
+  public void incNumFSOBucketDeletes() {
+    numFSOBucketDeletes.incr();
+  }
+
   public void incNumBucketLists() {
     numBucketOps.incr();
     numBucketLists.incr();
@@ -630,6 +640,11 @@
   }
 
   @VisibleForTesting
+  public long getNumFSOBucketCreates() {
+    return numFSOBucketCreates.value();
+  }
+
+  @VisibleForTesting
   public long getNumBucketInfos() {
     return numBucketInfos.value();
   }
@@ -645,6 +660,11 @@
   }
 
   @VisibleForTesting
+  public long getNumFSOBucketDeletes() {
+    return numFSOBucketDeletes.value();
+  }
+
+  @VisibleForTesting
   public long getNumBucketLists() {
     return numBucketLists.value();
   }
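
setNumKeys and its siblings show how a counter type that only exposes incr() can still be "set": increment by the delta between the desired value and the current value. A plain-Java sketch of the same idea with AtomicLong follows; the Metrics2 registration wiring is omitted, so the class and method names here are assumptions, not Ozone code.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: emulate "set" on a counter that exposes only increment,
// the same trick OMMetrics uses with MutableCounterLong.
public final class DeltaSetCounterSketch {
  private final AtomicLong counter = new AtomicLong();

  public void incr(long delta) {
    counter.addAndGet(delta);
  }

  /** Set by incrementing with the (possibly negative) difference. */
  public void setTo(long desired) {
    long oldVal = counter.get();
    incr(desired - oldVal);
  }

  public long value() {
    return counter.get();
  }

  public static void main(String[] args) {
    DeltaSetCounterSketch numKeys = new DeltaSetCounterSketch();
    numKeys.setTo(42);
    numKeys.setTo(40);                     // delta of -2 brings it back down
    System.out.println(numKeys.value());   // 40
  }
}
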
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index d1095ad..c2dcb20 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -834,13 +834,13 @@
 
   @Override
   public Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>>
-      getBucketIterator(){
+      getBucketIterator() {
     return bucketTable.cacheIterator();
   }
 
   @Override
   public TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
-      getKeyIterator(){
+      getKeyIterator() {
     return keyTable.iterator();
   }
 
@@ -891,7 +891,6 @@
 
 
     TreeMap<String, OmKeyInfo> cacheKeyMap = new TreeMap<>();
-    Set<String> deletedKeySet = new TreeSet<>();
     Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
         keyTable.cacheIterator();
 
@@ -911,12 +910,10 @@
       OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
       // Making sure that entry in cache is not for delete key request.
 
-      if (omKeyInfo != null) {
-        if (key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) {
-          cacheKeyMap.put(key, omKeyInfo);
-        }
-      } else {
-        deletedKeySet.add(key);
+      if (omKeyInfo != null
+          && key.startsWith(seekPrefix)
+          && key.compareTo(seekKey) >= 0) {
+        cacheKeyMap.put(key, omKeyInfo);
       }
     }
 
@@ -934,7 +931,9 @@
 
           // Entry should not be marked for delete, consider only those
           // entries.
-          if(!deletedKeySet.contains(kv.getKey())) {
+          CacheValue<OmKeyInfo> cacheValue =
+              keyTable.getCacheValue(new CacheKey<>(kv.getKey()));
+          if (cacheValue == null || cacheValue.getCacheValue() != null) {
             cacheKeyMap.put(kv.getKey(), kv.getValue());
             currentCount++;
           }
@@ -965,7 +964,6 @@
 
     // Clear map and set.
     cacheKeyMap.clear();
-    deletedKeySet.clear();
 
     return result;
   }
@@ -1107,7 +1105,7 @@
         if (kv != null) {
           RepeatedOmKeyInfo infoList = kv.getValue();
           // Get block keys as a list.
-          for(OmKeyInfo info : infoList.getOmKeyInfoList()){
+          for (OmKeyInfo info : infoList.getOmKeyInfoList()) {
             OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
             List<BlockID> item = latest.getLocationList().stream()
                 .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java
index e7834db..7c32258 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java
@@ -63,7 +63,7 @@
     //OzoneNativeAuthorizer differs from Ranger Authorizer as Ranger requires
     // only READ access on parent level access. OzoneNativeAuthorizer has
     // different parent level access based on the child level access type
-    if(ozoneManager.isNativeAuthorizerEnabled()) {
+    if (ozoneManager.isNativeAuthorizerEnabled()) {
       if (aclType == IAccessAuthorizer.ACLType.CREATE ||
           aclType == IAccessAuthorizer.ACLType.DELETE ||
           aclType == IAccessAuthorizer.ACLType.WRITE_ACL) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index b92f006..d75312c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -2398,11 +2398,11 @@
 
     if (!accessAuthorizer.checkAccess(obj, context)) {
       if (throwIfPermissionDenied) {
-        String volumeName = obj.getVolumeName() != null?
-                "Volume:" + obj.getVolumeName() + " ": "";
-        String bucketName = obj.getBucketName() != null?
-                "Bucket:" + obj.getBucketName() + " ": "";
-        String keyName = obj.getKeyName() != null?
+        String volumeName = obj.getVolumeName() != null ?
+                "Volume:" + obj.getVolumeName() + " " : "";
+        String bucketName = obj.getBucketName() != null ?
+                "Bucket:" + obj.getBucketName() + " " : "";
+        String keyName = obj.getKeyName() != null ?
                 "Key:" + obj.getKeyName() : "";
         LOG.warn("User {} doesn't have {} permission to access {} {}{}{}",
             context.getClientUgi().getUserName(), context.getAclRights(),
@@ -3516,6 +3516,7 @@
         .getUpdatesSince(dbUpdatesRequest.getSequenceNumber(), limitCount);
     DBUpdates dbUpdates = new DBUpdates(updatesSince.getData());
     dbUpdates.setCurrentSequenceNumber(updatesSince.getCurrentSequenceNumber());
+    dbUpdates.setLatestSequenceNumber(updatesSince.getLatestSequenceNumber());
     return dbUpdates;
   }
 
@@ -3730,7 +3731,7 @@
 
 
       // Commit to DB.
-      try(BatchOperation batchOperation =
+      try (BatchOperation batchOperation =
           metadataManager.getStore().initBatchOperation()) {
         metadataManager.getVolumeTable().putWithBatch(batchOperation,
             dbVolumeKey, omVolumeArgs);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
index 130ce4d..2ffb388 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
@@ -182,7 +182,7 @@
     File prepareMarkerFile = getPrepareMarkerFile();
     if (prepareMarkerFile.exists()) {
       byte[] data = new byte[(int) prepareMarkerFile.length()];
-      try(FileInputStream stream = new FileInputStream(prepareMarkerFile)) {
+      try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) {
         stream.read(data);
       } catch (IOException e) {
         throwPrepareException(e, "Failed to read prepare marker " +
@@ -255,7 +255,7 @@
     File parentDir = markerFile.getParentFile();
     Files.createDirectories(parentDir.toPath());
 
-    try(FileOutputStream stream = new FileOutputStream(markerFile)) {
+    try (FileOutputStream stream = new FileOutputStream(markerFile)) {
       stream.write(Long.toString(index).getBytes(StandardCharsets.UTF_8));
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index ac2091f..8a22bff 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -178,7 +178,7 @@
    * to execute its tasks. This allows the dependency to be injected for unit
    * testing.
    */
-  static class OMStarterHelper implements OMStarterInterface{
+  static class OMStarterHelper implements OMStarterInterface {
 
     @Override
     public void start(OzoneConfiguration conf) throws IOException,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
index 1172d12..b325952 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,6 +44,7 @@
   // TODO: based on need can make batchSize configurable.
   private int batchSize = 1000;
   private OzoneFileStatus pathStatus;
+  private boolean checkRecursiveAccess = false;
 
   public OzonePrefixPathImpl(String volumeName, String bucketName,
       String keyPrefix, KeyManager keyManagerImpl) throws IOException {
@@ -66,6 +68,15 @@
       }
       throw ome;
     }
+
+    // Check if this key is a directory.
+    // NOTE: checkRecursiveAccess is always false for a file keyPrefix.
+    if (pathStatus != null && pathStatus.isDirectory()) {
+      // set recursive access check to true if this directory contains
+      // sub-directories or sub-files.
+      checkRecursiveAccess = OMFileRequest.hasChildren(
+          pathStatus.getKeyInfo(), keyManager.getMetadataManager());
+    }
   }
 
   @Override
@@ -161,4 +172,11 @@
       return statuses;
     }
   }
+
+  /**
+   * @return true if sub-directories or sub-files exist under the directory
+   *         key prefix (so a recursive access check is needed), false otherwise
+   */
+  public boolean isCheckRecursiveAccess() {
+    return checkRecursiveAccess;
+  }
 }
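
The new flag narrows recursive ACL checks to the only case that needs them: a directory key prefix that actually has children. A one-method sketch of that decision is below; the helper and class names are assumptions for illustration, not part of the Ozone ACL API.

// Illustrative decision helper, not Ozone code: a file, or an empty directory,
// can be authorized with a single ACL lookup; only a non-empty directory needs
// its subtree checked as well.
public final class RecursiveAclCheckSketch {
  static boolean needsRecursiveCheck(boolean isDirectory, boolean hasChildren) {
    return isDirectory && hasChildren;
  }

  public static void main(String[] args) {
    System.out.println(needsRecursiveCheck(false, false)); // file          -> false
    System.out.println(needsRecursiveCheck(true, false));  // empty dir     -> false
    System.out.println(needsRecursiveCheck(true, true));   // dir with kids -> true
  }
}
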
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
index 57d17cd..634968a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
@@ -65,7 +65,7 @@
     try {
       S3SecretValue s3Secret =
           omMetadataManager.getS3SecretTable().get(kerberosID);
-      if(s3Secret == null) {
+      if (s3Secret == null) {
         byte[] secret = OmUtils.getSHADigest();
         result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret));
         omMetadataManager.getS3SecretTable().put(kerberosID, result);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 92b7691..68d3814 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -135,7 +135,7 @@
   public FSDataOutputStream create(Path path,
       FsPermission fsPermission,
       boolean b, int i, short i1,
-      long l, Progressable progressable){
+      long l, Progressable progressable) {
     throw new UnsupportedOperationException(
         "fs.create() not implemented in TrashOzoneFileSystem");
   }
@@ -173,12 +173,12 @@
     OzoneManagerProtocolProtos.OMRequest omRequest =
         getRenameKeyRequest(srcPath, dstPath);
     try {
-      if(omRequest != null) {
+      if (omRequest != null) {
         submitRequest(omRequest);
         return true;
       }
       return false;
-    } catch (Exception e){
+    } catch (Exception e) {
       LOG.error("Couldn't send rename request", e);
       return false;
     }
@@ -203,7 +203,7 @@
     OzoneManagerProtocolProtos.OMRequest omRequest =
         getDeleteKeyRequest(srcPath);
     try {
-      if(omRequest != null) {
+      if (omRequest != null) {
         submitRequest(omRequest);
         return true;
       }
@@ -299,7 +299,7 @@
         CacheValue<OmBucketInfo>>> bucketIterator =
         ozoneManager.getMetadataManager().getBucketIterator();
     List<FileStatus> ret = new ArrayList<>();
-    while (bucketIterator.hasNext()){
+    while (bucketIterator.hasNext()) {
       Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
           bucketIterator.next();
       OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
@@ -316,7 +316,7 @@
             }
           }
         }
-      } catch (Exception e){
+      } catch (Exception e) {
         LOG.error("Couldn't perform fs operation " +
             "fs.listStatus()/fs.exists()", e);
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java
index d8bc270..e1138af 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java
@@ -70,7 +70,7 @@
   /** Format of checkpoint directories used prior to Hadoop 0.23. */
   private static final DateFormat OLD_CHECKPOINT =
       new SimpleDateFormat("yyMMddHHmm");
-  private static final int MSECS_PER_MINUTE = 60*1000;
+  private static final int MSECS_PER_MINUTE = 60 * 1000;
 
   private long emptierInterval;
 
@@ -78,7 +78,7 @@
 
   private OzoneManager om;
 
-  public TrashPolicyOzone(){
+  public TrashPolicyOzone() {
   }
 
   @Override
@@ -110,7 +110,7 @@
     }
   }
 
-  TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om){
+  TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om) {
     initialize(conf, fs);
     this.om = om;
   }
@@ -198,7 +198,7 @@
           // sleep for interval
           Thread.sleep(end - now);
           // if not leader, thread will always be sleeping
-          if (!om.isLeaderReady()){
+          if (!om.isLeaderReady()) {
             continue;
           }
         } catch (InterruptedException e) {
@@ -219,7 +219,7 @@
                 continue;
               }
               TrashPolicyOzone trash = new TrashPolicyOzone(fs, conf, om);
-              Runnable task = ()->{
+              Runnable task = () -> {
                 try {
                   om.getMetrics().incNumTrashRootsProcessed();
                   trash.deleteCheckpoint(trashRoot.getPath(), false);
@@ -241,7 +241,7 @@
       }
       try {
         fs.close();
-      } catch(IOException e) {
+      } catch (IOException e) {
         LOG.warn("Trash cannot close FileSystem: ", e);
       } finally {
         executor.shutdown();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 4b2ac49..ef61417 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -179,11 +179,11 @@
 
   public static final DBColumnFamilyDefinition<String, String>
       META_TABLE = new DBColumnFamilyDefinition<>(
-      OmMetadataManagerImpl.META_TABLE,
-      String.class,
-      new StringCodec(),
-      String.class,
-      new StringCodec());
+          OmMetadataManagerImpl.META_TABLE,
+          String.class,
+          new StringCodec(),
+          String.class,
+          new StringCodec());
 
   @Override
   public String getName() {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index e3ab5bd..eaa38ef 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -243,7 +243,7 @@
 
           setReadyBuffer();
           List<Long> flushedEpochs = null;
-          try(BatchOperation batchOperation = omMetadataManager.getStore()
+          try (BatchOperation batchOperation = omMetadataManager.getStore()
               .initBatchOperation()) {
 
             AtomicReference<String> lastTraceId = new AtomicReference<>();
@@ -376,7 +376,7 @@
         responseClass.getAnnotation(CleanupTableInfo.class);
     if (cleanupTableInfo != null) {
       String[] cleanupTables;
-      if (cleanupTableInfo.cleanupAll()){
+      if (cleanupTableInfo.cleanupAll()) {
         cleanupTables = Arrays
             .stream(new OMDBDefinition().getColumnFamilies())
             .map(DBColumnFamilyDefinition::getTableName)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
index 1b7dc46..e45c52c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
@@ -311,7 +311,7 @@
       CompletableFuture<OMResponse> future = CompletableFuture.supplyAsync(
           () -> runCommand(request, trxLogIndex), executorService);
       future.thenApply(omResponse -> {
-        if(!omResponse.getSuccess()) {
+        if (!omResponse.getSuccess()) {
           // When INTERNAL_ERROR or METADATA_ERROR it is considered as
           // critical error and terminate the OM. Considering INTERNAL_ERROR
           // also for now because INTERNAL_ERROR is thrown for any error
@@ -516,8 +516,8 @@
    */
   public void updateLastAppliedIndex(List<Long> flushedEpochs) {
     Preconditions.checkArgument(flushedEpochs.size() > 0);
-    computeAndUpdateLastAppliedIndex(flushedEpochs.get(flushedEpochs.size() -1),
-        -1L, flushedEpochs, true);
+    computeAndUpdateLastAppliedIndex(
+        flushedEpochs.get(flushedEpochs.size() - 1), -1L, flushedEpochs, true);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index a4ef4a1..266024a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -40,7 +40,10 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.security.acl.*;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -155,7 +158,7 @@
   public OzoneManagerProtocolProtos.UserInfo getUserIfNotExists(
       OzoneManager ozoneManager) {
     OzoneManagerProtocolProtos.UserInfo userInfo = getUserInfo();
-    if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()){
+    if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()) {
       OzoneManagerProtocolProtos.UserInfo.Builder newuserInfo =
           OzoneManagerProtocolProtos.UserInfo.newBuilder();
       UserGroupInformation user;
@@ -164,7 +167,7 @@
         user = UserGroupInformation.getCurrentUser();
         remoteAddress = ozoneManager.getOmRpcServerAddr()
             .getAddress();
-      } catch (Exception e){
+      } catch (Exception e) {
         LOG.debug("Couldn't get om Rpc server address", e);
         return getUserInfo();
       }
@@ -205,8 +208,9 @@
    * @param keyName
    * @throws IOException
    */
-  protected void checkACLs(OzoneManager ozoneManager, String volumeName,
-      String bucketName, String keyName, IAccessAuthorizer.ACLType aclType)
+  protected void checkACLsWithFSO(OzoneManager ozoneManager, String volumeName,
+                                  String bucketName, String keyName,
+                                  IAccessAuthorizer.ACLType aclType)
       throws IOException {
 
     // TODO: Presently not populating sub-paths under a single bucket
@@ -223,11 +227,10 @@
         .setKeyName(keyName)
         .setOzonePrefixPath(pathViewer).build();
 
-    boolean isDirectory = pathViewer.getOzoneFileStatus().isDirectory();
-
     RequestContext.Builder contextBuilder = RequestContext.newBuilder()
         .setAclRights(aclType)
-        .setRecursiveAccessCheck(isDirectory); // recursive checks for a dir
+        // recursive checks for a dir with sub-directories or sub-files
+        .setRecursiveAccessCheck(pathViewer.isCheckRecursiveAccess());
 
     // check Acl
     if (ozoneManager.getAclsEnabled()) {
@@ -517,7 +520,7 @@
     if (path.length() == 0) {
       throw new OMException("Invalid KeyPath, empty keyName" + path,
           INVALID_KEY_NAME);
-    } else if(path.startsWith("/")) {
+    } else if (path.startsWith("/")) {
       isValid = false;
     } else {
       // Check for ".." "." ":" "/"
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index b3acaaa..1565a1b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -44,7 +44,13 @@
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketLayoutProto;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -149,7 +155,9 @@
     } else {
       omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
     }
-
+    if (omBucketInfo.getBucketLayout().isFileSystemOptimized()) {
+      omMetrics.incNumFSOBucketCreates();
+    }
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
 
@@ -336,13 +344,13 @@
 
     List<OmBucketInfo>  bucketList = metadataManager.listBuckets(
         omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE);
-    for(OmBucketInfo bucketInfo : bucketList) {
+    for (OmBucketInfo bucketInfo : bucketList) {
       long nextQuotaInBytes = bucketInfo.getQuotaInBytes();
-      if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) {
+      if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) {
         totalBucketQuota += nextQuotaInBytes;
       }
     }
-    if(volumeQuotaInBytes < totalBucketQuota
+    if (volumeQuotaInBytes < totalBucketQuota
         && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) {
       throw new IllegalArgumentException("Total buckets quota in this volume " +
           "should not be greater than volume quota : the total space quota is" +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
index aa79ad5..4e234aa 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
@@ -22,6 +22,7 @@
 import java.util.Map;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -114,7 +115,10 @@
       // with out volume creation. Check if bucket exists
       String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
 
-      if (!omMetadataManager.getBucketTable().isExist(bucketKey)) {
+      OmBucketInfo omBucketInfo =
+          omMetadataManager.getBucketTable().get(bucketKey);
+
+      if (omBucketInfo == null) {
         LOG.debug("bucket: {} not found ", bucketName);
         throw new OMException("Bucket not exists", BUCKET_NOT_FOUND);
       }
@@ -125,6 +129,9 @@
         throw new OMException("Bucket is not empty",
             OMException.ResultCodes.BUCKET_NOT_EMPTY);
       }
+      if (omBucketInfo.getBucketLayout().isFileSystemOptimized()) {
+        omMetrics.incNumFSOBucketDeletes();
+      }
       omMetrics.decNumBuckets();
 
       // Update table cache.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 44c3756..17c4e39 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -267,15 +267,15 @@
     }
     List<OmBucketInfo> bucketList = metadataManager.listBuckets(
         omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE);
-    for(OmBucketInfo bucketInfo : bucketList) {
+    for (OmBucketInfo bucketInfo : bucketList) {
       long nextQuotaInBytes = bucketInfo.getQuotaInBytes();
-      if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET &&
+      if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET &&
           !omBucketArgs.getBucketName().equals(bucketInfo.getBucketName())) {
         totalBucketQuota += nextQuotaInBytes;
       }
     }
 
-    if(volumeQuotaInBytes < totalBucketQuota &&
+    if (volumeQuotaInBytes < totalBucketQuota &&
         volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) {
       throw new OMException("Total buckets quota in this volume " +
           "should not be greater than volume quota : the total space quota is" +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
index d4ba017..5d01c4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java
@@ -120,7 +120,7 @@
   @Override
   void onComplete(boolean operationResult, IOException exception,
       OMMetrics omMetrics, AuditLogger auditLogger,
-      Map<String, String> auditMap){
+      Map<String, String> auditMap) {
     auditLog(auditLogger, buildAuditMessage(OMAction.SET_ACL, auditMap,
         exception, getOmRequest().getUserInfo()));
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index e197cca..48d4274 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -193,7 +193,7 @@
       OmKeyInfo dirKeyInfo = null;
       if (omDirectoryResult == FILE_EXISTS ||
           omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException("Unable to create directory: " +keyName
+        throw new OMException("Unable to create directory: " + keyName
             + " in volume/bucket: " + volumeName + "/" + bucketName,
             FILE_ALREADY_EXISTS);
       } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
index 0f90b6c..2b65cdf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -61,7 +61,10 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE;
 
 /**
  * Handle create directory request. It will add path components to the directory
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 4e19b99..24994d7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -97,7 +97,7 @@
     final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
          .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
                  OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
+    if (checkKeyNameEnabled) {
       OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(),
               OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX));
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 9f05fee..f46b2dd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -199,7 +199,7 @@
         // Add all the sub-dirs to the missing list except the leaf element.
         // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
         // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
-        if(elements.hasNext()){
+        if (elements.hasNext()) {
           // skips leaf node.
           missing.add(fileName);
         }
@@ -273,7 +273,7 @@
    * Includes the list of missing intermediate directories and
    * the directory search result code.
    */
-  public static class OMPathInfoWithFSO extends OMPathInfo{
+  public static class OMPathInfoWithFSO extends OMPathInfo {
     private String leafNodeName;
     private long lastKnownParentId;
     private long leafNodeObjectId;
@@ -759,7 +759,7 @@
    * @param keyInfo omKeyInfo
    * @return omDirectoryInfo object
    */
-  public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo){
+  public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) {
     OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder();
     builder.setParentObjectID(keyInfo.getParentObjectID());
     builder.setAcls(keyInfo.getAcls());
@@ -838,7 +838,7 @@
               "Failed to rename %s to %s, %s doesn't exist", fromKeyName,
               toKeyName, toKeyParentDir),
               OMException.ResultCodes.KEY_RENAME_ERROR);
-    } else if (toKeyParentDirStatus.isFile()){
+    } else if (toKeyParentDirStatus.isFile()) {
       throw new OMException(String.format(
               "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName,
               toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR);
@@ -975,7 +975,7 @@
     long lastKnownParentId = bucketId;
 
     // If no sub-dirs then bucketID is the root/parent.
-    if(!pathComponents.hasNext()){
+    if (!pathComponents.hasNext()) {
       return bucketId;
     }
     if (StringUtils.isBlank(errMsg)) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 9a2ac61..9bdb51f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -88,7 +88,7 @@
     final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
          .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
                  OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
+    if (checkKeyNameEnabled) {
       OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(),
               OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX));
     }
@@ -265,7 +265,7 @@
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
 
-      if(bucketLockAcquired) {
+      if (bucketLockAcquired) {
         omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
             bucketName);
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index 50d9e4c..ffa3ebf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -98,7 +98,7 @@
     final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
          .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
                  OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
+    if (checkKeyNameEnabled) {
       OmUtils.validateKeyName(keyArgs.getKeyName());
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
index f621cc1..0a98ad2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
@@ -100,7 +100,7 @@
       volumeName = keyArgs.getVolumeName();
       bucketName = keyArgs.getBucketName();
 
-      checkACLs(ozoneManager, volumeName, bucketName, keyName,
+      checkACLsWithFSO(ozoneManager, volumeName, bucketName, keyName,
           IAccessAuthorizer.ACLType.DELETE);
 
       acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index 99ca308..0c96756 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -88,7 +88,7 @@
     final boolean checkKeyNameEnabled = ozoneManager.getConfiguration()
          .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY,
                  OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT);
-    if(checkKeyNameEnabled){
+    if (checkKeyNameEnabled) {
       OmUtils.validateKeyName(renameKeyRequest.getToKeyName());
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
index 2235baf..37497de 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -113,7 +113,7 @@
       // old key and create operation on new key
 
       // check Acl fromKeyName
-      checkACLs(ozoneManager, volumeName, bucketName, fromKeyName,
+      checkACLsWithFSO(ozoneManager, volumeName, bucketName, fromKeyName,
           IAccessAuthorizer.ACLType.DELETE);
 
       // check Acl toKeyName
@@ -149,7 +149,7 @@
                       volumeName, bucketName, toKeyName, 0);
 
       // Check if toKey exists.
-      if(toKeyFileStatus != null) {
+      if (toKeyFileStatus != null) {
         // Destination exists and following are different cases:
         OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo();
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index b4528b8..8fe9011 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -259,22 +259,22 @@
       OmBucketInfo bucketInfo, PrefixManager prefixManager) {
     List<OzoneAcl> acls = new ArrayList<>();
 
-    if(keyArgs.getAclsList() != null) {
+    if (keyArgs.getAclsList() != null) {
       acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
     }
 
     // Inherit DEFAULT acls from prefix.
-    if(prefixManager != null) {
+    if (prefixManager != null) {
       List< OmPrefixInfo > prefixList = prefixManager.getLongestPrefixPath(
           OZONE_URI_DELIMITER +
               keyArgs.getVolumeName() + OZONE_URI_DELIMITER +
               keyArgs.getBucketName() + OZONE_URI_DELIMITER +
               keyArgs.getKeyName());
 
-      if(prefixList.size() > 0) {
+      if (prefixList.size() > 0) {
         // Add all acls from direct parent to key.
         OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1);
-        if(prefixInfo  != null) {
+        if (prefixInfo  != null) {
           if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) {
             return acls;
           }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
index 4a45ef9..0fe8925 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
@@ -112,7 +112,8 @@
       if (getOmRequest().getAddAclRequest().hasObj() && operationResult) {
         modificationTime = getOmRequest().getAddAclRequest()
             .getModificationTime();
-      } else if (getOmRequest().getSetAclRequest().hasObj() && operationResult){
+      } else if (getOmRequest().getSetAclRequest().hasObj()
+          && operationResult) {
         modificationTime = getOmRequest().getSetAclRequest()
             .getModificationTime();
       } else if (getOmRequest().getRemoveAclRequest().hasObj()
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 21d10c4..b7cf656 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -168,7 +168,7 @@
       int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes();
       Iterator iter =
           multipartKeyInfo.getPartKeyInfoMap().entrySet().iterator();
-      while(iter.hasNext()) {
+      while (iter.hasNext()) {
         Map.Entry entry = (Map.Entry)iter.next();
         PartKeyInfo iterPartKeyInfo = (PartKeyInfo)entry.getValue();
         quotaReleased +=
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
index 26d9627..d8848fc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
@@ -197,13 +197,13 @@
 
     List<OmBucketInfo> bucketList = metadataManager.listBuckets(
         volumeName, null, null, Integer.MAX_VALUE);
-    for(OmBucketInfo bucketInfo : bucketList) {
+    for (OmBucketInfo bucketInfo : bucketList) {
       long nextQuotaInBytes = bucketInfo.getQuotaInBytes();
-      if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) {
+      if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) {
         totalBucketQuota += nextQuotaInBytes;
       }
     }
-    if(volumeQuotaInBytes < totalBucketQuota &&
+    if (volumeQuotaInBytes < totalBucketQuota &&
         volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) {
       throw new OMException("Total buckets quota in this volume " +
           "should not be greater than volume quota : the total space quota is" +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
index b91aef8..ce1a4d0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
@@ -102,7 +102,7 @@
         if (getOmRequest().getAddAclRequest().hasObj()) {
           modificationTime = getOmRequest().getAddAclRequest()
               .getModificationTime();
-        } else if (getOmRequest().getSetAclRequest().hasObj()){
+        } else if (getOmRequest().getSetAclRequest().hasObj()) {
           modificationTime = getOmRequest().getSetAclRequest()
               .getModificationTime();
         } else if (getOmRequest().getRemoveAclRequest().hasObj()) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
index 8df7792..e776df0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java
@@ -110,7 +110,7 @@
 
   @Override
   OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean aclApplied){
+      OmVolumeArgs omVolumeArgs, boolean aclApplied) {
     omResponse.setAddAclResponse(OzoneManagerProtocolProtos.AddAclResponse
         .newBuilder().setResponse(aclApplied).build());
     return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
index 4ab55f3..ff2792d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java
@@ -110,7 +110,7 @@
 
   @Override
   OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean aclApplied){
+      OmVolumeArgs omVolumeArgs, boolean aclApplied) {
     omResponse.setRemoveAclResponse(OzoneManagerProtocolProtos.RemoveAclResponse
         .newBuilder().setResponse(aclApplied).build());
     return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
index 7102509..95d98f4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java
@@ -107,7 +107,7 @@
 
   @Override
   OMClientResponse onSuccess(OMResponse.Builder omResponse,
-      OmVolumeArgs omVolumeArgs, boolean aclApplied){
+      OmVolumeArgs omVolumeArgs, boolean aclApplied) {
     omResponse.setSetAclResponse(OzoneManagerProtocolProtos.SetAclResponse
         .newBuilder().setResponse(aclApplied).build());
     return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
index 0c592a2..4085008 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
@@ -30,7 +30,8 @@
 import javax.annotation.Nonnull;
 import java.io.IOException;
 
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.*;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
  * Response for RenameKey request - prefix layout.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
index ccf2315..b09ff9e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
@@ -31,6 +31,8 @@
 import java.util.Map;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME;
 
 /**
  * Response for RenameKeys request.
@@ -46,6 +48,14 @@
     this.omRenameKeys = omRenameKeys;
   }
 
+  @Override
+  public void checkAndUpdateDB(OMMetadataManager omMetadataManager,
+          BatchOperation batchOperation) throws IOException {
+    if (getOMResponse().getStatus() == OK ||
+        getOMResponse().getStatus() == PARTIAL_RENAME) {
+      addToDBBatch(omMetadataManager, batchOperation);
+    }
+  }
 
   /**
    * For when the request is not successful or it is a replay transaction.
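
The override above widens when the rename batch reaches the DB: a bulk rename can partially succeed, so persisting only on OK would drop the keys that were renamed before the failure. Below is a minimal, dependency-free sketch of the pattern, assuming the inherited checkAndUpdateDB flushes only on full success; all names in the sketch are illustrative and are not Ozone APIs.

enum Status { OK, PARTIAL_RENAME, FAILED }

abstract class Response {
  final Status status;
  Response(Status status) { this.status = status; }

  // Default behaviour: flush to the batch only when the whole request succeeded.
  void checkAndUpdateDB(StringBuilder batch) {
    if (status == Status.OK) {
      addToDBBatch(batch);
    }
  }

  abstract void addToDBBatch(StringBuilder batch);
}

class RenameKeysResponse extends Response {
  RenameKeysResponse(Status status) { super(status); }

  // A partial rename has still moved some keys, so it must also be persisted.
  @Override
  void checkAndUpdateDB(StringBuilder batch) {
    if (status == Status.OK || status == Status.PARTIAL_RENAME) {
      addToDBBatch(batch);
    }
  }

  @Override
  void addToDBBatch(StringBuilder batch) {
    batch.append("rename-updates;");
  }
}
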
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index 291e09c..0f08de6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -20,7 +20,11 @@
 
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.om.response.key.OmKeyResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index ad42e8d..2eb4bc6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -20,7 +20,11 @@
 
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.*;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.om.response.key.OmKeyResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 7946f65..ef82069 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -109,7 +109,7 @@
     }
     this.omRatisServer = ratisServer;
     dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol",
-        metrics, LOG);
+        metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug);
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 5674d6a..dbc63e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -264,6 +264,7 @@
           dbUpdatesWrapper.getData().get(i)));
     }
     builder.setSequenceNumber(dbUpdatesWrapper.getCurrentSequenceNumber());
+    builder.setLatestSequenceNumber(dbUpdatesWrapper.getLatestSequenceNumber());
     return builder.build();
   }
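
Besides the sequence number of the last transaction in the returned batch, the DBUpdates response now also reports the newest sequence number in the OM database. The self-contained sketch below suggests why a consumer that tails OM DB updates (for example Recon) benefits from having both values; the helper is purely illustrative and not part of this patch.

// Illustrative only: with both numbers, a reader of DBUpdates can estimate how
// far behind the OzoneManager's RocksDB it still is, instead of only knowing
// what it has received so far.
final class DbUpdatesLag {
  private DbUpdatesLag() { }

  // sequenceNumber: last sequence number included in the returned batch.
  // latestSequenceNumber: newest sequence number currently in the OM DB.
  static long lag(long sequenceNumber, long latestSequenceNumber) {
    return Math.max(0, latestSequenceNumber - sequenceNumber);
  }

  public static void main(String[] args) {
    // Batch ends at transaction 1200 while the OM DB is already at 1250.
    System.out.println(lag(1200L, 1250L)); // prints 50
  }
}
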
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
index 5a988a5..8d40559 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
@@ -405,7 +405,7 @@
   @Override
   public byte[] retrievePassword(OzoneTokenIdentifier identifier)
       throws InvalidToken {
-    if(identifier.getTokenType().equals(S3AUTHINFO)) {
+    if (identifier.getTokenType().equals(S3AUTHINFO)) {
       return validateS3AuthInfo(identifier);
     }
     return validateToken(identifier).getPassword();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
index 756e821..fb348fd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
@@ -102,9 +102,9 @@
     int loadedToken = 0;
     try (TableIterator<OzoneTokenIdentifier, ? extends
         KeyValue<OzoneTokenIdentifier, Long>> iterator =
-             omMetadataManager.getDelegationTokenTable().iterator()){
+             omMetadataManager.getDelegationTokenTable().iterator()) {
       iterator.seekToFirst();
-      while(iterator.hasNext()) {
+      while (iterator.hasNext()) {
         KeyValue<OzoneTokenIdentifier, Long> kv = iterator.next();
         state.tokenState.put(kv.getKey(), kv.getValue());
         loadedToken++;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
index 542c774..237428d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -28,8 +28,13 @@
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.*;
 
+import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.junit.Assert;
 import org.junit.Ignore;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
index 6a2cf58..cfeab4b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -208,7 +208,7 @@
 
     initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey2");
 
-    OmMultipartInfo omMultipartInfo3 =addinitMultipartUploadToCache(volume,
+    OmMultipartInfo omMultipartInfo3 = addinitMultipartUploadToCache(volume,
         bucket, "dir/ozonekey3");
 
     OmMultipartInfo omMultipartInfo4 = initMultipartUpload(writeClient,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java
index 73e9ea5..277969c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java
@@ -59,7 +59,7 @@
     missingOmDBTables.remove("default");
     int countOmDBTables = missingOmDBTables.size();
     // Remove the file if it is found in both the datastructures
-    for(DBColumnFamilyDefinition definition : columnFamilyDefinitions) {
+    for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) {
       if (!missingOmDBTables.remove(definition.getName())) {
         missingDBDefTables.add(definition.getName());
       }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 2ad44d1..7354a94 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -278,7 +278,7 @@
     // volumeB with prefixBucketNameWithHadoopOwner.
     startBucket = null;
     TreeSet<String> expectedBuckets = new TreeSet<>();
-    for (int i=0; i<5; i++) {
+    for (int i = 0; i < 5; i++) {
 
       omBucketInfoList = omMetadataManager.listBuckets(volumeName2,
           startBucket, prefixBucketNameWithHadoopOwner, 10);
@@ -341,7 +341,7 @@
     TreeSet<String> keysASet = new TreeSet<>();
     TreeSet<String> keysBSet = new TreeSet<>();
     TreeSet<String> keysCSet = new TreeSet<>();
-    for (int i=1; i<= 100; i++) {
+    for (int i = 1; i <= 100; i++) {
       if (i % 2 == 0) {
         keysASet.add(
             prefixKeyA + i);
@@ -357,7 +357,7 @@
 
     TreeSet<String> keysAVolumeBSet = new TreeSet<>();
     TreeSet<String> keysBVolumeBSet = new TreeSet<>();
-    for (int i=1; i<= 100; i++) {
+    for (int i = 1; i <= 100; i++) {
       if (i % 2 == 0) {
         keysAVolumeBSet.add(
             prefixKeyA + i);
@@ -422,7 +422,7 @@
     // volumeB/ozoneBucket with "key-a".
     startKey = null;
     TreeSet<String> expectedKeys = new TreeSet<>();
-    for (int i=0; i<5; i++) {
+    for (int i = 0; i < 5; i++) {
 
       omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket,
           startKey, prefixKeyB, 10);
@@ -472,7 +472,7 @@
     TreeSet<String> deleteKeySet = new TreeSet<>();
 
 
-    for (int i=1; i<= 100; i++) {
+    for (int i = 1; i <= 100; i++) {
       if (i % 2 == 0) {
         keysASet.add(
             prefixKeyA + i);
@@ -510,7 +510,7 @@
     // Now get key count by 10.
     String startKey = null;
     expectedKeys = new TreeSet<>();
-    for (int i=0; i<5; i++) {
+    for (int i = 0; i < 5; i++) {
 
       omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket,
           startKey, prefixKeyA, 10);
@@ -618,7 +618,7 @@
   private void addKeysToOM(String volumeName, String bucketName,
       String keyName, int i) throws Exception {
 
-    if (i%2== 0) {
+    if (i % 2 == 0) {
       OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
           1000L, HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.ONE, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
index fb42efa..19183f3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
@@ -30,7 +30,9 @@
 import java.util.regex.Pattern;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * This class is used to test the CLI provided by OzoneManagerStarter, which is
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
index 7d7c310..0160166 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
@@ -109,7 +109,7 @@
     public OMResponse submitRequest(RpcController controller,
         OzoneManagerProtocolProtos.OMRequest request) throws ServiceException {
       throw new ServiceException("ServiceException of type " +
-          exception.getClass() + " for "+ omNodeId, exception);
+          exception.getClass() + " for " + omNodeId, exception);
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
index 0d7f95b..16dc322 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
@@ -109,7 +109,7 @@
     assertEquals(0, metrics.getTotalNumOfFlushedTransactions());
     assertEquals(0, metrics.getMaxNumberOfTransactionsFlushedInOneIteration());
 
-    for (int i=0; i < bucketCount; i++) {
+    for (int i = 0; i < bucketCount; i++) {
       doubleBuffer.add(createDummyBucketResponse(volumeName),
           trxId.incrementAndGet());
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index 050417a..92d5c62 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -284,7 +284,7 @@
   private void doMixTransactions(String volumeName, int bucketCount,
       Queue<OMBucketDeleteResponse> deleteBucketQueue,
       Queue<OMBucketCreateResponse> bucketQueue) {
-    for (int i=0; i < bucketCount; i++) {
+    for (int i = 0; i < bucketCount; i++) {
       String bucketName = UUID.randomUUID().toString();
       long transactionID = trxId.incrementAndGet();
       OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName,
@@ -434,7 +434,7 @@
   private void doTransactions(int bucketCount) {
     String volumeName = UUID.randomUUID().toString();
     createVolume(volumeName, trxId.incrementAndGet());
-    for (int i=0; i< bucketCount; i++) {
+    for (int i = 0; i < bucketCount; i++) {
       createBucket(volumeName, UUID.randomUUID().toString(),
           trxId.incrementAndGet());
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
index 2b2b75a..351f524 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
@@ -185,7 +185,7 @@
       OmUtils.isReadOnly(request);
       assertFalse(cmdtype + " is not categorized in " +
               "OmUtils#isReadyOnly",
-          logCapturer.getOutput().contains("CmdType " + cmdtype +" is not " +
+          logCapturer.getOutput().contains("CmdType " + cmdtype + " is not " +
               "categorized as readOnly or not."));
       logCapturer.clearOutput();
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index b73bbc5..a0a7cd8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -303,7 +303,7 @@
           mockTransactionContext(createKeyRequest));
       Assert.fail("Expected StateMachineException to be thrown when " +
           "submitting write request while prepared.");
-    } catch(StateMachineException smEx) {
+    } catch (StateMachineException smEx) {
       Assert.assertFalse(smEx.leaderShouldStepDown());
 
       Throwable cause = smEx.getCause();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
index f3dd34c..3dbddd7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
@@ -45,11 +45,15 @@
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
 
+    Assert.assertEquals(0, omMetrics.getNumFSOBucketCreates());
+
     OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName,
         bucketName);
 
     doValidateAndUpdateCache(volumeName, bucketName,
         omBucketCreateRequest.getOmRequest());
+
+    Assert.assertEquals(1, omMetrics.getNumFSOBucketCreates());
   }
 
   private OMBucketCreateRequest doPreExecute(String volumeName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequestWithFSO.java
new file mode 100644
index 0000000..499f393
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequestWithFSO.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.bucket;
+
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMBucketDeleteRequest class which handles DeleteBucket request.
+ */
+public class TestOMBucketDeleteRequestWithFSO
+    extends TestOMBucketDeleteRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithFSO() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    Assert.assertEquals(0, omMetrics.getNumFSOBucketDeletes());
+
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        createDeleteBucketRequest(volumeName, bucketName);
+
+    OMBucketDeleteRequest omBucketDeleteRequest =
+        new OMBucketDeleteRequest(omRequest);
+
+    // Create Volume and bucket entries in DB.
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+    omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1,
+        ozoneManagerDoubleBufferHelper);
+
+    Assert.assertNull(omMetadataManager.getBucketTable().get(
+        omMetadataManager.getBucketKey(volumeName, bucketName)));
+
+    Assert.assertEquals(1, omMetrics.getNumFSOBucketDeletes());
+  }
+
+  private OMRequest createDeleteBucketRequest(
+      String volumeName,
+      String bucketName) {
+    return OMRequest.newBuilder()
+        .setDeleteBucketRequest(
+            DeleteBucketRequest.newBuilder()
+                .setBucketName(bucketName).setVolumeName(volumeName))
+        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
+        .setClientId(UUID.randomUUID().toString()).build();
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
index 46bdb5e..9519eb3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java
@@ -396,7 +396,7 @@
   private String genRandomKeyName() {
     StringBuilder keyNameBuilder = new StringBuilder();
     keyNameBuilder.append(RandomStringUtils.randomAlphabetic(5));
-    for (int i =0; i< 3; i++) {
+    for (int i = 0; i < 3; i++) {
       keyNameBuilder.append("/").append(RandomStringUtils.randomAlphabetic(5));
     }
     return keyNameBuilder.toString();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index 153a4ea..5c93dae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -51,7 +51,7 @@
 public class TestOMFileCreateRequest extends TestOMKeyRequest {
 
   @Test
-  public void testPreExecute() throws Exception{
+  public void testPreExecute() throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
@@ -91,7 +91,7 @@
   }
 
   @Test
-  public void testPreExecuteWithBlankKey() throws Exception{
+  public void testPreExecuteWithBlankKey() throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, "",
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
@@ -404,7 +404,7 @@
    * @return OMFileCreateRequest reference
    */
   @NotNull
-  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
     return new OMFileCreateRequest(omRequest);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index e35645e..d4122c0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -445,12 +445,12 @@
   private List<KeyLocation> getKeyLocation(int count) {
     List<KeyLocation> keyLocations = new ArrayList<>();
 
-    for (int i=0; i < count; i++) {
+    for (int i = 0; i < count; i++) {
       KeyLocation keyLocation =
           KeyLocation.newBuilder()
               .setBlockID(HddsProtos.BlockID.newBuilder()
                   .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder()
-                      .setContainerID(i+1000).setLocalID(i+100).build()))
+                      .setContainerID(i + 1000).setLocalID(i + 100).build()))
               .setOffset(0).setLength(200).setCreateVersion(version).build();
       keyLocations.add(keyLocation);
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
index 0e27c1c..f64250a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@ -45,7 +45,7 @@
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
     OmBucketInfo omBucketInfo =
             omMetadataManager.getBucketTable().get(bucketKey);
-    if(omBucketInfo!= null){
+    if (omBucketInfo != null) {
       return omBucketInfo.getObjectID();
     }
     // bucket doesn't exist in DB
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 9ff7d7d..5bc9d45 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -574,7 +574,7 @@
   protected long checkIntermediatePaths(Path keyPath) throws Exception {
     // Check intermediate paths are created
     keyPath = keyPath.getParent();
-    while(keyPath != null) {
+    while (keyPath != null) {
       Assert.assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get(
           omMetadataManager
               .getOzoneDirKey(volumeName, bucketName, keyPath.toString())));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
index 5d006ca..4dce60b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -39,6 +39,10 @@
  * Tests OmKeyDelete request with prefix layout.
  */
 public class TestOMKeyDeleteRequestWithFSO extends TestOMKeyDeleteRequest {
+  private static final String INTERMEDIATE_DIR = "c/d/";
+  private static final String PARENT_DIR = "c/d/e";
+  private static final String FILE_NAME = "file1";
+  private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME;
 
   @Override
   protected OMKeyDeleteRequest getOmKeyDeleteRequest(
@@ -54,24 +58,37 @@
 
   @Override
   protected String addKeyToTable() throws Exception {
-    String parentDir = "c/d/e";
-    String fileName = "file1";
-    String key = parentDir + "/" + fileName;
-    keyName = key; // updated key name
+    keyName = FILE_KEY; // updated key name
 
     // Create parent dirs for the path
     long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
-            bucketName, parentDir, omMetadataManager);
+            bucketName, PARENT_DIR, omMetadataManager);
 
     OmKeyInfo omKeyInfo =
-            OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY,
                     HddsProtos.ReplicationType.RATIS,
                     HddsProtos.ReplicationFactor.ONE,
                     parentId + 1,
                     parentId, 100, Time.now());
-    omKeyInfo.setKeyName(fileName);
+    omKeyInfo.setKeyName(FILE_NAME);
     OMRequestTestUtils.addFileToKeyTable(false, false,
-            fileName, omKeyInfo, -1, 50, omMetadataManager);
+        FILE_NAME, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  protected String addKeyToDirTable(String volumeName, String bucketName,
+                                    String key) throws Exception {
+    // Create parent dirs for the path
+    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
+        bucketName, key, omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE,
+            parentId + 1,
+            parentId, 100, Time.now());
+    omKeyInfo.setKeyName(key);
     return omKeyInfo.getPath();
   }
 
@@ -125,11 +142,75 @@
         pathName);
     Assert.assertTrue("Failed to list keyPaths", pathItr.hasNext());
     Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName());
-    try{
+    try {
       pathItr.next();
       Assert.fail("Reached end of the list!");
-    } catch (NoSuchElementException nse){
+    } catch (NoSuchElementException nse) {
       // expected
     }
   }
+
+  @Test
+  public void testRecursiveAccessCheck() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager, getBucketLayout());
+
+    // Case 1:
+    // We create an empty directory structure.
+    String parentKey = "x/y/";
+    String key = "x/y/z/";
+    addKeyToDirTable(volumeName, bucketName, key);
+
+    // Instantiate PrefixPath for complete key.
+    OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, key, ozoneManager.getKeyManager());
+
+    // 'x/y/z' has no sub-directories or sub files - recursive access check
+    // should not be enabled for this case.
+    Assert.assertFalse(pathViewer.isCheckRecursiveAccess());
+
+    // Instantiate PrefixPath for parent key.
+    pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, parentKey, ozoneManager.getKeyManager());
+
+    // 'x/y/' has a sub-directory 'z', hence, we should be performing recursive
+    // access check.
+    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
+
+    // Case 2:
+    // We create a directory structure with a file as the leaf node.
+    // 'c/d/e/file1'.
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo =
+        omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
+
+    // As we added manually to key table.
+    Assert.assertNotNull(omKeyInfo);
+
+    // Instantiate PrefixPath for parent key 'c/d/'.
+    pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, INTERMEDIATE_DIR, ozoneManager.getKeyManager());
+
+    // 'c/d' has a sub-directory 'e', hence, we should be performing recursive
+    // access check.
+    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
+
+    // Instantiate PrefixPath for complete directory structure (without file).
+    pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, PARENT_DIR, ozoneManager.getKeyManager());
+
+    // 'c/d/e/' has a 'file1' under it, hence, we should be performing recursive
+    // access check.
+    Assert.assertTrue(pathViewer.isCheckRecursiveAccess());
+
+    // Instantiate PrefixPath for complete file1.
+    pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, FILE_KEY, ozoneManager.getKeyManager());
+
+    // Recursive access check is only enabled for directories, hence should be
+    // false for file1.
+    Assert.assertFalse(pathViewer.isCheckRecursiveAccess());
+  }
 }
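
The new testRecursiveAccessCheck pins down the contract of OzonePrefixPathImpl#isCheckRecursiveAccess: recursive ACL evaluation is only expected for a directory that has children, while a file or an empty directory needs a single check on the key itself. A dependency-free sketch of that rule, exercising the same cases the test asserts; the helper class is illustrative and not part of this patch.

final class RecursiveAccessRule {
  private RecursiveAccessRule() { }

  // Recursive access checks apply only to directories that contain something.
  static boolean needsRecursiveCheck(boolean isDirectory, boolean hasChildren) {
    return isDirectory && hasChildren;
  }

  public static void main(String[] args) {
    System.out.println(needsRecursiveCheck(true, false));  // "x/y/z/" empty dir        -> false
    System.out.println(needsRecursiveCheck(true, true));   // "x/y/" has sub-dir "z"    -> true
    System.out.println(needsRecursiveCheck(true, true));   // "c/d/e/" contains "file1" -> true
    System.out.println(needsRecursiveCheck(false, false)); // "c/d/e/file1" is a file   -> false
  }
}
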
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
index d4fe1da..4b2b467 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java
@@ -137,7 +137,7 @@
         .setStatus(Status.OK)
         .build();
 
-    try(BatchOperation batchOperation =
+    try (BatchOperation batchOperation =
         omMetadataManager.getStore().initBatchOperation()) {
 
       OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
index d3fcee7..958a6a0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
@@ -337,7 +337,7 @@
   private List<String> getFullOpenKeyNames(OpenKeyBucket... openKeyBuckets) {
     List<String> fullKeyNames = new ArrayList<>();
 
-    for(OpenKeyBucket keysPerBucket: openKeyBuckets) {
+    for (OpenKeyBucket keysPerBucket: openKeyBuckets) {
       String volume = keysPerBucket.getVolumeName();
       String bucket = keysPerBucket.getBucketName();
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index f91f27c..2e12aa7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -163,7 +163,7 @@
 
     List<Part> partList = new ArrayList<>();
 
-    String partName= getPartName(volumeName, bucketName, keyName,
+    String partName = getPartName(volumeName, bucketName, keyName,
         multipartUploadID, 23);
 
     partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23)
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index c854773..a0b94d1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -84,7 +84,7 @@
       Assert.assertEquals(expectedObjId, respone.getOmVolumeArgs()
           .getObjectID());
       Assert.assertEquals(txLogIndex, respone.getOmVolumeArgs().getUpdateID());
-    } catch (IllegalArgumentException ex){
+    } catch (IllegalArgumentException ex) {
       GenericTestUtils.assertExceptionContains("should be greater than zero",
           ex);
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
index 89137a9..6539f48 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java
@@ -121,7 +121,7 @@
       throws Exception {
     String volumeName = UUID.randomUUID().toString();
     long quotaInBytes = 100L;
-    long quotaInNamespace= 100L;
+    long quotaInNamespace = 100L;
 
     OMRequest originalRequest =
         OMRequestTestUtils.createSetVolumePropertyRequest(volumeName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index 230360b..3c17c8d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@ -217,7 +217,7 @@
     for (String tableName : om.getMetadataManager().listTableNames()) {
       if (!cleanup.contains(tableName)) {
         assertEquals(
-            "Cache item count of table " +tableName,
+            "Cache item count of table " + tableName,
             cacheItemCount.get(tableName).intValue(),
             Iterators.size(
                 om.getMetadataManager().getTable(tableName).cacheIterator()
@@ -289,7 +289,7 @@
     return new OMFileCreateRequest(protoRequest);
   }
 
-  private OMKeyCreateRequest anOMKeyCreateRequest(){
+  private OMKeyCreateRequest anOMKeyCreateRequest() {
     OMRequest protoRequest = mock(OMRequest.class);
     when(protoRequest.getCreateKeyRequest()).thenReturn(aKeyCreateRequest());
     when(protoRequest.getCmdType()).thenReturn(Type.CreateKey);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
index 9699524..acae362 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
@@ -258,7 +258,7 @@
   private Iterable<OMLayoutFeature> mockFeatures(
       int startFromLV, String... names
   ) {
-    int i=startFromLV;
+    int i = startFromLV;
     List<OMLayoutFeature> ret = new ArrayList<>();
     for (String name : names) {
       ret.add(mockFeature(name, i));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
index a23469a..7162189 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
@@ -203,7 +203,7 @@
     if (!mkdirs) {
       throw new IOException("Unable to create marker file directory.");
     }
-    try(FileOutputStream stream =
+    try (FileOutputStream stream =
             new FileOutputStream(markerFile)) {
       stream.write(bytes);
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
index 2784b6c..142aabb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
@@ -24,7 +24,7 @@
 import java.util.Arrays;
 import java.util.Collection;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test for {@link AWSV4AuthValidator}.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
index 670b7ee..e39fe39 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
@@ -91,7 +91,7 @@
     s3SecretManager = new S3SecretManagerImpl(conf, metadataManager) {
       @Override
       public S3SecretValue getS3Secret(String kerberosID) {
-        if(s3Secrets.containsKey(kerberosID)) {
+        if (s3Secrets.containsKey(kerberosID)) {
           return new S3SecretValue(kerberosID, s3Secrets.get(kerberosID));
         }
         return null;
@@ -99,7 +99,7 @@
 
       @Override
       public String getS3UserSecretString(String awsAccessKey) {
-        if(s3Secrets.containsKey(awsAccessKey)) {
+        if (s3Secrets.containsKey(awsAccessKey)) {
           return s3Secrets.get(awsAccessKey);
         }
         return null;
@@ -322,7 +322,7 @@
     OzoneTokenIdentifier id = new OzoneTokenIdentifier();
     // set invalid om cert serial id
     id.setOmCertSerialId("1927393");
-    id.setMaxDate(Time.now() + 60*60*24);
+    id.setMaxDate(Time.now() + 60 * 60 * 24);
     id.setOwner(new Text("test"));
     Assert.assertFalse(secretManager.verifySignature(id, id.getBytes()));
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index 48ed205..bb81572 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -218,7 +218,7 @@
     }
     long duration = Time.monotonicNowNanos() - startTime;
     LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns",
-        duration/testTokenCount);
+        duration / testTokenCount);
 
     startTime = Time.monotonicNowNanos();
     for (int i = 0; i < testTokenCount; i++) {
@@ -226,7 +226,7 @@
     }
     duration = Time.monotonicNowNanos() - startTime;
     LOG.info("Average token verify time with HmacSha256(RSA/1024 key) "
-        + "is {} ns", duration/testTokenCount);
+        + "is {} ns", duration / testTokenCount);
   }
 
   @Test
@@ -273,7 +273,7 @@
     }
     long duration = Time.monotonicNowNanos() - startTime;
     LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
-        hmacAlgorithm, keyLen, duration/testTokenCount);
+        hmacAlgorithm, keyLen, duration / testTokenCount);
   }
 
   /*
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
index a5568d0..9b2a3e3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
@@ -75,7 +75,9 @@
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX;
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test class for {@link OzoneNativeAuthorizer}.
@@ -349,7 +351,7 @@
     List<OzoneAcl> acls;
     String user = testUgi.getUserName();
     String group = (testUgi.getGroups().size() > 0) ?
-        testUgi.getGroups().get(0): "";
+        testUgi.getGroups().get(0) : "";
 
     RequestContext.Builder builder = new RequestContext.Builder()
         .setClientUgi(testUgi)
@@ -372,7 +374,7 @@
       // Reset acls to only one right.
       if (obj.getResourceType() == VOLUME) {
         setVolumeAcl(Collections.singletonList(newAcl));
-      } else if (obj.getResourceType() == BUCKET){
+      } else if (obj.getResourceType() == BUCKET) {
         setBucketAcl(Collections.singletonList(newAcl));
       } else {
         aclImplementor.setAcl(obj, Collections.singletonList(newAcl));
@@ -450,7 +452,7 @@
           // only DB not cache.
           if (obj.getResourceType() == VOLUME) {
             addVolumeAcl(addAcl);
-          } else if (obj.getResourceType() == BUCKET){
+          } else if (obj.getResourceType() == BUCKET) {
             addBucketAcl(addAcl);
           } else {
             aclImplementor.addAcl(obj, addAcl);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
index 145e599..3f73deb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
@@ -187,7 +187,7 @@
 
     List<IAccessAuthorizer.ACLType> aclsToTest =
         Arrays.stream(IAccessAuthorizer.ACLType.values()).filter(
-            (type)-> type != NONE && type != CREATE)
+            (type) -> type != NONE && type != CREATE)
             .collect(Collectors.toList());
     for (IAccessAuthorizer.ACLType type: aclsToTest) {
       nonAdminOwnerContext = getUserRequestContext(getTestVolOwnerName(0),
@@ -296,6 +296,6 @@
 
   List<IAccessAuthorizer.ACLType> getAclsToTest() {
     return Arrays.stream(IAccessAuthorizer.ACLType.values()).filter(
-        (type)-> type != NONE).collect(Collectors.toList());
+        (type) -> type != NONE).collect(Collectors.toList());
   }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 2c3a465..55bce59 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OmUtils;
@@ -342,7 +344,7 @@
 
 
   @Override
-  public Iterator<BasicKeyInfo> listKeys(String pathKey) throws IOException{
+  public Iterator<BasicKeyInfo> listKeys(String pathKey) throws IOException {
     incrementCounter(Statistic.OBJECTS_LIST, 1);
     return new IteratorAdapter(bucket.listKeys(pathKey));
   }
@@ -559,4 +561,16 @@
   public boolean isFSOptimizedBucket() {
     return bucket.getBucketLayout().isFileSystemOptimized();
   }
+
+  @Override
+  public FileChecksum getFileChecksum(String keyName, long length)
+      throws IOException {
+    OzoneClientConfig.ChecksumCombineMode combineMode =
+        config.getObject(OzoneClientConfig.class).getChecksumCombineMode();
+
+    return OzoneClientUtils.getFileChecksumWithCombineMode(
+        volume, bucket, keyName,
+        length, combineMode, ozoneClient.getObjectStore().getClientProxy());
+
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index c920747..3496105 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -285,7 +285,7 @@
     @Override
     boolean processKey(List<String> keyList) throws IOException {
       // TODO RenameKey needs to be changed to batch operation
-      for(String key : keyList) {
+      for (String key : keyList) {
         String newKeyName = dstKey.concat(key.substring(srcKey.length()));
         adapter.renameKey(key, newKeyName);
       }
@@ -512,7 +512,7 @@
 
     if (adapter.isFSOptimizedBucket()) {
       if (f.isRoot()) {
-        if (!recursive && listStatus(f).length!=0){
+        if (!recursive && listStatus(f).length != 0) {
           throw new PathIsNotEmptyDirectoryException(f.toString());
         }
         LOG.warn("Cannot delete root directory.");
@@ -800,7 +800,10 @@
   @Override
   public FileChecksum getFileChecksum(Path f, long length) throws IOException {
     incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM);
-    return super.getFileChecksum(f, length);
+    statistics.incrementReadOps(1);
+    Path qualifiedPath = f.makeQualified(uri, workingDir);
+    String key = pathToKey(qualifiedPath);
+    return adapter.getFileChecksum(key, length);
   }
 
   @Override
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 2577105..35a027d 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ozone.OFSPath;
@@ -1066,4 +1068,20 @@
     // TODO: Need to refine this part.
     return false;
   }
+
+  @Override
+  public FileChecksum getFileChecksum(String keyName, long length)
+      throws IOException {
+    OzoneClientConfig.ChecksumCombineMode combineMode =
+        config.getObject(OzoneClientConfig.class).getChecksumCombineMode();
+
+    OFSPath ofsPath = new OFSPath(keyName);
+
+    OzoneVolume volume = objectStore.getVolume(ofsPath.getVolumeName());
+    OzoneBucket bucket = getBucket(ofsPath, false);
+    return OzoneClientUtils.getFileChecksumWithCombineMode(
+        volume, bucket, ofsPath.getKeyName(),
+        length, combineMode, ozoneClient.getObjectStore().getClientProxy());
+
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 35065f0..09383a6 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -832,7 +832,8 @@
   @Override
   public FileChecksum getFileChecksum(Path f, long length) throws IOException {
     incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM);
-    return super.getFileChecksum(f, length);
+    String key = pathToKey(f);
+    return adapter.getFileChecksum(key, length);
   }
 
   @Override
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 0258f69..31bf351 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -24,6 +24,7 @@
 import java.util.List;
 
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
@@ -78,4 +79,6 @@
       Path qualifiedPath, String userName) throws IOException;
 
   boolean isFSOptimizedBucket();
+
+  FileChecksum getFileChecksum(String keyName, long length) throws IOException;
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
index 5c31917..92333b2 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
@@ -16,9 +16,16 @@
  */
 package org.apache.hadoop.fs.ozone;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.checksum.ReplicatedFileChecksumHelper;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 
@@ -31,7 +38,7 @@
  * Shared Utilities for Ozone FS and related classes.
  */
 public final class OzoneClientUtils {
-  private OzoneClientUtils(){
+  private OzoneClientUtils() {
     // Not used.
   }
   public static BucketLayout resolveLinkBucketLayout(OzoneBucket bucket,
@@ -64,4 +71,19 @@
     }
     return bucket.getBucketLayout();
   }
+
+  public static FileChecksum getFileChecksumWithCombineMode(OzoneVolume volume,
+      OzoneBucket bucket, String keyName, long length,
+      OzoneClientConfig.ChecksumCombineMode combineMode,
+      ClientProtocol rpcClient) throws IOException {
+    Preconditions.checkArgument(length >= 0);
+
+    if (keyName.length() == 0) {
+      return null;
+    }
+    BaseFileChecksumHelper helper = new ReplicatedFileChecksumHelper(
+        volume, bucket, keyName, length, rpcClient);
+    helper.compute();
+    return helper.getFileChecksum();
+  }
 }
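
The checksum support added above wires FileSystem#getFileChecksum through OzoneClientAdapter into OzoneClientUtils.getFileChecksumWithCombineMode; before this change the filesystem overrides fell back to the FileSystem base implementation, which returns null for filesystems that do not implement it. A minimal usage sketch, assuming a running cluster, the Ozone filesystem client on the classpath, and placeholder names for the o3fs authority and key:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OzoneChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder authority: bucket "bucket1" in volume "vol1" served by "om-host".
    FileSystem fs = FileSystem.get(
        URI.create("o3fs://bucket1.vol1.om-host/"), conf);

    Path key = new Path("/dir/key1");   // placeholder key
    long length = fs.getFileStatus(key).getLen();

    // Served by the Ozone client adapter, using the checksum combine mode
    // read from OzoneClientConfig.
    FileChecksum checksum = fs.getFileChecksum(key, length);
    System.out.println(checksum == null
        ? "no checksum returned"
        : checksum.getAlgorithmName());
  }
}
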
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
index d7888a5..9186407 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
@@ -101,7 +101,7 @@
    */
   @Override
   public int read(ByteBuffer buf) throws IOException {
-    if (buf.isReadOnly()){
+    if (buf.isReadOnly()) {
       throw new ReadOnlyBufferException();
     }
 
diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
new file mode 100644
index 0000000..a9d4f98
--- /dev/null
+++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneClientUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Unit tests for OzoneClientUtils.
+ */
+public class TestOzoneClientUtils {
+  @Test(expected = IllegalArgumentException.class)
+  public void testNegativeLength() throws IOException {
+    OzoneVolume volume = mock(OzoneVolume.class);
+    OzoneBucket bucket = mock(OzoneBucket.class);
+    String keyName = "dummy";
+    ClientProtocol clientProtocol = mock(ClientProtocol.class);
+    OzoneClientUtils.getFileChecksumWithCombineMode(volume, bucket, keyName,
+        -1, OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC, clientProtocol);
+
+  }
+
+  @Test
+  public void testEmptyKeyName() throws IOException {
+    OzoneVolume volume = mock(OzoneVolume.class);
+    OzoneBucket bucket = mock(OzoneBucket.class);
+    String keyName = "";
+    ClientProtocol clientProtocol = mock(ClientProtocol.class);
+    FileChecksum checksum =
+        OzoneClientUtils.getFileChecksumWithCombineMode(volume, bucket, keyName,
+            1, OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC,
+            clientProtocol);
+
+    assertNull(checksum);
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml
index 257279c..f28f37c 100644
--- a/hadoop-ozone/ozonefs-shaded/pom.xml
+++ b/hadoop-ozone/ozonefs-shaded/pom.xml
@@ -57,7 +57,7 @@
         </exclusion>
         <exclusion>
           <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-api</artifactId>
+          <artifactId>*</artifactId>
         </exclusion>
         <exclusion>
           <groupId>com.google.protobuf</groupId>
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
index 55069ce..a3675dc 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
@@ -61,7 +61,7 @@
     System.setProperty("derby.stream.error.method",
         DERBY_DISABLE_LOG_METHOD);
     Class.forName(DERBY_DRIVER_CLASS);
-    try(Connection connection = DriverManager.getConnection(jdbcUrl
+    try (Connection connection = DriverManager.getConnection(jdbcUrl
         + ";user=" + schemaName
         + ";create=true")) {
       LOG.info("Created derby database at {}.", jdbcUrl);
@@ -72,7 +72,7 @@
    * Used to suppress embedded derby database logging.
    * @return No-Op output stream.
    */
-  public static OutputStream disableDerbyLogFile(){
+  public static OutputStream disableDerbyLogFile() {
     return new OutputStream() {
       @Override
       public void write(int b) throws IOException {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
index 15640ae..6b2bdd8 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.ozone.recon.persistence.JooqPersistenceModule;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageConfig;
 import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
 import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
@@ -87,6 +88,7 @@
   protected void configure() {
     bind(OzoneConfiguration.class).toProvider(ConfigurationProvider.class);
     bind(ReconHttpServer.class).in(Singleton.class);
+    bind(ReconStorageConfig.class).in(Singleton.class);
     bind(ReconDBProvider.class).in(Singleton.class);
     bind(ReconOMMetadataManager.class)
         .to(ReconOmMetadataManagerImpl.class);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index faf60fc..8b0e9f1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -25,10 +25,18 @@
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.recon.ReconConfig;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.ReconCertificateClient;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageConfig;
+import org.apache.hadoop.ozone.recon.metrics.ReconTaskStatusMetrics;
 import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -39,15 +47,21 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.bouncycastle.pkcs.PKCS10CertificationRequest;
 import org.hadoop.ozone.recon.codegen.ReconSchemaGenerationModule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.security.cert.CertificateException;
 
 import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate;
+import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
+import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
 import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
 
 /**
@@ -65,6 +79,9 @@
   private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
   private OzoneStorageContainerManager reconStorageContainerManager;
   private OzoneConfiguration configuration;
+  private ReconStorageConfig reconStorage;
+  private CertificateClient certClient;
+  private ReconTaskStatusMetrics reconTaskStatusMetrics;
 
   private volatile boolean isStarted = false;
 
@@ -90,9 +107,29 @@
     //Pass on injector to listener that does the Guice - Jersey HK2 bridging.
     ReconGuiceServletContextListener.setInjector(injector);
 
+    reconStorage = injector.getInstance(ReconStorageConfig.class);
+
     LOG.info("Initializing Recon server...");
     try {
       loginReconUserIfSecurityEnabled(configuration);
+      try {
+        if (reconStorage.getState() != INITIALIZED) {
+          if (OzoneSecurityUtil.isSecurityEnabled(configuration)) {
+            initializeCertificateClient(configuration);
+          }
+          reconStorage.initialize();
+        } else {
+          if (OzoneSecurityUtil.isSecurityEnabled(configuration) &&
+              reconStorage.getReconCertSerialId() == null) {
+            LOG.info("ReconStorageConfig is already initialized." +
+                "Initializing certificate.");
+            initializeCertificateClient(configuration);
+            reconStorage.persistCurrentState();
+          }
+        }
+      } catch (Exception e) {
+        LOG.error("Error during initializing Recon certificate", e);
+      }
       this.reconDBProvider = injector.getInstance(ReconDBProvider.class);
       this.reconContainerMetadataManager =
           injector.getInstance(ReconContainerMetadataManager.class);
@@ -109,6 +146,8 @@
           injector.getInstance(OzoneManagerServiceProvider.class);
       this.reconStorageContainerManager =
           injector.getInstance(OzoneStorageContainerManager.class);
+      this.reconTaskStatusMetrics =
+          injector.getInstance(ReconTaskStatusMetrics.class);
       LOG.info("Recon server initialized successfully!");
 
     } catch (Exception e) {
@@ -130,6 +169,88 @@
   }
 
   /**
+   * Initializes secure Recon.
+   */
+  private void initializeCertificateClient(OzoneConfiguration conf)
+      throws IOException {
+    LOG.info("Initializing secure Recon.");
+    certClient = new ReconCertificateClient(
+        new SecurityConfig(configuration),
+        reconStorage.getReconCertSerialId());
+
+    CertificateClient.InitResponse response = certClient.init();
+    LOG.info("Init response: {}", response);
+    switch (response) {
+    case SUCCESS:
+      LOG.info("Initialization successful, case:{}.", response);
+      break;
+    case GETCERT:
+      getSCMSignedCert(conf);
+      LOG.info("Successfully stored SCM signed certificate, case:{}.",
+          response);
+      break;
+    case FAILURE:
+      LOG.error("Recon security initialization failed, case:{}.", response);
+      throw new RuntimeException("Recon security initialization failed.");
+    case RECOVER:
+      LOG.error("Recon security initialization failed. Recon certificate is " +
+          "missing.");
+      throw new RuntimeException("Recon security initialization failed.");
+    default:
+      LOG.error("Recon security initialization failed. Init response: {}",
+          response);
+      throw new RuntimeException("Recon security initialization failed.");
+    }
+  }
+
+  /**
+   * Get SCM signed certificate and store it using certificate client.
+   * @param config configuration used to build the SCM security client.
+   */
+  private void getSCMSignedCert(OzoneConfiguration config) {
+    try {
+      PKCS10CertificationRequest csr = ReconUtils.getCSR(config, certClient);
+      LOG.info("Creating CSR for Recon.");
+
+      SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
+          HddsServerUtil.getScmSecurityClientWithMaxRetry(config);
+      HddsProtos.NodeDetailsProto.Builder reconDetailsProtoBuilder =
+          HddsProtos.NodeDetailsProto.newBuilder()
+              .setHostName(InetAddress.getLocalHost().getHostName())
+              .setClusterId(reconStorage.getClusterID())
+              .setUuid(reconStorage.getReconId())
+              .setNodeType(HddsProtos.NodeType.RECON);
+
+      SCMSecurityProtocolProtos.SCMGetCertResponseProto response =
+          secureScmClient.getCertificateChain(
+              reconDetailsProtoBuilder.build(),
+              getEncodedString(csr));
+      // Persist certificates.
+      if (response.hasX509CACertificate()) {
+        String pemEncodedCert = response.getX509Certificate();
+        certClient.storeCertificate(pemEncodedCert, true);
+        certClient.storeCertificate(response.getX509CACertificate(), true,
+            true);
+
+        // Store Root CA certificate.
+        if (response.hasX509RootCACertificate()) {
+          certClient.storeRootCACertificate(
+              response.getX509RootCACertificate(), true);
+        }
+        String reconCertSerialId = getX509Certificate(pemEncodedCert).
+            getSerialNumber().toString();
+        reconStorage.setReconCertSerialId(reconCertSerialId);
+      } else {
+        throw new RuntimeException("Unable to retrieve recon certificate " +
+            "chain");
+      }
+    } catch (IOException | CertificateException e) {
+      LOG.error("Error while storing SCM signed certificate.", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
    * Need a way to restart services from tests.
    */
   public void start() throws Exception {
@@ -138,6 +259,7 @@
       isStarted = true;
       // Initialize metrics for Recon
       HddsServerUtil.initializeMetrics(configuration, "Recon");
+      reconTaskStatusMetrics.register();
       if (httpServer != null) {
         httpServer.start();
       }
@@ -165,6 +287,9 @@
       if (reconDBProvider != null) {
         reconDBProvider.close();
       }
+      if (reconTaskStatusMetrics != null) {
+        reconTaskStatusMetrics.unregister();
+      }
       isStarted = false;
     }
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
index dd9f0c9..d23ffe9 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
@@ -96,6 +96,13 @@
   public static final String RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM =
       "recon.om.snapshot.task.flush.param";
 
+  public static final String RECON_OM_DELTA_UPDATE_LIMIT =
+      "recon.om.delta.update.limit";
+  public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT = 2000;
+  public static final String RECON_OM_DELTA_UPDATE_LOOP_LIMIT =
+      "recon.om.delta.update.loop.limit";
+  public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT = 10;
+
   public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY =
       "ozone.recon.task.thread.count";
   public static final int OZONE_RECON_TASK_THREAD_COUNT_DEFAULT = 5;
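
These two keys bound Recon's incremental sync from OM: recon.om.delta.update.limit is sent as the limit count of each getDBUpdates request, and recon.om.delta.update.loop.limit caps how many such requests are chained in a single sync run (see the OzoneManagerServiceProviderImpl change below). A small sketch of reading and overriding them, with illustrative values only:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT;

public final class DeltaUpdateConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Illustrative overrides; in a deployment these come from ozone-site.xml.
    conf.setLong(RECON_OM_DELTA_UPDATE_LIMIT, 5000L);
    conf.setInt(RECON_OM_DELTA_UPDATE_LOOP_LIMIT, 5);

    long batchLimit = conf.getLong(RECON_OM_DELTA_UPDATE_LIMIT,
        RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT);
    int loopLimit = conf.getInt(RECON_OM_DELTA_UPDATE_LOOP_LIMIT,
        RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT);

    System.out.println("delta batch limit = " + batchLimit
        + ", loop limit = " + loopLimit);
  }
}
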
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 3d0476e..c6f735e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -25,9 +25,11 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.HttpURLConnection;
+import java.net.InetAddress;
 import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.security.KeyPair;
 import java.sql.Timestamp;
 import java.util.zip.GZIPOutputStream;
 
@@ -35,6 +37,9 @@
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.IOUtils;
 
@@ -49,7 +54,9 @@
 import static org.jooq.impl.DSL.select;
 import static org.jooq.impl.DSL.using;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.bouncycastle.pkcs.PKCS10CertificationRequest;
 import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
 import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
 import org.jooq.Configuration;
@@ -330,4 +337,26 @@
     }
     return index;
   }
+
+  /**
+   * Creates CertificateSignRequest.
+   * @param config configuration applied to the CSR builder.
+   * @param certClient certificate client supplying the key pair and builder.
+   */
+  public static PKCS10CertificationRequest getCSR(OzoneConfiguration config,
+      CertificateClient certClient) throws IOException {
+    CertificateSignRequest.Builder builder = certClient.getCSRBuilder();
+    KeyPair keyPair = new KeyPair(certClient.getPublicKey(),
+        certClient.getPrivateKey());
+
+    String hostname = InetAddress.getLocalHost().getCanonicalHostName();
+    String subject = UserGroupInformation.getCurrentUser()
+        .getShortUserName() + "@" + hostname;
+
+    builder.setCA(false)
+        .setKey(keyPair)
+        .setConfiguration(config)
+        .setSubject(subject);
+
+    return builder.build();
+  }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
index 2b6e0f8..17c5a27 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
@@ -44,6 +44,7 @@
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 
 /**
  * Endpoint to fetch current state of ozone cluster.
@@ -92,17 +93,29 @@
         TableCountTask.getRowKeyFromTable(VOLUME_TABLE));
     GlobalStats bucketRecord = globalStatsDao.findById(
         TableCountTask.getRowKeyFromTable(BUCKET_TABLE));
+    // Keys from OBJECT_STORE buckets.
     GlobalStats keyRecord = globalStatsDao.findById(
         TableCountTask.getRowKeyFromTable(KEY_TABLE));
+    // Keys from FILE_SYSTEM_OPTIMIZED buckets.
+    GlobalStats fileRecord = globalStatsDao.findById(
+        TableCountTask.getRowKeyFromTable(FILE_TABLE));
+
     if (volumeRecord != null) {
       builder.setVolumes(volumeRecord.getValue());
     }
     if (bucketRecord != null) {
       builder.setBuckets(bucketRecord.getValue());
     }
+
+    long totalKeys = 0L;
     if (keyRecord != null) {
-      builder.setKeys(keyRecord.getValue());
+      totalKeys += keyRecord.getValue();
     }
+    if (fileRecord != null) {
+      totalKeys += fileRecord.getValue();
+    }
+    builder.setKeys(totalKeys);
+
     ClusterStateResponse response = builder
         .setStorageReport(storageReport)
         .setPipelines(pipelines)
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
index 327e9b1..eec3346 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
@@ -93,7 +93,7 @@
         ) {
         final ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024);
 
-        while(inputChannel.read(buffer) != -1) {
+        while (inputChannel.read(buffer) != -1) {
           buffer.flip();
           outputChannel.write(buffer);
           buffer.compact();
@@ -101,7 +101,7 @@
 
         buffer.flip();
 
-        while(buffer.hasRemaining()) {
+        while (buffer.hasRemaining()) {
           outputChannel.write(buffer);
         }
       } finally {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index cbcd9ca..ac34c58 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -449,7 +449,7 @@
 
       for (OmVolumeArgs volume: volumes) {
         final long quota = volume.getQuotaInBytes();
-        assert(quota >= -1L);
+        assert (quota >= -1L);
         if (quota == -1L) {
           // If one volume has unlimited quota, the "root" quota is unlimited.
           quotaInBytes = -1L;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
index c7e5cc7..d475be4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
@@ -116,7 +116,7 @@
         // ex. group id of 48981bf7-8bea-4fbd-9857-79df51ee872d
         // is group-79DF51EE872D
         String[] splits = pipelineId.toString().split("-");
-        String groupId = "group-" + splits[splits.length-1].toUpperCase();
+        String groupId = "group-" + splits[splits.length - 1].toUpperCase();
         Optional<Long> leaderElectionCount = getMetricValue(
             "ratis_leader_election_electionCount", groupId);
         leaderElectionCount.ifPresent(pipelineBuilder::setLeaderElections);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index b605c0f..fdf493f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -82,7 +82,7 @@
     res.setNumOfFiles(in.readInt());
     res.setSizeOfFiles(in.readLong());
     short len = in.readShort();
-    assert(len == (short) ReconConstants.NUM_OF_BINS);
+    assert (len == (short) ReconConstants.NUM_OF_BINS);
     int[] fileSizeBucket = new int[len];
     for (int i = 0; i < len; ++i) {
       fileSizeBucket[i] = in.readInt();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 67c6355..d29bf50 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -144,7 +144,7 @@
              containerHealthSchemaManager.getAllUnhealthyRecordsCursor()) {
       ContainerHealthStatus currentContainer = null;
       Set<String> existingRecords = new HashSet<>();
-      while(cursor.hasNext()) {
+      while (cursor.hasNext()) {
         recordCount++;
         UnhealthyContainersRecord rec = cursor.fetchNext();
         try {
@@ -259,7 +259,7 @@
     public static boolean retainOrUpdateRecord(
         ContainerHealthStatus container, UnhealthyContainersRecord rec) {
       boolean returnValue = false;
-      switch(UnHealthyContainerStates.valueOf(rec.getContainerState())) {
+      switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) {
       case MISSING:
         returnValue = container.isMissing();
         break;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
index 1e3fa5a..e6ad328 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeFloat;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.ozone.OzoneConsts;
 
@@ -75,6 +76,9 @@
   @Metric(about = "Average number of updates got per OM delta request")
   private MutableGaugeFloat averageNumUpdatesInDeltaRequest;
 
+  @Metric(about = "The lag of sequence number between Recon and OM")
+  private MutableGaugeLong sequenceNumberLag;
+
   public void incrNumSnapshotRequests() {
     this.numSnapshotRequests.incr();
   }
@@ -130,4 +134,12 @@
   public MutableCounterLong getNumNonZeroDeltaRequests() {
     return numNonZeroDeltaRequests;
   }
+
+  public void setSequenceNumberLag(long lag) {
+    sequenceNumberLag.set(lag);
+  }
+
+  public MutableGaugeLong getSequenceNumberLag() {
+    return sequenceNumberLag;
+  }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java
new file mode 100644
index 0000000..a8db882
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.metrics;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
+import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
+
+import java.util.List;
+
+/**
+ * Exposes the ReconTaskStatus table from the persistent DB as metrics.
+ */
+@Singleton
+@Metrics(about = "Recon Task Status Metrics", context = OzoneConsts.OZONE)
+public class ReconTaskStatusMetrics implements MetricsSource {
+
+  private static final String SOURCE_NAME =
+      ReconTaskStatusMetrics.class.getSimpleName();
+
+  @Inject
+  private ReconTaskStatusDao reconTaskStatusDao;
+
+  private static final MetricsInfo RECORD_INFO_LAST_UPDATED_TS =
+      Interns.info("lastUpdatedTimestamp",
+          "Last updated timestamp of corresponding Recon Task");
+
+  private static final MetricsInfo RECORD_INFO_LAST_UPDATED_SEQ =
+      Interns.info("lastUpdatedSeqNumber",
+          "Last updated sequence number of corresponding Recon Task");
+
+  public void register() {
+    DefaultMetricsSystem.instance()
+        .register(SOURCE_NAME, "Recon Task Metrics", this);
+  }
+
+  public void unregister() {
+    DefaultMetricsSystem.instance()
+        .unregisterSource(SOURCE_NAME);
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    List<ReconTaskStatus> rows = reconTaskStatusDao.findAll();
+    rows.forEach((rts) -> {
+      MetricsRecordBuilder builder = collector.addRecord(SOURCE_NAME);
+      builder.add(
+          new MetricsTag(
+              Interns.info("type", "Recon Task type"),
+              rts.getTaskName()));
+      builder.addGauge(RECORD_INFO_LAST_UPDATED_TS,
+          rts.getLastUpdatedTimestamp());
+      builder.addCounter(RECORD_INFO_LAST_UPDATED_SEQ,
+          rts.getLastUpdatedSeqNumber());
+      builder.endRecord();
+    });
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index 8e15162..b8d7be0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -150,7 +150,7 @@
       existContainers = containers.get(true);
     }
     List<Long> noExistContainers = null;
-    if (containers.containsKey(false)){
+    if (containers.containsKey(false)) {
       noExistContainers = containers.get(false).parallelStream().
           map(ContainerReplicaProto::getContainerID)
           .collect(Collectors.toList());
@@ -178,7 +178,7 @@
         ContainerReplicaProto.State crpState = crp.getState();
         try {
           checkContainerStateAndUpdate(cID, crpState);
-        } catch (Exception ioe){
+        } catch (Exception ioe) {
           LOG.error("Exception while " +
               "checkContainerStateAndUpdate container", ioe);
         }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 41cdc7a..e57bb70 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -253,6 +253,15 @@
         reportedDn.getPersistedOpStateExpiryEpochSec());
   }
 
+  /**
+   * Send refresh command to all healthy datanodes to refresh volume
+   * usage info immediately. In Recon this is a no-op.
+   */
+  @Override
+  public void refreshAllHealthyDnUsageInfo() {
+    // No-op.
+  }
+
   @Override
   public RegisteredCommand register(
       DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
@@ -303,6 +312,13 @@
 
   @VisibleForTesting
   public long getNodeDBKeyCount() throws IOException {
-    return nodeDB.getEstimatedKeyCount();
+    long nodeCount = 0;
+    try (TableIterator<UUID, ? extends Table.KeyValue<UUID, DatanodeDetails>>
+        iterator = nodeDB.iterator()) {
+      while (iterator.hasNext()) {
+        iterator.next();
+        nodeCount++;
+      }
+    }
+    return nodeCount;
   }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
index f390ed7..02f2751 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
@@ -43,7 +43,7 @@
       PipelineProvider<ReplicationConfig> {
 
     @Override
-    public Pipeline create(ReplicationConfig config){
+    public Pipeline create(ReplicationConfig config) {
       // We don't expect this to be called at all. But adding this as a red
       // flag for troubleshooting.
       throw new UnsupportedOperationException(
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java
index 6804c29..7a82542 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageConfig.java
@@ -18,22 +18,77 @@
 
 package org.apache.hadoop.ozone.recon.scm;
 
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_STORAGE_DIR;
 
 import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.ozone.recon.ReconUtils;
 
+import javax.inject.Inject;
+
 /**
  * Recon's extension of SCMStorageConfig.
  */
 public class ReconStorageConfig extends SCMStorageConfig {
 
-  public ReconStorageConfig(OzoneConfiguration conf) throws IOException {
-    super(NodeType.RECON, ReconUtils.getReconScmDbDir(conf), RECON_STORAGE_DIR);
+  public static final String RECON_CERT_SERIAL_ID = "reconCertSerialId";
+  public static final String RECON_ID = "uuid";
+
+  @Inject
+  public ReconStorageConfig(OzoneConfiguration conf, ReconUtils reconUtils)
+      throws IOException {
+    super(NodeType.RECON, reconUtils.getReconDbDir(conf, OZONE_RECON_DB_DIR),
+        RECON_STORAGE_DIR);
   }
-  
+
+  public void setReconCertSerialId(String certSerialId) throws IOException {
+    getStorageInfo().setProperty(RECON_CERT_SERIAL_ID, certSerialId);
+  }
+
+  public void setReconId(String uuid) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("Recon is already initialized.");
+    } else {
+      getStorageInfo().setProperty(RECON_ID, uuid);
+    }
+  }
+
+  /**
+   * Retrieves the Recon ID from the version file.
+   * @return RECON_ID
+   */
+  public String getReconId() {
+    return getStorageInfo().getProperty(RECON_ID);
+  }
+
+  @Override
+  protected Properties getNodeProperties() {
+    String reconId = getReconId();
+    if (reconId == null) {
+      reconId = UUID.randomUUID().toString();
+    }
+    Properties reconProperties = new Properties();
+    reconProperties.setProperty(RECON_ID, reconId);
+
+    if (getReconCertSerialId() != null) {
+      reconProperties.setProperty(RECON_CERT_SERIAL_ID, getReconCertSerialId());
+    }
+    return reconProperties;
+  }
+
+  /**
+   * Retrieves the serial id of certificate issued by SCM.
+   * @return RECON_CERT_SERIAL_ID
+   */
+  public String getReconCertSerialId() {
+    return getStorageInfo().getProperty(RECON_CERT_SERIAL_ID);
+  }
+
+
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 081281d..b80d93d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -72,6 +72,7 @@
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.fsck.ContainerHealthTask;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
@@ -122,8 +123,8 @@
       StorageContainerServiceProvider scmServiceProvider,
       ReconTaskStatusDao reconTaskStatusDao,
       ContainerHealthSchemaManager containerHealthSchemaManager,
-      ReconContainerMetadataManager reconContainerMetadataManager)
-      throws IOException {
+      ReconContainerMetadataManager reconContainerMetadataManager,
+      ReconUtils reconUtils) throws IOException {
     reconNodeDetails = getReconNodeDetails(conf);
     this.eventQueue = new EventQueue();
     eventQueue.setSilent(true);
@@ -132,7 +133,7 @@
         .setSCM(this)
         .build();
     this.ozoneConfiguration = getReconScmConfiguration(conf);
-    this.scmStorageConfig = new ReconStorageConfig(conf);
+    this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils);
     this.clusterMap = new NetworkTopologyImpl(conf);
     this.dbStore = DBStoreBuilder
         .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition());
@@ -264,7 +265,7 @@
     boolean isSCMSnapshotEnabled = ozoneConfiguration.getBoolean(
         ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED,
         ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED_DEFAULT);
-    if(isSCMSnapshotEnabled) {
+    if (isSCMSnapshotEnabled) {
       initializeSCMDB();
       LOG.info("SCM DB initialized");
     } else {
@@ -335,7 +336,7 @@
           ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD,
           ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD_DEFAULT);
 
-      if(Math.abs(scmContainersCount - reconContainerCount) > threshold) {
+      if (Math.abs(scmContainersCount - reconContainerCount) > threshold) {
         LOG.info("Recon Container Count: {}, SCM Container Count: {}",
             reconContainerCount, scmContainersCount);
         updateReconSCMDBWithNewSnapshot();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
index 9625db6..1ebeedd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
@@ -34,7 +34,7 @@
 /**
  * Codec to encode ContainerKeyPrefix as byte array.
  */
-public class ContainerKeyPrefixCodec implements Codec<ContainerKeyPrefix>{
+public class ContainerKeyPrefixCodec implements Codec<ContainerKeyPrefix> {
 
   private static final String KEY_DELIMITER = "_";
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 021d014..efebbf0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -71,6 +71,10 @@
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT;
 import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER;
 import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
 import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
@@ -105,6 +109,9 @@
   private ReconUtils reconUtils;
   private OzoneManagerSyncMetrics metrics;
 
+  private long deltaUpdateLimit;
+  private int deltaUpdateLoopLimit;
+
   /**
    * OM Snapshot related task names.
    */
@@ -145,6 +152,12 @@
     String ozoneManagerHttpsAddress = configuration.get(OMConfigKeys
         .OZONE_OM_HTTPS_ADDRESS_KEY);
 
+    long deltaUpdateLimits = configuration.getLong(RECON_OM_DELTA_UPDATE_LIMIT,
+        RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT);
+    int deltaUpdateLoopLimits = configuration.getInt(
+        RECON_OM_DELTA_UPDATE_LOOP_LIMIT,
+        RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT);
+
     omSnapshotDBParentDir = reconUtils.getReconDbDir(configuration,
         OZONE_RECON_OM_SNAPSHOT_DB_DIR);
 
@@ -176,6 +189,8 @@
     this.ozoneManagerClient = ozoneManagerClient;
     this.configuration = configuration;
     this.metrics = OzoneManagerSyncMetrics.create();
+    this.deltaUpdateLimit = deltaUpdateLimits;
+    this.deltaUpdateLoopLimit = deltaUpdateLoopLimits;
   }
 
   public void registerOMDBTasks() {
@@ -183,7 +198,7 @@
         OmSnapshotTaskName.OmDeltaRequest.name(),
         System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
     if (!reconTaskStatusDao.existsById(
-        OmSnapshotTaskName.OmDeltaRequest.name())){
+        OmSnapshotTaskName.OmDeltaRequest.name())) {
       reconTaskStatusDao.insert(reconTaskStatusRecord);
       LOG.info("Registered {} task ",
           OmSnapshotTaskName.OmDeltaRequest.name());
@@ -193,7 +208,7 @@
         OmSnapshotTaskName.OmSnapshotRequest.name(),
         System.currentTimeMillis(), getCurrentOMDBSequenceNumber());
     if (!reconTaskStatusDao.existsById(
-        OmSnapshotTaskName.OmSnapshotRequest.name())){
+        OmSnapshotTaskName.OmSnapshotRequest.name())) {
       reconTaskStatusDao.insert(reconTaskStatusRecord);
       LOG.info("Registered {} task ",
           OmSnapshotTaskName.OmSnapshotRequest.name());
@@ -356,14 +371,49 @@
   void getAndApplyDeltaUpdatesFromOM(
       long fromSequenceNumber, OMDBUpdatesHandler omdbUpdatesHandler)
       throws IOException, RocksDBException {
+    int loopCount = 0;
+    LOG.info("OriginalFromSequenceNumber : {} ", fromSequenceNumber);
+    long deltaUpdateCnt = Long.MAX_VALUE;
+    long inLoopStartSequenceNumber = fromSequenceNumber;
+    long inLoopLatestSequenceNumber;
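+    // Pull delta updates in batches: each inner call asks OM for at most
+    // deltaUpdateLimit records, and the loop stops after deltaUpdateLoopLimit
+    // iterations or once a batch advances the sequence number by less than
+    // the limit.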
+    while (loopCount < deltaUpdateLoopLimit &&
+        deltaUpdateCnt >= deltaUpdateLimit) {
+      innerGetAndApplyDeltaUpdatesFromOM(
+          inLoopStartSequenceNumber, omdbUpdatesHandler);
+      inLoopLatestSequenceNumber = getCurrentOMDBSequenceNumber();
+      deltaUpdateCnt = inLoopLatestSequenceNumber - inLoopStartSequenceNumber;
+      inLoopStartSequenceNumber = inLoopLatestSequenceNumber;
+      loopCount++;
+    }
+    LOG.info("Delta updates received from OM : {} loops, {} records", loopCount,
+        getCurrentOMDBSequenceNumber() - fromSequenceNumber
+    );
+  }
+
+  /**
+   * Get delta updates from OM through an RPC call, apply them to the local
+   * OM DB, and accumulate them in a buffer.
+   * @param fromSequenceNumber sequence number to request updates from.
+   * @param omdbUpdatesHandler OM DB updates handler to buffer updates.
+   * @throws IOException when OM RPC request fails.
+   * @throws RocksDBException when writing to RocksDB fails.
+   */
+  @VisibleForTesting
+  void innerGetAndApplyDeltaUpdatesFromOM(long fromSequenceNumber,
+      OMDBUpdatesHandler omdbUpdatesHandler)
+      throws IOException, RocksDBException {
     DBUpdatesRequest dbUpdatesRequest = DBUpdatesRequest.newBuilder()
-        .setSequenceNumber(fromSequenceNumber).build();
+        .setSequenceNumber(fromSequenceNumber)
+        .setLimitCount(deltaUpdateLimit)
+        .build();
     DBUpdates dbUpdates = ozoneManagerClient.getDBUpdates(dbUpdatesRequest);
-    if (null != dbUpdates) {
+    int numUpdates = 0;
+    long latestSequenceNumberOfOM = -1L;
+    if (null != dbUpdates && dbUpdates.getCurrentSequenceNumber() != -1) {
+      latestSequenceNumberOfOM = dbUpdates.getLatestSequenceNumber();
       RDBStore rocksDBStore = (RDBStore) omMetadataManager.getStore();
       RocksDB rocksDB = rocksDBStore.getDb();
-      int numUpdates = dbUpdates.getData().size();
-      LOG.info("Number of updates received from OM : {}", numUpdates);
+      numUpdates = dbUpdates.getData().size();
       if (numUpdates > 0) {
         metrics.incrNumUpdatesInDeltaTotal(numUpdates);
       }
@@ -379,6 +429,12 @@
         }
       }
     }
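+    // How far Recon's local OM DB still trails OM; 0 when OM did not report
+    // its latest sequence number (latestSequenceNumberOfOM stays -1).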
+    long lag = latestSequenceNumberOfOM == -1 ? 0 :
+        latestSequenceNumberOfOM - getCurrentOMDBSequenceNumber();
+    metrics.setSequenceNumberLag(lag);
+    LOG.info("Number of updates received from OM : {}, " +
+            "SequenceNumber diff: {}, SequenceNumber Lag from OM {}.",
+        numUpdates, getCurrentOMDBSequenceNumber() - fromSequenceNumber, lag);
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
index 8f83c66..e624b3f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java
@@ -49,12 +49,13 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.ReconCertificateClient;
 import org.apache.hadoop.hdds.server.http.HttpConfig;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageConfig;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.ratis.proto.RaftProtos;
@@ -76,12 +77,14 @@
   private File scmSnapshotDBParentDir;
   private URLConnectionFactory connectionFactory;
   private ReconUtils reconUtils;
+  private ReconStorageConfig reconStorage;
 
   @Inject
   public StorageContainerServiceProviderImpl(
       StorageContainerLocationProtocol scmClient,
       ReconUtils reconUtils,
-      OzoneConfiguration configuration) {
+      OzoneConfiguration configuration,
+      ReconStorageConfig reconStorage) {
 
     int connectionTimeout = (int) configuration.getTimeDuration(
         OZONE_RECON_SCM_CONNECTION_TIMEOUT,
@@ -115,6 +118,7 @@
     this.reconUtils = reconUtils;
     this.scmClient = scmClient;
     this.configuration = configuration;
+    this.reconStorage = reconStorage;
   }
 
   @Override
@@ -187,10 +191,10 @@
                 ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY,
                 ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT);
 
-            try (SCMSnapshotDownloader downloadClient =
-                 new InterSCMGrpcClient(hostAddress, grpcPort,
-                 configuration, new SCMCertificateClient(
-                 new SecurityConfig(configuration)))) {
+            try (SCMSnapshotDownloader downloadClient = new InterSCMGrpcClient(
+                hostAddress, grpcPort, configuration,
+                new ReconCertificateClient(new SecurityConfig(configuration),
+                    reconStorage.getReconCertSerialId()))) {
               downloadClient.download(targetFile.toPath()).get();
             } catch (ExecutionException | InterruptedException e) {
               LOG.error("Rocks DB checkpoint downloading failed", e);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
index 8c390d4..e9d8cbb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
@@ -128,7 +128,7 @@
       String updatedKey = omdbUpdateEvent.getKey();
       OmKeyInfo omKeyInfo = omdbUpdateEvent.getValue();
 
-      try{
+      try {
         switch (omdbUpdateEvent.getAction()) {
         case PUT:
           handlePutKeyEvent(omKeyInfo, fileSizeCountMap);
@@ -258,7 +258,7 @@
 
     @Override
     public boolean equals(Object obj) {
-      if(obj instanceof FileSizeCountKey) {
+      if (obj instanceof FileSizeCountKey) {
         FileSizeCountKey s = (FileSizeCountKey) obj;
         return volume.equals(s.volume) && bucket.equals(s.bucket) &&
             fileSizeUpperBound.equals(s.fileSizeUpperBound);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
index 79b28fe..6e827c7 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
@@ -123,7 +123,7 @@
         continue;
       }
       String rowKey = getRowKeyFromTable(omdbUpdateEvent.getTable());
-      try{
+      try {
         switch (omdbUpdateEvent.getAction()) {
         case PUT:
           objectCountMap.computeIfPresent(rowKey, (k, count) -> count + 1L);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 43df965..9353215 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -21,6 +21,7 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos
@@ -74,6 +75,7 @@
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask;
 import org.apache.hadoop.ozone.recon.tasks.TableCountTask;
+import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
 import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
@@ -200,6 +202,8 @@
     when(urlConnectionMock.getInputStream()).thenReturn(inputStream);
     when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class),
         anyString(), anyBoolean())).thenReturn(urlConnectionMock);
+    when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class),
+        anyString())).thenReturn(GenericTestUtils.getRandomizedTestDir());
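+    // The mocked ReconUtils hands out a throwaway directory whenever a
+    // component asks for the Recon DB dir in this test.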
 
     ReconTestInjector reconTestInjector =
         new ReconTestInjector.Builder(temporaryFolder)
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
index 0e66553..a2f72ba 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.recon.api;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -53,6 +54,7 @@
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
 import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -76,7 +78,9 @@
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
-import java.util.*;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 
 /**
@@ -181,6 +185,8 @@
             .thenReturn(HttpServletResponse.SC_OK);
     when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class),
             anyString(), anyBoolean())).thenReturn(urlConnectionMock);
+    when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class),
+        anyString())).thenReturn(GenericTestUtils.getRandomizedTestDir());
 
     ReconTestInjector reconTestInjector =
             new ReconTestInjector.Builder(temporaryFolder)
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
index 9485dbf..d5da4a3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
@@ -68,7 +68,7 @@
         response.getEntity();
 
     Assert.assertEquals(resultList.size(), responseList.size());
-    for(ReconTaskStatus r : responseList) {
+    for (ReconTaskStatus r : responseList) {
       Assert.assertEquals(reconTaskStatusRecord.getTaskName(), r.getTaskName());
       Assert.assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(),
           r.getLastUpdatedTimestamp());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index ac5aeaf..db2448b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -370,7 +370,7 @@
     }
 
     private boolean isDnPresent(List<DatanodeDetails> dns) {
-      for(DatanodeDetails dn : dns) {
+      for (DatanodeDetails dn : dns) {
         if (misRepWhenDnPresent != null
             && dn.getUuid().equals(misRepWhenDnPresent)) {
           return true;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
index ab21a35..5968923 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
@@ -94,7 +94,7 @@
         new ImmutablePair<>("count", Types.BIGINT));
 
     List<Pair<String, Integer>> actualPairsFileCount = new ArrayList<>();
-    while(resultSetFileCount.next()) {
+    while (resultSetFileCount.next()) {
       actualPairsFileCount.add(new ImmutablePair<>(resultSetFileCount.getString(
           "COLUMN_NAME"), resultSetFileCount.getInt(
               "DATA_TYPE")));
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 0996f3f..0ee6e5f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
@@ -93,7 +94,7 @@
     sequenceIdGen = new SequenceIdGenerator(
         conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(store));
     scmContext = SCMContext.emptyContext();
-    scmStorageConfig = new ReconStorageConfig(conf);
+    scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils());
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
     layoutVersionManager = mock(HDDSLayoutVersionManager.class);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index 4687aa0..a4d0260 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -76,7 +77,7 @@
     conf.set(OZONE_METADATA_DIRS,
         temporaryFolder.newFolder().getAbsolutePath());
     conf.set(OZONE_SCM_NAMES, "localhost");
-    reconStorageConfig = new ReconStorageConfig(conf);
+    reconStorageConfig = new ReconStorageConfig(conf, new ReconUtils());
     versionManager = new HDDSLayoutVersionManager(
         reconStorageConfig.getLayoutVersion());
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
@@ -89,7 +90,8 @@
 
   @Test
   public void testReconNodeDB() throws IOException, NodeNotFoundException {
-    ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf);
+    ReconStorageConfig scmStorageConfig =
+        new ReconStorageConfig(conf, new ReconUtils());
     EventQueue eventQueue = new EventQueue();
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     Table<UUID, DatanodeDetails> nodeTable =
@@ -174,7 +176,8 @@
 
   @Test
   public void testUpdateNodeOperationalStateFromScm() throws Exception {
-    ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf);
+    ReconStorageConfig scmStorageConfig =
+        new ReconStorageConfig(conf, new ReconUtils());
     EventQueue eventQueue = new EventQueue();
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     Table<UUID, DatanodeDetails> nodeTable =
@@ -207,7 +210,8 @@
 
   @Test
   public void testDatanodeUpdate() throws IOException {
-    ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf);
+    ReconStorageConfig scmStorageConfig =
+        new ReconStorageConfig(conf, new ReconUtils());
     EventQueue eventQueue = new EventQueue();
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     Table<UUID, DatanodeDetails> nodeTable =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index 3acfbd5..7db9262 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvider;
 
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
@@ -85,7 +86,7 @@
     conf.set(OZONE_METADATA_DIRS,
         temporaryFolder.newFolder().getAbsolutePath());
     conf.set(OZONE_SCM_NAMES, "localhost");
-    scmStorageConfig = new ReconStorageConfig(conf);
+    scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils());
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
     scmhaManager = MockSCMHAManager.getInstance(
         true, new MockSCMHADBTransactionBuffer(store));
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
index c8d2544..c739902 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
@@ -25,12 +25,15 @@
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT;
 import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile;
 import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmDeltaRequest;
 import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmSnapshotRequest;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -214,7 +217,7 @@
     RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb();
     TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L);
     DBUpdates dbUpdatesWrapper = new DBUpdates();
-    while(transactionLogIterator.isValid()) {
+    while (transactionLogIterator.isValid()) {
       TransactionLogIterator.BatchResult result =
           transactionLogIterator.getBatch();
       result.writeBatch().markWalTerminationPoint();
@@ -245,7 +248,7 @@
         metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0);
     assertEquals(1, metrics.getNumNonZeroDeltaRequests().value());
 
-    // In this method, we have to assert the "GET" part and the "APPLY" path.
+    // In this method, we have to assert the "GET" path and the "APPLY" path.
 
     // Assert GET path --> verify if the OMDBUpdatesHandler picked up the 4
     // events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs).
@@ -264,6 +267,76 @@
   }
 
   @Test
+  public void testGetAndApplyDeltaUpdatesFromOMWithLimit() throws Exception {
+
+    // Writing 2 keys into a source OM DB and collecting each resulting
+    // write batch in its own DBUpdates object.
+    OMMetadataManager sourceOMMetadataMgr =
+        initializeNewOmMetadataManager(temporaryFolder.newFolder());
+    writeDataToOm(sourceOMMetadataMgr, "key_one");
+    writeDataToOm(sourceOMMetadataMgr, "key_two");
+
+    RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb();
+    TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L);
+    DBUpdates[] dbUpdatesWrapper = new DBUpdates[4];
+    int index = 0;
+    while (transactionLogIterator.isValid()) {
+      TransactionLogIterator.BatchResult result =
+          transactionLogIterator.getBatch();
+      result.writeBatch().markWalTerminationPoint();
+      WriteBatch writeBatch = result.writeBatch();
+      dbUpdatesWrapper[index] = new DBUpdates();
+      dbUpdatesWrapper[index].addWriteBatch(writeBatch.data(),
+          result.sequenceNumber());
+      index++;
+      transactionLogIterator.next();
+    }
+
+    // OM Service Provider's Metadata Manager.
+    OMMetadataManager omMetadataManager =
+        initializeNewOmMetadataManager(temporaryFolder.newFolder());
+
+    OzoneConfiguration withLimitConfiguration =
+        new OzoneConfiguration(configuration);
+    withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LIMIT, 1);
+    withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LOOP_LIMIT, 3);
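+    // Allow only 1 record per delta request and at most 3 fetch loops, so
+    // just 3 of the 4 collected updates should be pulled and applied below.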
+    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
+        new OzoneManagerServiceProviderImpl(withLimitConfiguration,
+            getTestReconOmMetadataManager(omMetadataManager,
+                temporaryFolder.newFolder()),
+            getMockTaskController(), new ReconUtils(),
+            getMockOzoneManagerClientWith4Updates(dbUpdatesWrapper[0],
+                dbUpdatesWrapper[1], dbUpdatesWrapper[2], dbUpdatesWrapper[3]));
+
+    OMDBUpdatesHandler updatesHandler =
+        new OMDBUpdatesHandler(omMetadataManager);
+    ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM(
+        0L, updatesHandler);
+
+    OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics();
+    assertEquals(1.0,
+        metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0);
+    assertEquals(3, metrics.getNumNonZeroDeltaRequests().value());
+
+    // In this method, we have to assert the "GET" path and the "APPLY" path.
+
+    // Assert GET path --> verify if the OMDBUpdatesHandler picked up the first
+    // 3 of 4 events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs).
+    assertEquals(3, updatesHandler.getEvents().size());
+
+    // Assert APPLY path --> Verify if the OM service provider's RocksDB got
+    // the first 3 changes, last change not applied.
+    String fullKey = omMetadataManager.getOzoneKey("sampleVol",
+        "bucketOne", "key_one");
+    assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance()
+        .getKeyTable(getBucketLayout()).isExist(fullKey));
+    fullKey = omMetadataManager.getOzoneKey("sampleVol",
+        "bucketOne", "key_two");
+    assertFalse(ozoneManagerServiceProvider.getOMMetadataManagerInstance()
+        .getKeyTable(getBucketLayout()).isExist(fullKey));
+  }
+
+  @Test
   public void testSyncDataFromOMFullSnapshot() throws Exception {
 
     // Empty OM DB to start with.
@@ -364,6 +437,17 @@
     return ozoneManagerProtocolMock;
   }
 
+  private OzoneManagerProtocol getMockOzoneManagerClientWith4Updates(
+      DBUpdates updates1, DBUpdates updates2, DBUpdates updates3,
+      DBUpdates updates4) throws IOException {
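+    // Mockito returns these DBUpdates batches on consecutive getDBUpdates()
+    // calls, simulating four successive delta fetches from OM.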
+    OzoneManagerProtocol ozoneManagerProtocolMock =
+        mock(OzoneManagerProtocol.class);
+    when(ozoneManagerProtocolMock.getDBUpdates(any(OzoneManagerProtocolProtos
+        .DBUpdatesRequest.class))).thenReturn(updates1, updates2, updates3,
+        updates4);
+    return ozoneManagerProtocolMock;
+  }
+
   private BucketLayout getBucketLayout() {
     return BucketLayout.DEFAULT;
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index 469f04f..8576484 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -20,7 +20,11 @@
 
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
-import org.junit.*;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
index 06157d3..ff5a5bb 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
@@ -85,7 +85,7 @@
   }
 
   @Test
-  public void testReprocessOMDB() throws Exception{
+  public void testReprocessOMDB() throws Exception {
 
     Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
         reconContainerMetadataManager.getKeyPrefixesForContainer(1);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
index 2ff20cf..3dd0d2f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
@@ -107,7 +107,7 @@
         rocksDB.getUpdatesSince(0);
     List<byte[]> writeBatches = new ArrayList<>();
 
-    while(transactionLogIterator.isValid()) {
+    while (transactionLogIterator.isValid()) {
       TransactionLogIterator.BatchResult result =
           transactionLogIterator.getBatch();
       result.writeBatch().markWalTerminationPoint();
@@ -190,7 +190,7 @@
         rocksDB.getUpdatesSince(3);
     List<byte[]> writeBatches = new ArrayList<>();
 
-    while(transactionLogIterator.isValid()) {
+    while (transactionLogIterator.isValid()) {
       TransactionLogIterator.BatchResult result =
           transactionLogIterator.getBatch();
       result.writeBatch().markWalTerminationPoint();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
index 8f7f76c..81c406e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
@@ -116,7 +116,7 @@
     ArrayList<OMDBUpdateEvent> events = new ArrayList<>();
     // Create 5 put, 1 delete and 1 update event for each table
     for (String tableName: tableCountTask.getTaskTables()) {
-      for (int i=0; i<5; i++) {
+      for (int i = 0; i < 5; i++) {
         events.add(getOMUpdateEvent("item" + i, null, tableName, PUT));
       }
       // for delete event, if value is set to null, the counter will not be
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
index 9b186e8..28c3958 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java
@@ -44,7 +44,7 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR;
-import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED;
 
 /**
  * This class creates the OzoneClient for the Rest endpoints.
@@ -77,7 +77,7 @@
 
   @PreDestroy
   public void destroy() throws IOException {
-    client.getObjectStore().getClientProxy().clearTheadLocalS3Auth();
+    client.getObjectStore().getClientProxy().clearThreadLocalS3Auth();
   }
   @Produces
   public S3Auth getSignature() {
@@ -90,7 +90,12 @@
       }
 
       String awsAccessId = signatureInfo.getAwsAccessId();
-      validateAccessId(awsAccessId);
+      // Only validate the AWS access id when needed: reject missing/empty ids.
+      if (awsAccessId == null || awsAccessId.equals("")) {
+        LOG.debug("Malformed s3 header. awsAccessID: ", awsAccessId);
+        throw ACCESS_DENIED;
+      }
+
       return new S3Auth(stringToSign,
           signatureInfo.getSignature(),
           awsAccessId);
@@ -123,14 +128,6 @@
     }
   }
 
-  // ONLY validate aws access id when needed.
-  private void validateAccessId(String awsAccessId) throws Exception {
-    if (awsAccessId == null || awsAccessId.equals("")) {
-      LOG.error("Malformed s3 header. awsAccessID: ", awsAccessId);
-      throw wrapOS3Exception(MALFORMED_HEADER);
-    }
-  }
-
   public void setOzoneConfiguration(OzoneConfiguration config) {
     this.ozoneConfiguration = config;
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index 7259d85..a257155 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -90,10 +90,10 @@
     if (host.length() > domain.length()) {
       String bucketName = host.substring(0, host.length() - domain.length());
 
-      if(!bucketName.endsWith(".")) {
+      if (!bucketName.endsWith(".")) {
         //Checking this as the virtual host style pattern is http://bucket.host/
         throw getException("Invalid S3 Gateway request {" + requestContext
-            .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host
+            .getUriInfo().getRequestUri().toString() + "}:" + " Host: {" + host
             + " is in invalid format");
       } else {
         bucketName = bucketName.substring(0, bucketName.length() - 1);
@@ -134,7 +134,7 @@
    */
   private String getDomainName(String host) {
     String match = null;
-    int length=0;
+    int length = 0;
     for (String domainVal : domains) {
       if (host.endsWith(domainVal)) {
         int len = domainVal.length();
@@ -148,7 +148,7 @@
   }
 
   private String checkHostWithoutPort(String host) {
-    if (host.contains(":")){
+    if (host.contains(":")) {
       return host.substring(0, host.lastIndexOf(":"));
     } else {
       return host;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index dbf8cf3..949227b 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -67,6 +67,7 @@
 
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
 
 /**
@@ -148,7 +149,7 @@
       }
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -212,7 +213,7 @@
 
     if (count < maxKeys) {
       response.setTruncated(false);
-    } else if(ozoneKeyIterator.hasNext()) {
+    } else if (ozoneKeyIterator.hasNext()) {
       response.setTruncated(true);
       ContinueToken nextToken = new ContinueToken(lastKey, prevDir);
       response.setNextToken(nextToken.encodeToString());
@@ -241,12 +242,11 @@
       return Response.status(HttpStatus.SC_OK).header("Location", location)
           .build();
     } catch (OMException exception) {
+      if (exception.getResult() == ResultCodes.INVALID_BUCKET_NAME) {
+        throw newError(S3ErrorTable.INVALID_BUCKET_NAME, bucketName, exception);
+      }
       LOG.error("Error in Create Bucket Request for bucket: {}", bucketName,
           exception);
-      if (exception.getResult() == ResultCodes.INVALID_BUCKET_NAME) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_BUCKET_NAME,
-            bucketName);
-      }
       throw exception;
     }
   }
@@ -263,8 +263,7 @@
       ozoneMultipartUploadList = bucket.listMultipartUploads(prefix);
     } catch (OMException exception) {
       if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            prefix);
+        throw newError(S3ErrorTable.ACCESS_DENIED, prefix, exception);
       }
       throw exception;
     }
@@ -291,12 +290,7 @@
   @HEAD
   public Response head(@PathParam("bucket") String bucketName)
       throws OS3Exception, IOException {
-    try {
-      getBucket(bucketName);
-    } catch (OS3Exception ex) {
-      LOG.error("Exception occurred in headBucket", ex);
-      throw ex;
-    }
+    getBucket(bucketName);
     return Response.ok().build();
   }
 
@@ -314,13 +308,11 @@
       deleteS3Bucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .BUCKET_NOT_EMPTY, bucketName);
+        throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -407,14 +399,12 @@
       return result;
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         LOG.error("Failed to get acl of Bucket " + bucketName, ex);
-        throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, bucketName);
+        throw newError(S3ErrorTable.INTERNAL_ERROR, bucketName, ex);
       }
     }
   }
@@ -503,19 +493,17 @@
         }
       }
       // Add new permission on Volume
-      for(OzoneAcl acl : ozoneAclListOnVolume) {
+      for (OzoneAcl acl : ozoneAclListOnVolume) {
         volume.addAcl(acl);
       }
     } catch (OMException exception) {
+      if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception);
+      } else if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception);
+      }
       LOG.error("Error in set ACL Request for bucket: {}", bucketName,
           exception);
-      if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET,
-            bucketName);
-      } else if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .ACCESS_DENIED, bucketName);
-      }
       throw exception;
     }
     return Response.status(HttpStatus.SC_OK).build();
@@ -536,13 +524,13 @@
     for (String acl: subValues) {
       String[] part = acl.split("=");
       if (part.length != 2) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, acl);
+        throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
       }
       S3Acl.ACLIdentityType type =
           S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
       if (type == null || !type.isSupported()) {
         LOG.warn("S3 grantee {} is null or not supported", part[0]);
-        throw S3ErrorTable.newError(NOT_IMPLEMENTED, part[0]);
+        throw newError(NOT_IMPLEMENTED, part[0]);
       }
       // Build ACL on Bucket
       BitSet aclsOnBucket =
@@ -569,13 +557,13 @@
     for (String acl: subValues) {
       String[] part = acl.split("=");
       if (part.length != 2) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, acl);
+        throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
       }
       S3Acl.ACLIdentityType type =
           S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
       if (type == null || !type.isSupported()) {
         LOG.warn("S3 grantee {} is null or not supported", part[0]);
-        throw S3ErrorTable.newError(NOT_IMPLEMENTED, part[0]);
+        throw newError(NOT_IMPLEMENTED, part[0]);
       }
       // Build ACL on Volume
       BitSet aclsOnVolume =
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 6a88075..b933363 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -38,6 +38,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
+
 /**
  * Basic helpers for all the REST endpoints.
  */
@@ -57,7 +59,7 @@
       bucket = volume.getBucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else {
         throw ex;
       }
@@ -72,9 +74,9 @@
   @PostConstruct
   public void initialization() {
     LOG.debug("S3 access id: {}", s3Auth.getAccessID());
-    getClient().getObjectStore().
-        getClientProxy().
-        setTheadLocalS3Auth(s3Auth);
+    getClient().getObjectStore()
+        .getClientProxy()
+        .setThreadLocalS3Auth(s3Auth);
     init();
   }
 
@@ -88,9 +90,9 @@
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND
           || ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -117,7 +119,7 @@
       client.getObjectStore().createS3Bucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else if (ex.getResult() != ResultCodes.BUCKET_ALREADY_EXISTS) {
         // S3 does not return error for bucket already exists, it just
         // returns the location.
@@ -138,8 +140,7 @@
       client.getObjectStore().deleteS3Bucket(s3BucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            s3BucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, s3BucketName, ex);
       }
       throw ex;
     }
@@ -175,15 +176,14 @@
 
   private Iterator<? extends OzoneBucket> iterateBuckets(
       Function<OzoneVolume, Iterator<? extends OzoneBucket>> query)
-      throws IOException, OS3Exception{
+      throws IOException, OS3Exception {
     try {
       return query.apply(getVolume());
     } catch (OMException e) {
       if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
         return Collections.emptyIterator();
       } else  if (e.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            "listBuckets");
+        throw newError(S3ErrorTable.ACCESS_DENIED, "listBuckets", e);
       } else {
         throw e;
       }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java
index b9f8702..c0984ed 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java
@@ -21,7 +21,11 @@
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
 
-import javax.xml.bind.annotation.*;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
 import java.util.ArrayList;
 import java.util.List;
 
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d2a45b1..863b1b0 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -95,6 +95,7 @@
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
@@ -208,23 +209,19 @@
 
       return Response.ok().status(HttpStatus.SC_OK)
           .build();
-    } catch (IOException ex) {
-      LOG.error("Exception occurred in PutObject", ex);
-      if (ex instanceof  OMException) {
-        if (((OMException) ex).getResult() == ResultCodes.NOT_A_FILE) {
-          OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST,
-              keyPath);
-          os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
-              "when calling the PutObject/MPU PartUpload operation: " +
-              OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" +
-              " considered as Unix Paths. Path has Violated FS Semantics " +
-              "which caused put operation to fail.");
-          throw os3Exception;
-        } else if ((((OMException) ex).getResult() ==
-            ResultCodes.PERMISSION_DENIED)) {
-          throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
-        }
+    } catch (OMException ex) {
+      if (ex.getResult() == ResultCodes.NOT_A_FILE) {
+        OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
+        os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
+            "when calling the PutObject/MPU PartUpload operation: " +
+            OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" +
+            " considered as Unix Paths. Path has Violated FS Semantics " +
+            "which caused put operation to fail.");
+        throw os3Exception;
+      } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       }
+      LOG.error("Exception occurred in PutObject", ex);
       throw ex;
     } finally {
       if (output != null) {
@@ -277,8 +274,7 @@
             length);
         LOG.debug("range Header provided: {}", rangeHeader);
         if (rangeHeader.isInValidRange()) {
-          throw S3ErrorTable.newError(
-              S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
+          throw newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
         }
       }
       ResponseBuilder responseBuilder;
@@ -329,10 +325,9 @@
       return responseBuilder.build();
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_KEY, keyPath);
+        throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -371,7 +366,7 @@
         // Just return 404 with no content
         return Response.status(Status.NOT_FOUND).build();
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -401,7 +396,7 @@
       ozoneBucket.abortMultipartUpload(key, uploadId);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId);
+        throw newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId, ex);
       }
       throw ex;
     }
@@ -436,8 +431,7 @@
       bucket.deleteKey(keyPath);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
         //NOT_FOUND is not a problem, AWS doesn't throw exception for missing
         // keys. Just return 204
@@ -447,7 +441,7 @@
         // NOT_FOUND is not a problem, AWS doesn't throw exception for missing
         // keys. Just return 204
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -497,11 +491,11 @@
       return Response.status(Status.OK).entity(
           multipartUploadInitiateResponse).build();
     } catch (OMException ex) {
+      if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
+        throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
+      }
       LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " +
           "key: {}", bucket, key, ex);
-      if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, key);
-      }
       throw ex;
     }
   }
@@ -544,24 +538,22 @@
       return Response.status(Status.OK).entity(completeMultipartUploadResponse)
           .build();
     } catch (OMException ex) {
-      LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " +
-          ", key: {}", bucket, key, ex);
       if (ex.getResult() == ResultCodes.INVALID_PART) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key);
+        throw newError(S3ErrorTable.INVALID_PART, key, ex);
       } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key);
+        throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
       } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
-        throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key);
-      } else if(ex.getResult() == ResultCodes.INVALID_REQUEST) {
-        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
+        throw newError(ENTITY_TOO_SMALL, key, ex);
+      } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
+        OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
         os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
             "when calling the CompleteMultipartUpload operation: You must " +
             "specify at least one part");
         throw os3Exception;
-      } else if(ex.getResult() == ResultCodes.NOT_A_FILE) {
-        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
+      } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
+        OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
         os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
             "when calling the CompleteMultipartUpload operation: " +
             OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " +
@@ -569,6 +561,8 @@
             "given KeyName caused failure for MPU");
         throw os3Exception;
       }
+      LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " +
+          ", key: {}", bucket, key, ex);
       throw ex;
     }
   }
@@ -605,8 +599,7 @@
               headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
           if (!checkCopySourceModificationTime(sourceKeyModificationTime,
               copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
-            throw S3ErrorTable.newError(PRECOND_FAILED,
-                sourceBucket + "/" + sourceKey);
+            throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
           }
 
           try (OzoneInputStream sourceObject =
@@ -654,11 +647,9 @@
 
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            bucket + "/" + key);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
       }
       throw ex;
     }
@@ -713,11 +704,10 @@
 
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            bucket + "/" + key + "/" + uploadID);
+        throw newError(S3ErrorTable.ACCESS_DENIED,
+            bucket + "/" + key + "/" + uploadID, ex);
       }
       throw ex;
     }
@@ -753,8 +743,7 @@
         // options like storage type are provided or not when source and
         // dest are given same
         if (storageTypeDefault) {
-          OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-              .INVALID_REQUEST, copyHeader);
+          OS3Exception ex = newError(S3ErrorTable.INVALID_REQUEST, copyHeader);
           ex.setErrorMessage("This copy request is illegal because it is " +
               "trying to copy an object to it self itself without changing " +
               "the object's metadata, storage class, website redirect " +
@@ -800,12 +789,12 @@
       return copyObjectResponse;
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, sourceKey);
+        throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex);
       } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            destBucket + "/" + destkey);
+        throw newError(S3ErrorTable.ACCESS_DENIED,
+            destBucket + "/" + destkey, ex);
       }
       throw ex;
     } finally {
@@ -832,7 +821,7 @@
     }
     int pos = header.indexOf('/');
     if (pos == -1) {
-      OS3Exception ex = S3ErrorTable.newError(INVALID_ARGUMENT, header);
+      OS3Exception ex = newError(INVALID_ARGUMENT, header);
       ex.setErrorMessage("Copy Source must mention the source bucket and " +
           "key: sourcebucket/sourcekey");
       throw ex;
@@ -843,7 +832,7 @@
       String key = urlDecode(header.substring(pos + 1));
       return Pair.of(bucket, key);
     } catch (UnsupportedEncodingException e) {
-      OS3Exception ex = S3ErrorTable.newError(INVALID_ARGUMENT, header);
+      OS3Exception ex = newError(INVALID_ARGUMENT, header, e);
       ex.setErrorMessage("Copy Source header could not be url-decoded");
       throw ex;
     }
@@ -854,8 +843,7 @@
     try {
       return S3StorageType.valueOf(storageType);
     } catch (IllegalArgumentException ex) {
-      throw S3ErrorTable.newError(INVALID_ARGUMENT,
-          storageType);
+      throw newError(INVALID_ARGUMENT, storageType, ex);
     }
   }
 
@@ -883,7 +871,7 @@
     }
 
     long currentDate = System.currentTimeMillis();
-    if  (ozoneDateInMs <= currentDate){
+    if (ozoneDateInMs <= currentDate) {
       return OptionalLong.of(ozoneDateInMs);
     } else {
       // dates in the future are invalid, so return empty()
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
index c59c4d1..792f2e2 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
@@ -83,7 +83,7 @@
 
 
     public static ACLType getType(String typeStr) {
-      for(ACLType type: ACLType.values()) {
+      for (ACLType type: ACLType.values()) {
         if (type.getValue().equals(typeStr)) {
           return type;
         }
@@ -139,7 +139,7 @@
     }
 
     public static ACLIdentityType getTypeFromGranteeType(String typeStr) {
-      for(ACLIdentityType type: ACLIdentityType.values()) {
+      for (ACLIdentityType type: ACLIdentityType.values()) {
         if (type.getGranteeType().equals(typeStr)) {
           return type;
         }
@@ -148,7 +148,7 @@
     }
 
     public static ACLIdentityType getTypeFromHeaderType(String typeStr) {
-      for(ACLIdentityType type: ACLIdentityType.values()) {
+      for (ACLIdentityType type: ACLIdentityType.values()) {
         if (type.getHeaderType().equals(typeStr)) {
           return type;
         }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
index 66f931fd..ee9e1a0 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
@@ -169,10 +169,10 @@
     @XmlElement(name = "ID")
     private String id;
 
-    @XmlAttribute(name="xsi:type")
+    @XmlAttribute(name = "xsi:type")
     private String xsiType = "CanonicalUser";
 
-    @XmlAttribute(name="xmlns:xsi")
+    @XmlAttribute(name = "xmlns:xsi")
     private String xsiNs = "http://www.w3.org/2001/XMLSchema-instance";
 
     public String getXsiNs() {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
index 86d9fc0..84ec325 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
@@ -26,7 +26,7 @@
 import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
 import static java.net.HttpURLConnection.HTTP_PRECON_FAILED;
 import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED;
-import static java.net.HttpURLConnection.HTTP_SERVER_ERROR;
+import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_NOT_SATISFIABLE;
 
 /**
@@ -65,7 +65,7 @@
 
   public static final OS3Exception MALFORMED_HEADER = new OS3Exception(
       "AuthorizationHeaderMalformed", "The authorization header you provided " +
-      "is invalid.", HTTP_NOT_FOUND);
+      "is invalid.", HTTP_BAD_REQUEST);
 
   public static final OS3Exception NO_SUCH_KEY = new OS3Exception(
       "NoSuchKey", "The specified key does not exist", HTTP_NOT_FOUND);
@@ -106,7 +106,7 @@
 
   public static final OS3Exception INTERNAL_ERROR = new OS3Exception(
       "InternalError", "We encountered an internal error. Please try again.",
-      HTTP_SERVER_ERROR);
+      HTTP_INTERNAL_ERROR);
 
   public static final OS3Exception ACCESS_DENIED = new OS3Exception(
       "AccessDenied", "User doesn't have the right to access this " +
@@ -120,17 +120,27 @@
       "NotImplemented", "This part of feature is not implemented yet.",
       HTTP_NOT_IMPLEMENTED);
 
+  public static OS3Exception newError(OS3Exception e, String resource) {
+    return newError(e, resource, null);
+  }
+
   /**
    * Create a new instance of Error.
    * @param e Error Template
    * @param resource Resource associated with this exception
+   * @param ex the original exception, may be null
    * @return creates a new instance of error based on the template
    */
-  public static OS3Exception newError(OS3Exception e, String resource) {
+  public static OS3Exception newError(OS3Exception e, String resource,
+      Exception ex) {
     OS3Exception err =  new OS3Exception(e.getCode(), e.getErrorMessage(),
         e.getHttpCode());
     err.setResource(resource);
-    LOG.error(err.toXml(), e);
+    if (e.getHttpCode() == HTTP_INTERNAL_ERROR) {
+      LOG.error("Internal Error: {}", err.toXml(), ex);
+    } else if (LOG.isDebugEnabled()) {
+      LOG.debug(err.toXml(), ex);
+    }
     return err;
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
index 14bf2a2..1783b58 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
@@ -71,7 +71,7 @@
     case 6:
       // Access id is kerberos principal.
       // Ex: testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request
-      accessKeyID = split[0] + "/" +split[1];
+      accessKeyID = split[0] + "/" + split[1];
       date = split[2].trim();
       awsRegion = split[3].trim();
       awsService = split[4].trim();
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
index ce75c59..0a34f14 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
@@ -111,7 +111,7 @@
 
       } catch (DecoderException ex) {
         OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
-            .INVALID_ARGUMENT, key);
+            .INVALID_ARGUMENT, key, ex);
         os3Exception.setErrorMessage("The continuation token provided is " +
             "incorrect");
         throw os3Exception;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
index 83305f0..d02c3cc 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.s3;
 
+import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.container.ContainerRequestContext;
 import javax.ws.rs.core.MultivaluedHashMap;
 import javax.ws.rs.core.MultivaluedMap;
@@ -31,6 +32,9 @@
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor;
 
+import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
+import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
+
 import static org.apache.hadoop.ozone.s3.signature.SignatureParser.AUTHORIZATION_HEADER;
 import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_MD5;
 import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_TYPE;
@@ -118,7 +122,10 @@
         },
         {
             null, null, null, null, null, null
-        }
+        },
+        {
+            "", null, null, null, null, null
+        },
     });
   }
 
@@ -133,6 +140,36 @@
   }
 
   @Test
+  public void testGetSignature() {
+    try {
+      System.err.println("Testing: " + authHeader);
+      OzoneConfiguration configuration = new OzoneConfiguration();
+      configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "ozone1");
+      configuration.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "ozone1addr:9399");
+      producer.setOzoneConfiguration(configuration);
+      producer.getSignature();
+      if ("".equals(authHeader)) {
+        fail("Empty AuthHeader must fail");
+      }
+    } catch (WebApplicationException ex) {
+      if (authHeader == null || authHeader.equals("")) {
+        // Empty auth header should be 403
+        Assert.assertEquals(HTTP_FORBIDDEN, ex.getResponse().getStatus());
+        // TODO: Should return XML in body like this (but not for now):
+        // <Error>
+        //   <Code>AccessDenied</Code><Message>Access Denied</Message>
+        //   <RequestId>...</RequestId><HostId>...</HostId>
+        // </Error>
+      } else {
+        // Other requests have a stale timestamp and thus should fail
+        Assert.assertEquals(HTTP_BAD_REQUEST, ex.getResponse().getStatus());
+      }
+    } catch (Exception ex) {
+      fail("Unexpected exception: " + ex);
+    }
+  }
+
+  @Test
   public void testGetClientFailureWithMultipleServiceIds() {
     try {
       OzoneConfiguration configuration = new OzoneConfiguration();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 2a46c55..19d9380 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -66,7 +66,7 @@
       virtualHostStyleUri = new URI("http://" + s3HttpAddr);
     } else if (path != null && queryParams == null) {
       virtualHostStyleUri = new URI("http://" + s3HttpAddr + path);
-    } else if (path !=null && queryParams != null)  {
+    } else if (path != null && queryParams != null) {
       virtualHostStyleUri = new URI("http://" + s3HttpAddr + path +
           queryParams);
     } else {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
index 6666da7..df4b5f4 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
@@ -37,7 +37,7 @@
         getAdapter().marshal("a+b+c/"));
   }
 
-  private XmlAdapter<String, String> getAdapter(){
+  private XmlAdapter<String, String> getAdapter() {
     return (new ObjectKeyNameAdapter());
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
index 19ab3bf..f5e4a06 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
@@ -57,7 +57,7 @@
     assertEquals(0, response.getBucketsNum());
 
     String bucketBaseName = "bucket-" + getClass().getName();
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       clientStub.getObjectStore().createS3Bucket(bucketBaseName + i);
     }
     response = (ListBucketResponse) rootEndpoint.get().getEntity();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
index de0bd7e..55be795 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
@@ -88,11 +88,11 @@
     try {
       UpgradeFinalizer.StatusAndMessages finalizationResponse =
           client.finalizeUpgrade(upgradeClientID);
-      if (isFinalized(finalizationResponse.status())){
+      if (isFinalized(finalizationResponse.status())) {
         System.out.println("Upgrade has already been finalized.");
         emitExitMsg();
         return null;
-      } else if (!isStarting(finalizationResponse.status())){
+      } else if (!isStarting(finalizationResponse.status())) {
         System.err.println("Invalid response from Ozone Manager.");
         System.err.println(
             "Current finalization status is: " + finalizationResponse.status()
@@ -116,7 +116,7 @@
       emitFinishedMsg("Ozone Manager");
     } catch (CancellationException e) {
       emitCancellationMsg("Ozone Manager");
-    } catch (InterruptedException e){
+    } catch (InterruptedException e) {
       emitCancellationMsg("Ozone Manager");
       Thread.currentThread().interrupt();
     } catch (ExecutionException e) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
index fd354f7..8e46485 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
@@ -72,11 +72,11 @@
     try {
       StatusAndMessages finalizationResponse =
           scmClient.finalizeScmUpgrade(upgradeClientID);
-      if (isFinalized(finalizationResponse.status())){
+      if (isFinalized(finalizationResponse.status())) {
         System.out.println("Upgrade has already been finalized.");
         emitExitMsg();
         return;
-      } else if (!isStarting(finalizationResponse.status())){
+      } else if (!isStarting(finalizationResponse.status())) {
         System.err.println("Invalid response from Storage Container Manager.");
         System.err.println(
             "Current finalization status is: " + finalizationResponse.status()
@@ -101,7 +101,7 @@
       emitFinishedMsg("Storage Container Manager");
     } catch (CancellationException e) {
       emitCancellationMsg("Storage Container Manager");
-    } catch (InterruptedException e){
+    } catch (InterruptedException e) {
       emitCancellationMsg("Storage Container Manager");
       Thread.currentThread().interrupt();
     } catch (ExecutionException e) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java
index 539ac28..308b900 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java
@@ -80,13 +80,13 @@
   }
 
   public static void emitFinishedMsg(String component) {
-    System.out.println("Finalization of " + component +"'s metadata upgrade "
+    System.out.println("Finalization of " + component + "'s metadata upgrade "
         + "finished.");
   }
 
   public static void emitCancellationMsg(String component) {
     System.err.println("Finalization command was cancelled. Note that, this"
-        + "will not cancel finalization in " + component +". Progress can be"
+        + "will not cancel finalization in " + component + ". Progress can be"
         + "monitored in the Ozone Manager's log.");
   }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
index ae64c94..9af8a74 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
@@ -54,7 +54,7 @@
     new AuditParser().run(argv);
   }
 
-  public String getDatabase(){
+  public String getDatabase() {
     return database;
   }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index 8750e19..725b2b8 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -70,7 +70,7 @@
 
   private static void loadProperties() {
     Properties props = new Properties();
-    try{
+    try {
       InputStream inputStream = DatabaseHelper.class.getClassLoader()
           .getResourceAsStream(ParserConsts.PROPS_FILE);
       if (inputStream != null) {
@@ -85,7 +85,7 @@
         throw new FileNotFoundException("property file '"
             + ParserConsts.PROPS_FILE + "' not found in the classpath");
       }
-    } catch (Exception e){
+    } catch (Exception e) {
       LOG.error(e.getMessage());
     }
 
@@ -145,14 +145,14 @@
       AuditEntry tempEntry = null;
 
       while (true) {
-        if (tempEntry == null){
+        if (tempEntry == null) {
           tempEntry = new AuditEntry();
         }
 
         if (currentLine == null) {
           break;
         } else {
-          if (!currentLine.matches(ParserConsts.DATE_REGEX)){
+          if (!currentLine.matches(ParserConsts.DATE_REGEX)) {
             tempEntry.appendException(currentLine);
           } else {
             entry = StringUtils.stripAll(currentLine.split("\\|"));
@@ -168,11 +168,11 @@
                 .setParams(ops[1])
                 .setResult(entry[6].substring(entry[6].indexOf('=') + 1))
                 .build();
-            if (entry.length == 8){
+            if (entry.length == 8) {
               tempEntry.setException(entry[7]);
             }
           }
-          if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){
+          if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)) {
             listResult.add(tempEntry);
             tempEntry = null;
           }
@@ -205,8 +205,8 @@
       if (rs != null) {
         rsm = rs.getMetaData();
         int cols = rsm.getColumnCount();
-        while (rs.next()){
-          for (int index = 1; index <= cols; index++){
+        while (rs.next()) {
+          for (int index = 1; index <= cols; index++) {
             result.append(rs.getObject(index));
             result.append("\t");
           }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
index 5d58559..f3f8c45 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
@@ -53,7 +53,7 @@
   @Override
   public Void call() throws Exception {
     try {
-      if(DatabaseHelper.validateTemplate(template)) {
+      if (DatabaseHelper.validateTemplate(template)) {
         System.out.println(
             DatabaseHelper.executeTemplate(auditParser.getDatabase(),
                 template)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
index c6b0b33..035bf26 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
@@ -30,7 +30,7 @@
   private String result;
   private String exception;
 
-  public AuditEntry(){}
+  public AuditEntry() { }
 
   public String getUser() {
     return user;
@@ -104,7 +104,7 @@
     this.exception = exception.trim();
   }
 
-  public void appendException(String text){
+  public void appendException(String text) {
     this.exception += "\n" + text.trim();
   }
 
@@ -126,47 +126,47 @@
 
     }
 
-    public Builder setTimestamp(String ts){
+    public Builder setTimestamp(String ts) {
       this.timestamp = ts;
       return this;
     }
 
-    public Builder setLevel(String lvl){
+    public Builder setLevel(String lvl) {
       this.level = lvl;
       return this;
     }
 
-    public Builder setLogger(String lgr){
+    public Builder setLogger(String lgr) {
       this.logger = lgr;
       return this;
     }
 
-    public Builder setUser(String usr){
+    public Builder setUser(String usr) {
       this.user = usr;
       return this;
     }
 
-    public Builder setIp(String ipAddress){
+    public Builder setIp(String ipAddress) {
       this.ip = ipAddress;
       return this;
     }
 
-    public Builder setOp(String operation){
+    public Builder setOp(String operation) {
       this.op = operation;
       return this;
     }
 
-    public Builder setParams(String prms){
+    public Builder setParams(String prms) {
       this.params = prms;
       return this;
     }
 
-    public Builder setResult(String res){
+    public Builder setResult(String res) {
       this.result = res;
       return this;
     }
 
-    public Builder setException(String exp){
+    public Builder setException(String exp) {
       this.exception = exp;
       return this;
     }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
index 0deb7d5..24c8f24 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java
@@ -77,7 +77,7 @@
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
-          throws IOException, OzoneClientException{
+          throws IOException, OzoneClientException {
     containerOperationClient = new
             ContainerOperationClient(createOzoneConfiguration());
     xceiverClientManager = containerOperationClient
@@ -105,7 +105,7 @@
     List<OmKeyLocationInfo> locationInfos = keyInfo
             .getLatestVersionLocations().getBlocksLatestVersionOnly();
     // for zero-sized key
-    if(locationInfos.isEmpty()){
+    if (locationInfos.isEmpty()) {
       System.out.println("No Key Locations Found");
       return;
     }
@@ -142,7 +142,7 @@
       for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
               entry: responses.entrySet()) {
         JsonObject jsonObj = new JsonObject();
-        if(entry.getValue() == null){
+        if (entry.getValue() == null) {
           LOG.error("Cant execute getBlock on this node");
           continue;
         }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 2ecfa13..275908e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -177,7 +177,7 @@
   }
 
   private void constructColumnFamilyMap(DBDefinition dbDefinition) {
-    if (dbDefinition == null){
+    if (dbDefinition == null) {
       System.out.println("Incorrect Db Path");
       return;
     }
@@ -217,7 +217,7 @@
     DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion);
     this.constructColumnFamilyMap(DBDefinitionFactory.
             getDefinition(Paths.get(dbPath)));
-    if (this.columnFamilyMap !=null) {
+    if (this.columnFamilyMap != null) {
       if (!this.columnFamilyMap.containsKey(tableName)) {
         System.out.print("Table with name:" + tableName + " does not exist");
       } else {
@@ -239,8 +239,8 @@
   }
 
   private String removeTrailingSlashIfNeeded(String dbPath) {
-    if(dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)){
-      dbPath = dbPath.substring(0, dbPath.length()-1);
+    if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) {
+      dbPath = dbPath.substring(0, dbPath.length() - 1);
     }
     return dbPath;
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
index cabddf9..0ebc832 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
@@ -170,7 +170,7 @@
     dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
 
     Iterator<Path> pathIterator =  p.iterator();
-    while(pathIterator.hasNext()) {
+    while (pathIterator.hasNext()) {
       Path elem = pathIterator.next();
       String path =
           metadataManager.getOzonePathKey(lastObjectId, elem.toString());
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
index 6a7f178..27a4c6b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
@@ -17,7 +17,10 @@
 
 package org.apache.hadoop.ozone.debug;
 
-import com.google.gson.*;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
 import org.apache.hadoop.hdds.cli.SubcommandWithParent;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -94,7 +97,7 @@
     configuration.setBoolean("ozone.client.verify.checksum",
         !isChecksumVerifyEnabled);
 
-    if(isChecksumVerifyEnabled) {
+    if (isChecksumVerifyEnabled) {
       clientProtocol = client.getObjectStore().getClientProxy();
       clientProtocolWithoutChecksum = new RpcClient(configuration, null);
     } else {
@@ -187,14 +190,14 @@
           Throwable cause = e.getCause();
           replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION,
               e.getMessage());
-          if(cause instanceof OzoneChecksumException) {
+          if (cause instanceof OzoneChecksumException) {
             BlockID blockID = block.getKey().getBlockID();
             String datanodeUUID = replica.getKey().getUuidString();
             is = getInputStreamWithoutChecksum(replicasWithoutChecksum,
                 datanodeUUID, blockID);
             Files.copy(is, replicaFile.toPath(),
                 StandardCopyOption.REPLACE_EXISTING);
-          } else if(cause instanceof StatusRuntimeException) {
+          } else if (cause instanceof StatusRuntimeException) {
             break;
           }
         } finally {
@@ -213,10 +216,10 @@
     OzoneInputStream is = new OzoneInputStream();
     for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
         block : replicasWithoutChecksum.entrySet()) {
-      if(block.getKey().getBlockID().equals(blockID)) {
+      if (block.getKey().getBlockID().equals(blockID)) {
         for (Map.Entry<DatanodeDetails, OzoneInputStream>
             replica : block.getValue().entrySet()) {
-          if(replica.getKey().getUuidString().equals(datanodeUUID)) {
+          if (replica.getKey().getUuidString().equals(datanodeUUID)) {
             is = replica.getValue();
           }
         }
@@ -234,8 +237,8 @@
         "_" + fileSuffix;
     System.out.println("Creating directory : " + directoryName);
     File dir = new File(outputDir + "/" + directoryName);
-    if (!dir.exists()){
-      if(dir.mkdir()) {
+    if (!dir.exists()) {
+      if (dir.mkdir()) {
         System.out.println("Successfully created!");
       } else {
         throw new IOException(String.format(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index 0a639ec..a7b330c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -309,7 +309,7 @@
   /**
    * Print out reports with the given message.
    */
-  public void print(String msg){
+  public void print(String msg) {
     Consumer<String> print = freonCommand.isInteractive()
             ? System.out::println
             : LOG::info;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
index e774fcd..9e73bfb 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
@@ -136,7 +136,7 @@
       } else {
         xceiverClients = new ArrayList<>();
         pipelines = new HashSet<>();
-        for(String pipelineId:pipelinesFromCmd){
+        for (String pipelineId:pipelinesFromCmd) {
           List<Pipeline> selectedPipelines =  pipelinesFromSCM.stream()
               .filter((p -> p.getId().toString()
                   .equals("PipelineID=" + pipelineId)
@@ -144,11 +144,11 @@
                .collect(Collectors.toList());
           pipelines.addAll(selectedPipelines);
         }
-        for (Pipeline p:pipelines){
+        for (Pipeline p:pipelines) {
           LOG.info("Writing to pipeline: " + p.getId());
           xceiverClients.add(xceiverClientManager.acquireClient(p));
         }
-        if (pipelines.isEmpty()){
+        if (pipelines.isEmpty()) {
           throw new IllegalArgumentException(
               "Couldn't find the any/the selected pipeline");
         }
@@ -166,8 +166,8 @@
 
   private boolean pipelineContainsDatanode(Pipeline p,
       List<String> datanodeHosts) {
-    for (DatanodeDetails dn:p.getNodes()){
-      if (datanodeHosts.contains(dn.getHostName())){
+    for (DatanodeDetails dn:p.getNodes()) {
+      if (datanodeHosts.contains(dn.getHostName())) {
         return true;
       }
     }
@@ -219,7 +219,7 @@
             .setData(dataToWrite);
 
     XceiverClientSpi clientSpi = xceiverClients.get(
-        (int) (stepNo%(xceiverClients.size())));
+        (int) (stepNo % (xceiverClients.size())));
     sendWriteChunkRequest(blockId, writeChunkRequest,
         clientSpi);
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
index b0937d0..0ed2a6e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
@@ -70,7 +70,7 @@
           "written in each directory. Full name --fileSize will be removed " +
           "in later versions.",
       defaultValue = "4096")
-  private int fileSizeInBytes;
+  private long fileSizeInBytes;
 
   @Option(names = {"-b", "--buffer"},
           description = "Size of buffer used to generated the file content.",
@@ -175,7 +175,7 @@
       }
     }
 
-    while(spanIndex < span) {
+    while (spanIndex < span) {
       String levelSubDir = makeDirWithGivenNumberOfFiles(parent);
       ++spanIndex;
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
index 1f0c3e9..755c57a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
@@ -51,7 +51,7 @@
   @Option(names = {"-s", "--size"},
       description = "Size of the generated files (in bytes)",
       defaultValue = "10240")
-  private int fileSize;
+  private long fileSize;
 
   @Option(names = {"--buffer"},
       description = "Size of buffer used store the generated key content",
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
index 670a975..35580fd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
@@ -63,7 +63,7 @@
   @CommandLine.Option(names = {"--size"},
       description = "Size of the generated files.",
       defaultValue = "104857600")
-  private int fileSize;
+  private long fileSize;
 
   private static final String SUB_DIR_NAME = "dir1";
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
index c5d4d15..cfdc924 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
@@ -163,7 +163,7 @@
     generatedConfig.setProperties(requiredProperties);
 
     File output = new File(path, "ozone-site.xml");
-    if(output.createNewFile()){
+    if (output.createNewFile()) {
       JAXBContext context =
           JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class);
       Marshaller m = context.createMarshaller();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
index 740e667..af6d624 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
@@ -151,7 +151,7 @@
         client = createRpcClientFromHostPort(ozoneURI.getHost(),
             ozoneURI.getPort(), conf);
       }
-    } else {// When host is not specified
+    } else { // When host is not specified
 
       Collection<String> omServiceIds = conf.getTrimmedStringCollection(
           OZONE_OM_SERVICE_IDS_KEY);
@@ -270,7 +270,7 @@
 
     // add leading slash to the path, if it does not exist
     int firstSlash = path.indexOf('/');
-    if(firstSlash != 0) {
+    if (firstSlash != 0) {
       path = "/" + path;
     }
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
index 5c07662..e1592e5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
@@ -57,7 +57,7 @@
               " user if not specified")
   private String ownerName;
 
-  enum AllowedBucketLayouts {FILE_SYSTEM_OPTIMIZED, OBJECT_STORE}
+  enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE }
 
   @Option(names = { "--layout", "-l" },
       description = "Allowed Bucket Layouts: ${COMPLETION-CANDIDATES}",
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
index 93a421a..fa83bbb 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
@@ -186,7 +186,7 @@
   @Test
   public void testLoadCommand() {
     String[] args1 = new String[]{dbName, "load", LOGS1};
-    try{
+    try {
       execute(args1, "");
       fail("No exception thrown.");
     } catch (Exception e) {
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
index 2a5223f..75648b4 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
@@ -54,12 +54,12 @@
   }
 
   @After
-  public void setUp(){
+  public void setUp() {
     bout.reset();
   }
 
   @AfterClass
-  public static void tearDown(){
+  public static void tearDown() {
     System.setOut(psBackup);
   }
 
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
index 1797d71..25b1945 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
@@ -26,7 +26,11 @@
 import java.util.function.LongSupplier;
 import java.util.stream.LongStream;
 
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.anyChar;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 
 /**
  * Tests for the Progressbar class for Freon.
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
index 2486e57..b378628 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -144,10 +144,10 @@
             throw ex;
           }
         };
-    try{
+    try {
       cmd.parseWithHandlers(new CommandLine.RunLast(),
           exceptionHandler, args);
-    }catch(Exception ex){
+    } catch (Exception ex) {
       Assert.assertTrue("Expected " + msg + ", but got: " + ex.getMessage(),
           ex.getMessage().contains(msg));
     }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
index ac1f7fd..ef0cac7 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
@@ -37,7 +37,7 @@
     try {
       pgd.addClass("freon", Freon.class,
           "Populates ozone with data.");
-    } catch(Throwable e) {
+    } catch (Throwable e) {
       e.printStackTrace();
     }
   }
@@ -46,7 +46,7 @@
     int exitCode = -1;
     try {
       exitCode = pgd.run(args);
-    } catch(Throwable e) {
+    } catch (Throwable e) {
       e.printStackTrace();
     }
 
@@ -55,7 +55,7 @@
     }
   }
 
-  public static void main(String[] args){
+  public static void main(String[] args) {
     new OzoneTestDriver().run(args);
   }
 }
diff --git a/pom.xml b/pom.xml
index cf94d9d..efa1aad 100644
--- a/pom.xml
+++ b/pom.xml
@@ -232,9 +232,9 @@
     <exec-maven-plugin.version>1.3.1</exec-maven-plugin.version>
     <make-maven-plugin.version>1.0-beta-1</make-maven-plugin.version>
     <native-maven-plugin.version>1.0-alpha-8</native-maven-plugin.version>
-    <maven-checkstyle-plugin.version>3.1.0</maven-checkstyle-plugin.version>
+    <maven-checkstyle-plugin.version>3.1.2</maven-checkstyle-plugin.version>
     <maven-site-plugin.version>3.9.1</maven-site-plugin.version>
-    <checkstyle.version>8.29</checkstyle.version>
+    <checkstyle.version>9.3</checkstyle.version>
     <surefire.fork.timeout>1200</surefire.fork.timeout>
     <aws-java-sdk.version>1.12.124</aws-java-sdk.version>
     <hsqldb.version>2.3.4</hsqldb.version>
@@ -1512,7 +1512,7 @@
           -->
         <groupId>net.minidev</groupId>
         <artifactId>json-smart</artifactId>
-        <version>2.3.1</version>
+        <version>2.4.7</version>
       </dependency>
       <dependency>
         <groupId>org.skyscreamer</groupId>
@@ -1977,6 +1977,30 @@
         <artifactId>maven-site-plugin</artifactId>
         <version>${maven-site-plugin.version}</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <inherited>false</inherited>
+        <executions>
+          <execution>
+            <id>enforce-property</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <requireProperty>
+                  <property>ozone.version</property>
+                  <message>You must set ozone.version to be the same as ${project.version}</message>
+                  <regex>${project.version}</regex>
+                  <regexMessage>The ozone.version property should be set and should be ${project.version}.</regexMessage>
+                </requireProperty>
+              </rules>
+              <fail>true</fail>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>